Compare commits

...

105 Commits
v0.2.0 ... main

Author SHA1 Message Date
Gaze
58f8497997 feat: add code to errs.PublicError 2024-11-26 14:39:27 +07:00
Gaze
920f7fe07b chore: go mod tidy 2024-11-22 14:23:53 +07:00
Gaze
0cb66232ef feat: add bip322 pkg 2024-11-22 14:22:07 +07:00
gazenw
4074548b3e Merge pull request #74 from gaze-network/develop
Release 0.7.0
2024-10-31 14:18:59 +07:00
gazenw
c5c9a7bdeb feat: add get Runes info batch api (#73)
* fix: make existing handlers use new total holders usecase

* fix: error msg

* feat: add get token info batch

* feat: add includeHoldersCount in get tokens api

* refactor: extract response mapping

* fix: rename new field and add holdersCount to extend

* fix: query params array

* fix: error msg

* fix: struct tags

* fix: remove error

* feat: add default value to additional fields
2024-10-31 14:14:58 +07:00
gazenw
58334dd3e4 Merge pull request #72 from gaze-network/develop
feat: add etching tx hash for runes info api
2024-10-26 20:56:01 +07:00
Gaze
cffe378beb feat: add etching tx hash for runes info api 2024-10-25 16:22:24 +07:00
gazenw
9a7ee49228 Merge pull request #71 from gaze-network/develop
Release v0.6.0
2024-10-17 14:35:19 +07:00
gazenw
9739f61067 feat: implement batch insert using multirow inserts (#70)
* feat: add new batch inserts

* fix: migration

* fix: add casting to unnest with patch

* fix: add UTC() to timestamp mappers

* chore: unused imports

* chore: remove unnecessary comments
2024-10-17 14:34:04 +07:00
gazenw
f1267b387e Merge pull request #69 from gaze-network/develop
Release v0.5.6
2024-10-15 17:59:34 +07:00
gazenw
8883c24c77 fix: only call VerifyStates if not api only (#68) 2024-10-15 17:58:51 +07:00
gazenw
e9ce8df01a Merge pull request #67 from gaze-network/develop
Release v0.5.5
2024-10-11 14:25:05 +07:00
Gaze
3ff73a99f8 Merge branch 'main' into develop 2024-10-11 14:24:36 +07:00
gazenw
96afdfd255 fix: order get ongoing tokens by mint transactions (#66) 2024-10-11 14:23:31 +07:00
gazenw
c49e39be97 feat: optimize flush speed for Runes (#65)
* feat: switch to batchexec for faster inserts

* feat: add time taken log

* feat: add process time log

* feat: add event logs
2024-10-11 14:21:26 +07:00
Gaze
12985ae432 feat: remove conns timeout 2024-10-08 01:14:24 +07:00
Gaze
2d51e52b83 feat: support to config 2024-10-08 01:12:49 +07:00
Gaze
618220d0cb feat: support high httpclient conns 2024-10-08 00:44:24 +07:00
gazenw
6004744721 Merge pull request #64 from gaze-network/develop
Release v0.5.1
2024-10-07 21:18:11 +07:00
gazenw
90ed7bc350 fix: update sql (#63) 2024-10-07 21:17:16 +07:00
gazenw
7a0fe84e40 Merge pull request #62 from gaze-network/develop
Release v0.5.0
2024-10-06 23:52:10 +07:00
gazenw
f1d4651042 feat(runes): add Get Transaction by hash api (#39)
* feat: implement pagination on get balance, get holders

* feat: paginate get transactions

* fix: remove debug

* feat: implement pagination in get utxos

* feat: sort response in get holders

* feat: cap batch query

* feat: add default limits to all endpoints

* chore: rename endpoint funcs

* fix: parse rune name spacers

* feat(runes): get tx by hash api

* fix: error

* refactor: use map to collect rune ids

---------

Co-authored-by: Gaze <gazenw@users.noreply.github.com>
2024-10-06 23:50:13 +07:00
gazenw
5f4f50a9e5 Merge pull request #61 from gaze-network/develop
Release v0.4.7
2024-10-06 21:00:57 +07:00
gazenw
32c3c5c1d4 fix: correctly insert etchedAt in rune entry (#60) 2024-10-06 20:55:38 +07:00
Gaze
2a572e6d1e fix: migration 2024-10-06 20:29:25 +07:00
Gaze
aa25a6882b Merge remote-tracking branch 'origin/main' into develop 2024-10-06 20:06:06 +07:00
gazenw
6182c63150 Merge pull request #59 from gaze-network/develop
Release v0.4.5
2024-10-06 19:59:35 +07:00
gazenw
e1f8eaa3e1 fix: unescape query id (#58) 2024-10-06 19:59:06 +07:00
gazenw
107836ae39 feat(runes): add Get Tokens API (#38)
* feat: implement pagination on get balance, get holders

* feat: paginate get transactions

* fix: remove debug

* feat: implement pagination in get utxos

* feat: sort response in get holders

* feat: cap batch query

* feat: add default limits to all endpoints

* chore: rename endpoint funcs

* fix: parse rune name spacers

* feat(runes): add get token list api

* fix(runes): use distinct to get token list

* feat: remove unused code

* fix: count holders distinct pkscript

* feat: implement additional scopes

* chore: comments

* feat: implement search

* refactor: switch to use paginationRequest

* refactor: rename get token list to get tokens

* fix: count total holders by rune ids

* fix: rename file

* fix: rename minting to ongoing

* fix: get ongoing check rune is mintable

* chore: disable gosec g115

* fix: pr

---------

Co-authored-by: Gaze <gazenw@users.noreply.github.com>
2024-10-06 19:30:57 +07:00
Gaze
1bd84b0154 fix: bump sqlc verify action version to 1.27.0 2024-10-06 15:41:10 +07:00
Gaze
de26a4c21d feat(errs): add retryable error 2024-10-06 11:12:51 +07:00
Gaze
1dc57d74e0 Merge remote-tracking branch 'origin/main' into develop 2024-10-05 01:38:34 +07:00
gazenw
7c0e28d8ea Merge pull request #57 from gaze-network/develop
Release 0.4.4
2024-10-05 01:37:50 +07:00
gazenw
754fd1e997 fix: only check for chain reorg if current block has hash (#56)
* fix: only check for chain reorg if current block has hash

* fix: remove starting block hash

* fix: don't use starting block hash
2024-10-05 01:35:04 +07:00
Gaze
66f03f7107 feat: allow custom sigHashType when signing 2024-10-04 23:05:48 +07:00
gazenw
7a863987ec Merge pull request #55 from gaze-network/develop
Release v0.4.3
2024-10-04 13:23:30 +07:00
gazenw
f9c6ef8dfd fix: add different genesis runes config for each network (#54)
* fix: add different genesis runes config for each network

* fix: use slogx.Stringer

* refactor: remove unused value
2024-10-04 13:22:53 +07:00
gazenw
22a32468ef Merge pull request #53 from gaze-network/develop
Release v0.4.2
2024-10-03 18:26:32 +07:00
gazenw
b1d9f4f574 feat: add fractal support for runes (#52)
* feat: add fractal support for runes

* chore: remove common.HalvingInterval

* fix: update starting block height

* refactor: move network-genesis-rune definition to constants

* fix: use logger.Panic() instead of panic()

* fix: golangci-lint

* fix: missing return
2024-10-03 18:25:13 +07:00
Gaze
6a5ba528a8 Merge branch 'main' into develop 2024-10-02 20:28:42 +07:00
Nut Pinyo
6484887710 feat: add dust limit util (#51)
* feat: add dust limit util

* fix: use int64 instead
2024-10-02 15:08:29 +07:00
Gaze
9a1382fb9f feat: add fee estimation util 2024-09-06 22:56:57 +07:00
Gaze
3d5f3b414c feat: add sign tx util functions 2024-09-06 21:58:02 +07:00
gazenw
6e8a846c27 Merge pull request #50 from gaze-network/develop
Release v0.4.1
2024-08-30 16:18:40 +07:00
gazenw
8b690c4f7f fix: add decimals field in runes get holders (#49) 2024-08-30 16:17:57 +07:00
Ň𝑒𝕣ⒻẸ𝔻
cc37807ff9 Turkish translation (#46)
* Move Turkish translation of README.md to docs/ directory

* Add Turkish translation of README.md with last updated date and community translation notice

* Update README.md

* Update README.md

* Update README.md

* Update README.md

* Update README.md

* Update README.md

---------

Co-authored-by: gazenw <163862510+gazenw@users.noreply.github.com>
2024-08-30 16:15:14 +07:00
gazenw
9ab16d21e1 Merge pull request #48 from gaze-network/develop
Release v0.4.1
2024-08-28 23:49:15 +07:00
gazenw
32fec89914 Merge pull request #47 from gaze-network/feat/fractal-network-support
feat: add fractal network constant
2024-08-28 23:48:17 +07:00
Gaze
0131de6717 feat: add network support for fractal 2024-08-28 23:34:54 +07:00
gazenw
206eb65ee7 Merge pull request #44 from gaze-network/develop
Release v0.4.0
2024-08-13 17:31:16 +07:00
gazenw
fa810b0aed feat: add get utxo by tx hash and output idx for Runes (#42)
* feat: add handler

* feat: add get transaction

* feat: add get utxos output

* refactor: function parameter

* feat: add check utxo not found

* feat: add sats to get utxo output api

* feat: add utxo sats entity

* feat: add get utxos output batch

* feat: handle error

* fix: context

* fix: sqlc queries

* fix: remove unused code

* fix: comment

* fix: check utxo not found error

* refactor: add some space

* fix: comment

* fix: use public field
2024-08-13 17:20:46 +07:00
waiemwor
dca63a49fe Modify Nodesale API to allow query all nodes from a deployment. (#43)
* feat: allow query all nodes from a deployment.

* fix : function GetNodesByDeployment name.
2024-08-13 16:35:56 +07:00
Gaze
05ade4b9d5 Merge branch 'main' into develop 2024-08-06 13:25:48 +07:00
Gaze
074458584b fix: adjust content type check 2024-08-06 13:25:36 +07:00
waiemwor
db5dc75c41 Feature/nodesale (#40)
* feat: recover nodesale module.

* fix: refactored.

* fix: fix table type.

* fix: add entity

* fix: bug UTC time.

* ci: try to tidy before testing

* ci: touch result file

* ci: use echo to create new file

* fix: try to skip test in ci

* fix: remove os.Exit

* fix: handle error

* feat: add todo note

* fix: Cannot run nodesale test because qtx is not initiated.

* fix: 50% chance public key compare incorrectly.

* fix: more consistent SQL

* fix: sanity refactor.

* fix: remove unused code.

* fix: move last_block_default to config file.

* fix: minor mistakes.

* fix:

* fix: refactor

* fix: refactor

* fix: delegate tx hash not record into db.

* refactor: prepare for moving integration tests.

* refactor: convert to unit tests.

* fix: change to using input values since output values deducted fee.

* feat: add extra unit test.

* fix: wrong timestamp format.

* fix: handle block timeout = 0

---------

Co-authored-by: Gaze <gazenw@users.noreply.github.com>
2024-08-05 11:33:20 +07:00
Gaze
0474627336 Merge branch 'main' into develop 2024-08-05 11:31:42 +07:00
Gaze
359436e6eb fix(httpclient): preserve trailing slash if exists 2024-08-01 14:43:36 +07:00
gazenw
1967895d6d Merge pull request #41 from gaze-network/develop
Release v0.3.0
2024-07-25 15:07:51 +07:00
gazenw
7dcbd082ee feat: add Runes API pagination (#36)
* feat: implement pagination on get balance, get holders

* feat: paginate get transactions

* fix: remove debug

* feat: implement pagination in get utxos

* feat: sort response in get holders

* feat: cap batch query

* feat: add default limits to all endpoints

* chore: rename endpoint funcs

* fix: parse rune name spacers

* chore: use compare.Cmp

* feat: handle not found errors on all usecase
2024-07-23 15:46:45 +07:00
gazenw
880f4b2e6a fix: handle case where input rune id is not found (#37) 2024-07-15 18:32:28 +07:00
Gaze
3f727dc11b Merge remote-tracking branch 'origin/main' into develop 2024-07-15 16:33:56 +07:00
Planxnx
60717ecc65 feat(requestlogger): add response headers 2024-07-12 00:18:15 +07:00
Planxnx
6998adedb0 fix(requestlogger): logging all request headers 2024-07-11 23:53:27 +07:00
Thanee Charattrakool
add0a541b5 feat: Request Logger fields (#35)
* feat: add with request headers config

* feat: add with fields config

* feat: format request queries
2024-07-11 23:41:18 +07:00
gazenw
dad02bf61a Merge pull request #34 from gaze-network/develop
feat: release v0.2.7
2024-07-09 16:15:35 +07:00
Gaze
694baef0aa chore: golangci-lint 2024-07-09 15:48:09 +07:00
gazenw
47119c3220 feat: remove unnecessary verbose query (#33) 2024-07-09 15:44:14 +07:00
gazenw
6203b104db Merge pull request #32 from gaze-network/develop
feat: release v0.2.5
2024-07-08 14:50:40 +07:00
gazenw
b24f27ec9a fix: incorrect condition for finding output destinations (#31) 2024-07-08 14:32:58 +07:00
Planxnx
90f1fd0a6c Merge branch 'fix/invalid-httpclient-path' 2024-07-04 15:39:17 +07:00
Planxnx
aace33b382 fix(httpclient): support base url query params 2024-07-04 15:39:04 +07:00
Gaze
a663f909fa Merge remote-tracking branch 'origin/main' into develop 2024-07-04 12:46:51 +07:00
Thanee Charattrakool
0263ec5622 Merge pull request #30 from gaze-network/fix/invalid-httpclient-path 2024-07-04 04:12:19 +07:00
Planxnx
8760baf42b chore: remive unused comment 2024-07-04 00:03:36 +07:00
Planxnx
5aca9f7f19 perf(httpclient): reduce base url parsing operation 2024-07-03 23:58:20 +07:00
Planxnx
07aa84019f fix(httpclient): can't support baseURL path 2024-07-03 23:57:40 +07:00
Thanee Charattrakool
a5fc803371 Merge pull request #29 from gaze-network/develop
feat: release v0.2.4
2024-07-02 15:57:44 +07:00
Planxnx
72ca151fd3 feat(httpclient): support content-encoding 2024-07-02 15:53:18 +07:00
Gaze
53a4d1a4c3 Merge branch 'main' into develop 2024-06-30 21:04:08 +07:00
Gaze
3322f4a034 ci: update action file name 2024-06-30 21:03:57 +07:00
Planxnx
dcb220bddb Merge branch 'main' into develop 2024-06-30 20:17:13 +07:00
gazenw
b6ff7e41bd docs: update README.md 2024-06-30 20:12:44 +07:00
gazenw
7cb717af11 feat(runes): get txs by block range (#28)
* feat(runes): get txs by block range

* feat(runes): validate block range

* perf(runes): limit 10k txs

---------

Co-authored-by: Gaze <gazenw@users.noreply.github.com>
2024-06-30 18:45:23 +07:00
Gaze
0d1ae0ef5e Merge branch 'main' into develop 2024-06-27 00:12:13 +07:00
Thanee Charattrakool
81ba7792ea fix: create error handler middleware (#27) 2024-06-27 00:11:22 +07:00
Gaze
b5851a39ab Merge branch 'main' into develop 2024-06-22 21:15:06 +07:00
Gaze
b44fb870a3 feat: add query params to req logger 2024-06-22 21:00:02 +07:00
Gaze
373ea50319 feat(logger): support env config 2024-06-20 18:52:56 +07:00
Gaze
a1d7524615 feat(btcutils): make btcutils.Address comparable support 2024-06-14 19:38:01 +07:00
Gaze
415a476478 Merge branch 'main' into develop 2024-06-14 16:55:39 +07:00
Gaze
f63505e173 feat(btcutils): use chain params instead common.network 2024-06-14 16:55:28 +07:00
Gaze
65a69ddb68 Merge remote-tracking branch 'origin/main' into develop 2024-06-14 16:48:48 +07:00
Thanee Charattrakool
4f5d1f077b feat(btcutils): add bitcoin utility functions (#26)
* feat(btcutils): add bitcoin utility functions

* feat(btcutils): add bitcoin signature verification
2024-06-14 16:48:22 +07:00
Gaze
c133006c82 Merge branch 'main' into develop 2024-06-12 23:39:24 +07:00
Thanee Charattrakool
51fd1f6636 feat: move requestip config to http config (#25) 2024-06-12 22:08:03 +07:00
Thanee Charattrakool
a7bc6257c4 feat(api): add request context and logger middleware (#24)
* feat(api): add request context and logger middleware

* feat(api): add cors and favicon middleware

* fix: solve wrapcheck linter warning

* feat: configurable hidden request headers
2024-06-12 21:47:29 +07:00
gazenw
3bb7500c87 feat: update docker version 2024-06-07 13:55:55 +07:00
Gaze
8c92893d4a feat: release v0.2.1 2024-05-31 01:16:34 +07:00
Nut Pinyo
d84e30ed11 fix: implement Shutdown() for processors (#22) 2024-05-31 01:13:12 +07:00
Thanee Charattrakool
d9fa217977 feat: use current indexed block for first prev block (#23)
* feat: use current indexed block for first prev block

* fix: forgot to set next prev header
2024-05-31 01:11:37 +07:00
Nut Pinyo
d4b694aa57 fix: implement Shutdown() for processors (#22) 2024-05-30 23:57:41 +07:00
Gaze
9febf40e81 Merge remote-tracking branch 'origin/main' into develop 2024-05-27 14:33:00 +07:00
Thanee Charattrakool
709b00ec0e build: add Docker cache mound for Go modules (#21)
* build: add cache mount for go modules

* doc(docker): update TZ description

* build: use entrypoint instead cmd exec

* build: add dockerignore

* build: add modules dir to image for migration command

* build: update dockerignore

* doc: fix typo

Co-authored-by: gazenw <163862510+gazenw@users.noreply.github.com>

---------

Co-authored-by: gazenw <163862510+gazenw@users.noreply.github.com>
2024-05-23 17:10:03 +07:00
gazenw
50ae103502 doc: update docker compose example 2024-05-21 14:44:59 +07:00
gazenw
c0242bd555 Update README.md 2024-05-20 18:37:32 +07:00
142 changed files with 12650 additions and 853 deletions

18
.dockerignore Normal file
View File

@@ -0,0 +1,18 @@
.git
.gitignore
.github
.vscode
**/*.md
**/*.log
.DS_Store
# Docker
Dockerfile
.dockerignore
docker-compose.yml
# Go
.golangci.yaml
cmd.local
config.*.y*ml
config.y*ml

View File

@@ -58,6 +58,9 @@ jobs:
cache: true # caching and restoring go modules and build outputs.
- run: echo "GOVERSION=$(go version)" >> $GITHUB_ENV
- name: Touch test result file
run: echo "" > test_output.json
- name: Build
run: go build -v ./...

View File

@@ -22,7 +22,7 @@ jobs:
- name: Setup Sqlc
uses: sqlc-dev/setup-sqlc@v4
with:
sqlc-version: "1.26.0"
sqlc-version: "1.27.0"
- name: Check Diff
run: sqlc diff

View File

@@ -101,3 +101,6 @@ linters-settings:
attr-only: true
key-naming-case: snake
args-on-sep-lines: true
gosec:
excludes:
- G115

View File

@@ -39,7 +39,7 @@
"ui.completion.usePlaceholders": false,
"ui.diagnostic.analyses": {
// https://github.com/golang/tools/blob/master/gopls/doc/analyzers.md
// "fieldalignment": false,
"fieldalignment": false,
"nilness": true,
"shadow": false,
"unusedparams": true,

View File

@@ -3,15 +3,15 @@ FROM golang:1.22 as builder
WORKDIR /app
COPY go.mod go.sum ./
RUN go mod download
RUN --mount=type=cache,target=/go/pkg/mod/ go mod download
COPY ./ ./
ENV GOOS=linux
ENV CGO_ENABLED=0
RUN go build \
-o main ./main.go
RUN --mount=type=cache,target=/go/pkg/mod/ \
go build -o main ./main.go
FROM alpine:latest
@@ -19,9 +19,10 @@ WORKDIR /app
RUN apk --no-cache add ca-certificates tzdata
COPY --from=builder /app/main .
COPY --from=builder /app/modules ./modules
# You can set `TZ` environment variable to change the timezone
# You can set TZ identifier to change the timezone, See https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List
# ENV TZ=US/Central
CMD ["/app/main", "run"]
ENTRYPOINT ["/app/main"]

View File

@@ -1,8 +1,10 @@
<!-- omit from toc -->
- [Türkçe](https://github.com/Rumeyst/gaze-indexer/blob/turkish-translation/docs/README_tr.md)
# Gaze Indexer
Gaze Indexer is an open-source and modular indexing client for Bitcoin meta-protocols. It has support for Runes out of the box, with **Unified Consistent APIs** across fungible token protocols.
Gaze Indexer is an open-source and modular indexing client for Bitcoin meta-protocols with **Unified Consistent APIs** across fungible token protocols.
Gaze Indexer is built with **modularity** in mind, allowing users to run all modules in one monolithic instance with a single command, or as a distributed cluster of micro-services.
@@ -25,7 +27,7 @@ This allows developers to focus on what **truly** matters: Meta-protocol indexin
### 1. Runes
The Runes Indexer is our first meta-protocol indexer. It indexes Runes states, transactions, runestones, and balances using Bitcoin transactions.
It comes with a set of APIs for querying historical Runes data. See our [API Reference](https://documenter.getpostman.com/view/28396285/2sA3Bn7Cxr) for full details.
It comes with a set of APIs for querying historical Runes data. See our [API Reference](https://api-docs.gaze.network) for full details.
## Installation
@@ -51,8 +53,6 @@ Here is our minimum database disk space requirement for each module.
| ------ | -------------------------- | ---------------------------- |
| Runes | 10 GB | 150 GB |
Here is our minimum database disk space requirement for each module.
#### 4. Prepare `config.yaml` file.
```yaml
@@ -108,14 +108,14 @@ We will be using `docker-compose` for our installation guide. Make sure the `doc
# docker-compose.yaml
services:
gaze-indexer:
image: ghcr.io/gaze-network/gaze-indexer:v1.0.0
image: ghcr.io/gaze-network/gaze-indexer:v0.2.1
container_name: gaze-indexer
restart: unless-stopped
ports:
- 8080:8080 # Expose HTTP server port to host
volumes:
- "./config.yaml:/app/config.yaml" # mount config.yaml file to the container as "/app/config.yaml"
command: ["/app/main", "run", "--runes"] # Put module flags after "run" commands to select which modules to run.
command: ["/app/main", "run", "--modules", "runes"] # Put module flags after "run" commands to select which modules to run.
```
### Install from source

View File

@@ -17,15 +17,21 @@ import (
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/indexer"
"github.com/gaze-network/indexer-network/internal/config"
"github.com/gaze-network/indexer-network/modules/nodesale"
"github.com/gaze-network/indexer-network/modules/runes"
"github.com/gaze-network/indexer-network/pkg/automaxprocs"
"github.com/gaze-network/indexer-network/pkg/errorhandler"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
"github.com/gaze-network/indexer-network/pkg/middleware/errorhandler"
"github.com/gaze-network/indexer-network/pkg/middleware/requestcontext"
"github.com/gaze-network/indexer-network/pkg/middleware/requestlogger"
"github.com/gaze-network/indexer-network/pkg/reportingclient"
"github.com/gofiber/fiber/v2"
"github.com/gofiber/fiber/v2/middleware/compress"
"github.com/gofiber/fiber/v2/middleware/cors"
"github.com/gofiber/fiber/v2/middleware/favicon"
fiberrecover "github.com/gofiber/fiber/v2/middleware/recover"
"github.com/gofiber/fiber/v2/middleware/requestid"
"github.com/samber/do/v2"
"github.com/samber/lo"
"github.com/spf13/cobra"
@@ -34,6 +40,7 @@ import (
// Register Modules
var Modules = do.Package(
do.LazyNamed("runes", runes.New),
do.LazyNamed("nodesale", nodesale.New),
)
func NewRunCommand() *cobra.Command {
@@ -131,10 +138,26 @@ func runHandler(cmd *cobra.Command, _ []string) error {
// Initialize HTTP server
do.Provide(injector, func(i do.Injector) (*fiber.App, error) {
app := fiber.New(fiber.Config{
AppName: "Gaze Indexer",
ErrorHandler: errorhandler.NewHTTPErrorHandler(),
AppName: "Gaze Indexer",
ErrorHandler: func(c *fiber.Ctx, err error) error {
logger.ErrorContext(c.UserContext(), "Something went wrong, unhandled api error",
slogx.String("event", "api_unhandled_error"),
slogx.Error(err),
)
return errors.WithStack(c.Status(http.StatusInternalServerError).JSON(fiber.Map{
"error": "Internal Server Error",
}))
},
})
app.
Use(favicon.New()).
Use(cors.New()).
Use(requestid.New()).
Use(requestcontext.New(
requestcontext.WithRequestId(),
requestcontext.WithClientIP(conf.HTTPServer.RequestIP),
)).
Use(requestlogger.New(conf.HTTPServer.Logger)).
Use(fiberrecover.New(fiberrecover.Config{
EnableStackTrace: true,
StackTraceHandler: func(c *fiber.Ctx, e interface{}) {
@@ -143,6 +166,7 @@ func runHandler(cmd *cobra.Command, _ []string) error {
logger.ErrorContext(c.UserContext(), "Something went wrong, panic in http handler", slogx.Any("panic", e), slog.String("stacktrace", string(buf)))
},
})).
Use(errorhandler.New()).
Use(compress.New(compress.Config{
Level: compress.LevelDefault,
}))

View File

@@ -6,13 +6,15 @@ import (
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/constants"
"github.com/gaze-network/indexer-network/modules/runes"
"github.com/gaze-network/indexer-network/modules/nodesale"
runesconstants "github.com/gaze-network/indexer-network/modules/runes/constants"
"github.com/spf13/cobra"
)
var versions = map[string]string{
"": constants.Version,
"runes": runes.Version,
"": constants.Version,
"runes": runesconstants.Version,
"nodesale": nodesale.Version,
}
type versionCmdOptions struct {

View File

@@ -1,4 +0,0 @@
package common
// HalvingInterval is the number of blocks between each halving event.
const HalvingInterval = 210_000

View File

@@ -24,6 +24,9 @@ var (
// Skippable is returned when got an error but it can be skipped or ignored and continue
Skippable = errors.NewWithDepth(depth, "skippable")
// Retryable is returned when got an error but it can be retried
Retryable = errors.NewWithDepth(depth, "retryable")
// Unsupported is returned when a feature or result is not supported
Unsupported = errors.NewWithDepth(depth, "unsupported")

View File

@@ -11,6 +11,7 @@ import (
type PublicError struct {
err error
message string
code string // code is optional, it can be used to identify the error type
}
func (p PublicError) Error() string {
@@ -21,6 +22,10 @@ func (p PublicError) Message() string {
return p.message
}
func (p PublicError) Code() string {
return p.code
}
func (p PublicError) Unwrap() error {
return p.err
}
@@ -29,6 +34,10 @@ func NewPublicError(message string) error {
return withstack.WithStackDepth(&PublicError{err: errors.New(message), message: message}, 1)
}
func NewPublicErrorWithCode(message string, code string) error {
return withstack.WithStackDepth(&PublicError{err: errors.New(message), message: message, code: code}, 1)
}
func WithPublicMessage(err error, prefix string) error {
if err == nil {
return nil
@@ -41,3 +50,16 @@ func WithPublicMessage(err error, prefix string) error {
}
return withstack.WithStackDepth(&PublicError{err: err, message: message}, 1)
}
func WithPublicMessageCode(err error, prefix string, code string) error {
if err == nil {
return nil
}
var message string
if prefix != "" {
message = fmt.Sprintf("%s: %s", prefix, err.Error())
} else {
message = err.Error()
}
return withstack.WithStackDepth(&PublicError{err: err, message: message, code: code}, 1)
}
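
The diff above adds an optional `code` field to `errs.PublicError`, plus `NewPublicErrorWithCode` and `WithPublicMessageCode` constructors. Below is a minimal sketch of how a caller might surface that code; the `findDeployment` helper and the `DEPLOYMENT_NOT_FOUND` string are made up for illustration, and it assumes the stack-trace wrapper keeps the `*errs.PublicError` reachable via the standard library's `errors.As` (which `cockroachdb/errors` wrappers are designed to allow):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/gaze-network/indexer-network/common/errs"
)

// findDeployment is a stand-in for any use case that wants to return a
// machine-readable code alongside a human-readable public message.
func findDeployment(id string) error {
	if id == "" {
		return errs.NewPublicErrorWithCode("deployment not found", "DEPLOYMENT_NOT_FOUND")
	}
	return nil
}

func main() {
	err := findDeployment("")

	// The constructors wrap a *errs.PublicError with a stack trace, so both
	// the message and the optional code can be recovered up the call chain.
	var pubErr *errs.PublicError
	if errors.As(err, &pubErr) {
		fmt.Println(pubErr.Message(), pubErr.Code()) // deployment not found DEPLOYMENT_NOT_FOUND
	}
}
```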

View File

@@ -1,22 +1,31 @@
package common
import "github.com/btcsuite/btcd/chaincfg"
import (
"github.com/btcsuite/btcd/chaincfg"
"github.com/gaze-network/indexer-network/pkg/logger"
)
type Network string
const (
NetworkMainnet Network = "mainnet"
NetworkTestnet Network = "testnet"
NetworkMainnet Network = "mainnet"
NetworkTestnet Network = "testnet"
NetworkFractalMainnet Network = "fractal-mainnet"
NetworkFractalTestnet Network = "fractal-testnet"
)
var supportedNetworks = map[Network]struct{}{
NetworkMainnet: {},
NetworkTestnet: {},
NetworkMainnet: {},
NetworkTestnet: {},
NetworkFractalMainnet: {},
NetworkFractalTestnet: {},
}
var chainParams = map[Network]*chaincfg.Params{
NetworkMainnet: &chaincfg.MainNetParams,
NetworkTestnet: &chaincfg.TestNet3Params,
NetworkMainnet: &chaincfg.MainNetParams,
NetworkTestnet: &chaincfg.TestNet3Params,
NetworkFractalMainnet: &chaincfg.MainNetParams,
NetworkFractalTestnet: &chaincfg.MainNetParams,
}
func (n Network) IsSupported() bool {
@@ -31,3 +40,15 @@ func (n Network) ChainParams() *chaincfg.Params {
func (n Network) String() string {
return string(n)
}
func (n Network) HalvingInterval() uint64 {
switch n {
case NetworkMainnet, NetworkTestnet:
return 210_000
case NetworkFractalMainnet, NetworkFractalTestnet:
return 2_100_000
default:
logger.Panic("invalid network")
return 0
}
}
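
This hunk replaces the global `common.HalvingInterval` constant (removed earlier in this changeset) with a per-network method, since Fractal halves every 2,100,000 blocks while reusing Bitcoin mainnet chain parameters. A short sketch of how the new method might be used; `halvingsAt` is a hypothetical helper, not code from the repository:

```go
package main

import (
	"fmt"

	"github.com/gaze-network/indexer-network/common"
)

// halvingsAt counts how many subsidy halvings have occurred at the given
// block height on the given network.
func halvingsAt(network common.Network, height uint64) uint64 {
	return height / network.HalvingInterval()
}

func main() {
	for _, n := range []common.Network{common.NetworkMainnet, common.NetworkFractalMainnet} {
		if !n.IsSupported() {
			continue
		}
		// Fractal reuses Bitcoin mainnet chain params but halves every
		// 2,100,000 blocks instead of 210,000, per the switch above.
		fmt.Println(n.String(), n.ChainParams().Name, halvingsAt(n, 840_000))
	}
}
```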

View File

@@ -23,6 +23,14 @@ reporting:
# HTTP server configuration options.
http_server:
port: 8080 # Port to run the HTTP server on for modules with HTTP API handlers.
logger:
disable: false # disable logger if logger level is `INFO`
request_header: false
request_query: false
requestip: # Client IP extraction configuration options. This is unnecessary if you don't care about the real client IP or if you're not using a reverse proxy.
trusted_proxies_ip: # Cloudflare, GCP Public LB. See: server/internal/middleware/requestcontext/PROXY-IP.md
trusted_proxies_header: # X-Real-IP, CF-Connecting-IP
enable_reject_malformed_request: false # return 403 if request is malformed (invalid IP)
# Meta-protocol modules configuration options.
modules:
@@ -39,3 +47,11 @@ modules:
password: "password"
db_name: "postgres"
# url: "postgres://postgres:password@localhost:5432/postgres?sslmode=prefer" # [Optional] This will override other database credentials above.
nodesale:
postgres:
host: "localhost"
port: 5432
user: "postgres"
password: "P@ssw0rd"
db_name: "postgres"
last_block_default: 400

View File

@@ -1,5 +1,5 @@
package constants
const (
Version = "v0.0.1"
Version = "v0.2.1"
)

View File

@@ -243,39 +243,32 @@ func (d *BitcoinNodeDatasource) prepareRange(fromHeight, toHeight int64) (start,
}
// GetTransaction fetch transaction from Bitcoin node
func (d *BitcoinNodeDatasource) GetTransactionByHash(ctx context.Context, txHash chainhash.Hash) (*types.Transaction, error) {
func (d *BitcoinNodeDatasource) GetRawTransactionAndHeightByTxHash(ctx context.Context, txHash chainhash.Hash) (*wire.MsgTx, int64, error) {
rawTxVerbose, err := d.btcclient.GetRawTransactionVerbose(&txHash)
if err != nil {
return nil, errors.Wrap(err, "failed to get raw transaction")
return nil, 0, errors.Wrap(err, "failed to get raw transaction")
}
blockHash, err := chainhash.NewHashFromStr(rawTxVerbose.BlockHash)
if err != nil {
return nil, errors.Wrap(err, "failed to parse block hash")
return nil, 0, errors.Wrap(err, "failed to parse block hash")
}
block, err := d.btcclient.GetBlockVerboseTx(blockHash)
block, err := d.btcclient.GetBlockVerbose(blockHash)
if err != nil {
return nil, errors.Wrap(err, "failed to get block header")
return nil, 0, errors.Wrap(err, "failed to get block header")
}
// parse tx
txBytes, err := hex.DecodeString(rawTxVerbose.Hex)
if err != nil {
return nil, errors.Wrap(err, "failed to decode transaction hex")
return nil, 0, errors.Wrap(err, "failed to decode transaction hex")
}
var msgTx wire.MsgTx
if err := msgTx.Deserialize(bytes.NewReader(txBytes)); err != nil {
return nil, errors.Wrap(err, "failed to deserialize transaction")
}
var txIndex uint32
for i, tx := range block.Tx {
if tx.Hex == rawTxVerbose.Hex {
txIndex = uint32(i)
break
}
return nil, 0, errors.Wrap(err, "failed to deserialize transaction")
}
return types.ParseMsgTx(&msgTx, block.Height, *blockHash, txIndex), nil
return &msgTx, block.Height, nil
}
// GetBlockHeader fetch block header from Bitcoin node
@@ -292,3 +285,12 @@ func (d *BitcoinNodeDatasource) GetBlockHeader(ctx context.Context, height int64
return types.ParseMsgBlockHeader(*block, height), nil
}
func (d *BitcoinNodeDatasource) GetRawTransactionByTxHash(ctx context.Context, txHash chainhash.Hash) (*wire.MsgTx, error) {
transaction, err := d.btcclient.GetRawTransaction(&txHash)
if err != nil {
return nil, errors.Wrap(err, "failed to get raw transaction")
}
return transaction.MsgTx(), nil
}
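
The hunk above replaces `GetTransactionByHash` with `GetRawTransactionAndHeightByTxHash`, returning the raw `wire.MsgTx` and its block height instead of a parsed `types.Transaction`, and adds a height-less `GetRawTransactionByTxHash` variant. A sketch of a caller against the new signatures; the `txFetcher` interface and `printTxSummary` function are illustrative names introduced here, not part of the repository:

```go
package datasourceexample

import (
	"context"
	"fmt"

	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/wire"
)

// txFetcher captures just the two accessors added in the diff above; the
// concrete *BitcoinNodeDatasource would satisfy it.
type txFetcher interface {
	GetRawTransactionAndHeightByTxHash(ctx context.Context, txHash chainhash.Hash) (*wire.MsgTx, int64, error)
	GetRawTransactionByTxHash(ctx context.Context, txHash chainhash.Hash) (*wire.MsgTx, error)
}

// printTxSummary shows the new call shape: the block height now comes back
// alongside the wire transaction instead of being embedded in a parsed type.
func printTxSummary(ctx context.Context, ds txFetcher, hashStr string) error {
	hash, err := chainhash.NewHashFromStr(hashStr)
	if err != nil {
		return err
	}
	msgTx, height, err := ds.GetRawTransactionAndHeightByTxHash(ctx, *hash)
	if err != nil {
		return err
	}
	fmt.Printf("tx %s: %d inputs, %d outputs, mined at height %d\n",
		msgTx.TxHash(), len(msgTx.TxIn), len(msgTx.TxOut), height)
	return nil
}
```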

View File

@@ -6,6 +6,7 @@ import (
"sync"
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/datasources"
@@ -91,6 +92,10 @@ func (i *Indexer[T]) Run(ctx context.Context) (err error) {
select {
case <-i.quit:
logger.InfoContext(ctx, "Got quit signal, stopping indexer")
if err := i.Processor.Shutdown(ctx); err != nil {
logger.ErrorContext(ctx, "Failed to shutdown processor", slogx.Error(err))
return errors.Wrap(err, "processor shutdown failed")
}
return nil
case <-ctx.Done():
return nil
@@ -138,7 +143,7 @@ func (i *Indexer[T]) process(ctx context.Context) (err error) {
// validate reorg from first input
{
remoteBlockHeader := firstInputHeader
if !remoteBlockHeader.PrevBlock.IsEqual(&i.currentBlock.Hash) {
if i.currentBlock.Hash != (chainhash.Hash{}) && !remoteBlockHeader.PrevBlock.IsEqual(&i.currentBlock.Hash) {
logger.WarnContext(ctx, "Detected chain reorganization. Searching for fork point...",
slogx.String("event", "reorg_detected"),
slogx.Stringer("current_hash", i.currentBlock.Hash),
@@ -204,19 +209,20 @@ func (i *Indexer[T]) process(ctx context.Context) (err error) {
}
// validate is input is continuous and no reorg
for i := 1; i < len(inputs); i++ {
header := inputs[i].BlockHeader()
prevHeader := inputs[i-1].BlockHeader()
prevHeader := i.currentBlock
for i, input := range inputs {
header := input.BlockHeader()
if header.Height != prevHeader.Height+1 {
return errors.Wrapf(errs.InternalError, "input is not continuous, input[%d] height: %d, input[%d] height: %d", i-1, prevHeader.Height, i, header.Height)
}
if !header.PrevBlock.IsEqual(&prevHeader.Hash) {
if prevHeader.Hash != (chainhash.Hash{}) && !header.PrevBlock.IsEqual(&prevHeader.Hash) {
logger.WarnContext(ctx, "Chain Reorganization occurred in the middle of batch fetching inputs, need to try to fetch again")
// end current round
return nil
}
prevHeader = header
}
ctx = logger.WithContext(ctx, slog.Int("total_inputs", len(inputs)))

View File

@@ -29,6 +29,9 @@ type Processor[T Input] interface {
// VerifyStates verifies the states of the indexed data and the indexer
// to ensure the last shutdown was graceful and no missing data.
VerifyStates(ctx context.Context) error
// Shutdown gracefully stops the processor. Database connections, network calls, leftover states, etc. should be closed and cleaned up here.
Shutdown(ctx context.Context) error
}
type IndexerWorker interface {
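
The `Shutdown(ctx)` method added to the `Processor` interface (and invoked from the indexer's quit path earlier in this changeset) means every module processor must now clean up its own resources. A minimal illustration of what an implementation might look like; `runesProcessor`, its fields, and the use of the standard library `database/sql` pool are assumptions for the sketch, not the repository's actual processor:

```go
package processorexample

import (
	"context"
	"database/sql"
)

// runesProcessor is a hypothetical processor; only the shutdown path is
// sketched to illustrate the new interface method.
type runesProcessor struct {
	db      *sql.DB
	pending []string // buffered state that has not been flushed yet
}

// Shutdown satisfies the new Processor.Shutdown contract: flush whatever is
// still buffered, then release external resources such as the database pool.
func (p *runesProcessor) Shutdown(ctx context.Context) error {
	if len(p.pending) > 0 {
		if err := p.flush(ctx); err != nil {
			return err
		}
	}
	return p.db.Close()
}

// flush is a placeholder for writing buffered state in a transaction.
func (p *runesProcessor) flush(ctx context.Context) error {
	p.pending = nil
	return nil
}
```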

165
docs/README_tr.md Normal file
View File

@@ -0,0 +1,165 @@
## Çeviriler
- [English (İngilizce)](../README.md)
**Son Güncelleme:** 21 Ağustos 2024
> **Not:** Bu belge, topluluk tarafından yapılmış bir çeviridir. Ana README.md dosyasındaki güncellemeler buraya otomatik olarak yansıtılmayabilir. En güncel bilgiler için [İngilizce sürümü](../README.md) inceleyin.
# Gaze Indexer
Gaze Indexer, değiştirilebilir token protokolleri arasında **Birleştirilmiş Tutarlı API'lere** sahip Bitcoin meta-protokolleri için açık kaynaklı ve modüler bir indeksleme istemcisidir.
Gaze Indexer, kullanıcıların tüm modülleri tek bir komutla tek bir monolitik örnekte veya dağıtılmış bir mikro hizmet kümesi olarak çalıştırmasına olanak tanıyan **modülerlik** göz önünde bulundurularak oluşturulmuştur.
Gaze Indexer, verimli veri getirme, yeniden düzenleme algılama ve veritabanı taşıma aracı ile HERHANGİ bir meta-protokol indeksleyici oluşturmak için bir temel görevi görür.
Bu, geliştiricilerin **gerçekten** önemli olana odaklanmasını sağlar: Meta-protokol indeksleme mantığı. Yeni meta-protokoller, yeni modüller uygulanarak kolayca eklenebilir.
- [Modüller](#modules)
- [1. Runes](#1-runes)
- [Kurulum](#installation)
- [Önkoşullar](#prerequisites)
- [1. Donanım Gereksinimleri](#1-hardware-requirements)
- [2. Bitcoin Core RPC sunucusunu hazırlayın.](#2-prepare-bitcoin-core-rpc-server)
- [3. Veritabanı hazırlayın.](#3-prepare-database)
- [4. `config.yaml` dosyasını hazırlayın.](#4-prepare-configyaml-file)
- [Docker ile yükle (önerilir)](#install-with-docker-recommended)
- [Kaynaktan yükle](#install-from-source)
## Modüller
### 1. Runes
Runes Dizinleyici ilk meta-protokol dizinleyicimizdir. Bitcoin işlemlerini kullanarak Runes durumlarını, işlemlerini, rün taşlarını ve bakiyelerini indeksler.
Geçmiş Runes verilerini sorgulamak için bir dizi API ile birlikte gelir. Tüm ayrıntılar için [API Referansı] (https://api-docs.gaze.network) adresimize bakın.
## Kurulum
### Önkoşullar
#### 1. Donanım Gereksinimleri
Her modül farklı donanım gereksinimleri gerektirir.
| Modül | CPU | RAM |
| ------ | --------- | ---- |
| Runes | 0,5 çekirdek | 1 GB |
#### 2. Bitcoin Core RPC sunucusunu hazırlayın.
Gaze Indexer'ın işlem verilerini kendi barındırdığı ya da QuickNode gibi yönetilen sağlayıcıları kullanan bir Bitcoin Core RPC'den alması gerekir.
Bir Bitcoin Core'u kendiniz barındırmak için bkz. https://bitcoin.org/en/full-node.
#### 3. Veritabanını hazırlayın.
Gaze Indexer PostgreSQL için birinci sınıf desteğe sahiptir. Diğer veritabanlarını kullanmak isterseniz, her modülün Veri Ağ Geçidi arayüzünü karşılayan kendi veritabanı havuzunuzu uygulayabilirsiniz.
İşte her modül için minimum veritabanı disk alanı gereksinimimiz.
| Modül | Veritabanı Depolama Alanı (mevcut) | Veritabanı Depolama Alanı (1 yıl içinde) |
| ------ | -------------------------- | ---------------------------- |
| Runes | 10 GB | 150 GB |
#### 4. config.yaml` dosyasını hazırlayın.
```yaml
# config.yaml
logger:
output: TEXT # Output format for logs. current supported formats: "TEXT" | "JSON" | "GCP"
debug: false
# Network to run the indexer on. Current supported networks: "mainnet" | "testnet"
network: mainnet
# Bitcoin Core RPC configuration options.
bitcoin_node:
host: "" # [Required] Host of Bitcoin Core RPC (without https://)
user: "" # Username to authenticate with Bitcoin Core RPC
pass: "" # Password to authenticate with Bitcoin Core RPC
disable_tls: false # Set to true to disable tls
# Block reporting configuration options. See Block Reporting section for more details.
reporting:
disabled: false # Set to true to disable block reporting to Gaze Network. Default is false.
base_url: "https://indexer.api.gaze.network" # Defaults to "https://indexer.api.gaze.network" if left empty
name: "" # [Required if not disabled] Name of this indexer to show on the Gaze Network dashboard
website_url: "" # Public website URL to show on the dashboard. Can be left empty.
indexer_api_url: "" # Public url to access this indexer's API. Can be left empty if you want to keep your indexer private.
# HTTP server configuration options.
http_server:
port: 8080 # Port to run the HTTP server on for modules with HTTP API handlers.
# Meta-protocol modules configuration options.
modules:
# Configuration options for Runes module. Can be removed if not used.
runes:
database: "postgres" # Database to store Runes data. current supported databases: "postgres"
datasource: "bitcoin-node" # Data source to be used for Bitcoin data. current supported data sources: "bitcoin-node".
api_handlers: # API handlers to enable. current supported handlers: "http"
- http
postgres:
host: "localhost"
port: 5432
user: "postgres"
password: "password"
db_name: "postgres"
# url: "postgres://postgres:password@localhost:5432/postgres?sslmode=prefer" # [Optional] This will override other database credentials above.
```
### Docker ile yükleyin (önerilir)
Kurulum kılavuzumuz için `docker-compose` kullanacağız. Docker-compose.yaml` dosyasının `config.yaml` dosyası ile aynı dizinde olduğundan emin olun.
```yaml
# docker-compose.yaml
services:
gaze-indexer:
image: ghcr.io/gaze-network/gaze-indexer:v0.2.1
container_name: gaze-indexer
restart: unless-stopped
ports:
- 8080:8080 # Expose HTTP server port to host
volumes:
- "./config.yaml:/app/config.yaml" # mount config.yaml file to the container as "/app/config.yaml"
command: ["/app/main", "run", "--modules", "runes"] # Put module flags after "run" commands to select which modules to run.
```
### Kaynaktan yükleyin
1. Go` sürüm 1.22 veya daha üstünü yükleyin. Go kurulum kılavuzuna bakın [burada](https://go.dev/doc/install).
2. Bu depoyu klonlayın.
```bash
git clone https://github.com/gaze-network/gaze-indexer.git
cd gaze-indexer
```
3. Ana ikili dosyayı oluşturun.
```bash
# Bağımlılıkları al
go mod indir
# Ana ikili dosyayı oluşturun
go build -o gaze main.go
```
4. Veritabanı geçişlerini `migrate` komutu ve modül bayrakları ile çalıştırın.
```bash
./gaze migrate up --runes --database postgres://postgres:password@localhost:5432/postgres
```
5. Dizinleyiciyi `run` komutu ve modül bayrakları ile başlatın.
```bash
./gaze run --modules runes
```
Eğer `config.yaml` dosyası `./app/config.yaml` adresinde bulunmuyorsa, `config.yaml` dosyasının yolunu belirtmek için `--config` bayrağını kullanın.
```bash
./gaze run --modules runes --config /path/to/config.yaml
```
## Çeviriler
- [English (İngilizce)](../README.md)

23
go.mod
View File

@@ -6,6 +6,7 @@ require (
github.com/Cleverse/go-utilities/utils v0.0.0-20240119201306-d71eb577ef11
github.com/btcsuite/btcd v0.24.0
github.com/btcsuite/btcd/btcutil v1.1.5
github.com/btcsuite/btcd/btcutil/psbt v1.1.9
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0
github.com/cockroachdb/errors v1.11.1
github.com/gaze-network/uint128 v1.3.0
@@ -20,23 +21,27 @@ require (
github.com/spf13/cobra v1.8.0
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.18.2
github.com/stretchr/testify v1.8.4
github.com/stretchr/testify v1.9.0
github.com/valyala/fasthttp v1.51.0
go.uber.org/automaxprocs v1.5.3
golang.org/x/sync v0.5.0
golang.org/x/sync v0.7.0
google.golang.org/protobuf v1.33.0
)
require github.com/stretchr/objx v0.5.2 // indirect
require (
github.com/andybalholm/brotli v1.0.5 // indirect
github.com/btcsuite/btcd/btcec/v2 v2.1.3 // indirect
github.com/bitonicnl/verify-signed-message v0.7.1
github.com/btcsuite/btcd/btcec/v2 v2.3.3
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd // indirect
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 // indirect
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
github.com/cockroachdb/redact v1.1.5 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/decred/dcrd/crypto/blake256 v1.0.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
github.com/decred/dcrd/crypto/blake256 v1.0.1 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/getsentry/sentry-go v0.18.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
@@ -74,10 +79,10 @@ require (
github.com/valyala/tcplisten v1.0.0 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
golang.org/x/crypto v0.20.0 // indirect
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
golang.org/x/sys v0.17.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/crypto v0.23.0 // indirect
golang.org/x/exp v0.0.0-20240525044651-4c93da0ed11d // indirect
golang.org/x/sys v0.20.0 // indirect
golang.org/x/text v0.15.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

54
go.sum
View File

@@ -7,18 +7,23 @@ github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs=
github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/bitonicnl/verify-signed-message v0.7.1 h1:1Qku9k9WgzobjqBY7tT3CLjWxtTJZxkYNhOV6QeCTjY=
github.com/bitonicnl/verify-signed-message v0.7.1/go.mod h1:PR60twfJIaHEo9Wb6eJBh8nBHEZIQQx8CvRwh0YmEPk=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M=
github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd/go.mod h1:nm3Bko6zh6bWP60UxwoT5LzdGJsQJaPo6HjduXq9p6A=
github.com/btcsuite/btcd v0.24.0 h1:gL3uHE/IaFj6fcZSu03SvqPMSx7s/dPzfpG/atRwWdo=
github.com/btcsuite/btcd v0.24.0/go.mod h1:K4IDc1593s8jKXIF7yS7yCTSxrknB9z0STzc2j6XgE4=
github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA=
github.com/btcsuite/btcd/btcec/v2 v2.1.3 h1:xM/n3yIhHAhHy04z4i43C8p4ehixJZMsnrVJkgl+MTE=
github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE=
github.com/btcsuite/btcd/btcec/v2 v2.3.3 h1:6+iXlDKE8RMtKsvK0gshlXIuPbyWM/h84Ensb7o3sC0=
github.com/btcsuite/btcd/btcec/v2 v2.3.3/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A=
github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE=
github.com/btcsuite/btcd/btcutil v1.1.5 h1:+wER79R5670vs/ZusMTF1yTcRYE5GUsFbdjdisflzM8=
github.com/btcsuite/btcd/btcutil v1.1.5/go.mod h1:PSZZ4UitpLBWzxGd5VGOrLnmOjtPP/a6HaFo12zMs00=
github.com/btcsuite/btcd/btcutil/psbt v1.1.9 h1:UmfOIiWMZcVMOLaN+lxbbLSuoINGS1WmK1TZNI0b4yk=
github.com/btcsuite/btcd/btcutil/psbt v1.1.9/go.mod h1:ehBEvU91lxSlXtA+zZz3iFYx7Yq9eqnKx4/kSrnsvMY=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ=
@@ -50,10 +55,12 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc=
github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=
github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
github.com/dhui/dktest v0.4.1 h1:/w+IWuDXVymg3IrRJCHHOkMK10m9aNVMOyD0X12YVTg=
github.com/dhui/dktest v0.4.1/go.mod h1:DdOqcUpL7vgyP4GlF3X3w7HbSlz8cEQzwewPveYEQbA=
@@ -92,12 +99,13 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
@@ -214,14 +222,18 @@ github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMV
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
@@ -243,14 +255,14 @@ golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.20.0 h1:jmAMJJZXr5KiCw05dfYK9QnqaqKLYXijU23lsEdcQqg=
golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/exp v0.0.0-20240525044651-4c93da0ed11d h1:N0hmiNbwsSNwHBAvR3QB5w25pUwH4tK0Y/RltD1j1h4=
golang.org/x/exp v0.0.0-20240525044651-4c93da0ed11d/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -265,8 +277,8 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -279,19 +291,19 @@ golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw=
golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -302,6 +314,8 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=

View File

@@ -8,9 +8,12 @@ import (
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common"
nodesaleconfig "github.com/gaze-network/indexer-network/modules/nodesale/config"
runesconfig "github.com/gaze-network/indexer-network/modules/runes/config"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
"github.com/gaze-network/indexer-network/pkg/middleware/requestcontext"
"github.com/gaze-network/indexer-network/pkg/middleware/requestlogger"
"github.com/gaze-network/indexer-network/pkg/reportingclient"
"github.com/spf13/pflag"
"github.com/spf13/viper"
@@ -59,11 +62,14 @@ type BitcoinNodeClient struct {
}
type Modules struct {
Runes runesconfig.Config `mapstructure:"runes"`
Runes runesconfig.Config `mapstructure:"runes"`
NodeSale nodesaleconfig.Config `mapstructure:"nodesale"`
}
type HTTPServerConfig struct {
Port int `mapstructure:"port"`
Port int `mapstructure:"port"`
Logger requestlogger.Config `mapstructure:"logger"`
RequestIP requestcontext.WithClientIPConfig `mapstructure:"requestip"`
}
// Parse parse the configuration from environment variables

View File

@@ -0,0 +1,99 @@
package httphandler
import (
"fmt"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
"github.com/gaze-network/indexer-network/modules/nodesale/protobuf"
"github.com/gofiber/fiber/v2"
"google.golang.org/protobuf/encoding/protojson"
)
type deployRequest struct {
DeployID string `params:"deployId"`
}
type tierResponse struct {
PriceSat uint32 `json:"priceSat"`
Limit uint32 `json:"limit"`
MaxPerAddress uint32 `json:"maxPerAddress"`
Sold int64 `json:"sold"`
}
type deployResponse struct {
Id string `json:"id"`
Name string `json:"name"`
StartsAt int64 `json:"startsAt"`
EndsAt int64 `json:"endsAt"`
Tiers []tierResponse `json:"tiers"`
SellerPublicKey string `json:"sellerPublicKey"`
MaxPerAddress uint32 `json:"maxPerAddress"`
DeployTxHash string `json:"deployTxHash"`
}
func (h *handler) deployHandler(ctx *fiber.Ctx) error {
var request deployRequest
err := ctx.ParamsParser(&request)
if err != nil {
return errors.Wrap(err, "cannot parse param")
}
var blockHeight uint64
var txIndex uint32
count, err := fmt.Sscanf(request.DeployID, "%d-%d", &blockHeight, &txIndex)
if count != 2 || err != nil {
return errs.NewPublicError("Invalid deploy ID")
}
deploys, err := h.nodeSaleDg.GetNodeSale(ctx.UserContext(), datagateway.GetNodeSaleParams{
BlockHeight: blockHeight,
TxIndex: txIndex,
})
if err != nil {
return errors.Wrap(err, "Cannot get NodeSale from db")
}
if len(deploys) < 1 {
return errs.NewPublicError("NodeSale not found")
}
deploy := deploys[0]
nodeCount, err := h.nodeSaleDg.GetNodeCountByTierIndex(ctx.UserContext(), datagateway.GetNodeCountByTierIndexParams{
SaleBlock: deploy.BlockHeight,
SaleTxIndex: deploy.TxIndex,
FromTier: 0,
ToTier: uint32(len(deploy.Tiers) - 1),
})
if err != nil {
return errors.Wrap(err, "Cannot get node count from db")
}
tiers := make([]protobuf.Tier, len(deploy.Tiers))
tierResponses := make([]tierResponse, len(deploy.Tiers))
for i, tierJson := range deploy.Tiers {
tier := &tiers[i]
err := protojson.Unmarshal(tierJson, tier)
if err != nil {
return errors.Wrap(err, "Failed to decode tiers json")
}
tierResponses[i].Limit = tiers[i].Limit
tierResponses[i].MaxPerAddress = tiers[i].MaxPerAddress
tierResponses[i].PriceSat = tiers[i].PriceSat
tierResponses[i].Sold = nodeCount[i].Count
}
err = ctx.JSON(&deployResponse{
Id: request.DeployID,
Name: deploy.Name,
StartsAt: deploy.StartsAt.UTC().Unix(),
EndsAt: deploy.EndsAt.UTC().Unix(),
Tiers: tierResponses,
SellerPublicKey: deploy.SellerPublicKey,
MaxPerAddress: deploy.MaxPerAddress,
DeployTxHash: deploy.DeployTxHash,
})
if err != nil {
return errors.Wrap(err, "Go fiber cannot parse JSON")
}
return nil
}

View File

@@ -0,0 +1,56 @@
package httphandler
import (
"encoding/json"
"time"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/modules/nodesale/protobuf"
"github.com/gofiber/fiber/v2"
)
type eventRequest struct {
WalletAddress string `query:"walletAddress"`
}
type eventResponse struct {
TxHash string `json:"txHash"`
BlockHeight int64 `json:"blockHeight"`
TxIndex int32 `json:"txIndex"`
WalletAddress string `json:"walletAddress"`
Action string `json:"action"`
ParsedMessage json.RawMessage `json:"parsedMessage"`
BlockTimestamp time.Time `json:"blockTimestamp"`
BlockHash string `json:"blockHash"`
}
func (h *handler) eventsHandler(ctx *fiber.Ctx) error {
var request eventRequest
err := ctx.QueryParser(&request)
if err != nil {
return errors.Wrap(err, "cannot parse query")
}
events, err := h.nodeSaleDg.GetEventsByWallet(ctx.UserContext(), request.WalletAddress)
if err != nil {
return errors.Wrap(err, "Can't get events from db")
}
responses := make([]eventResponse, len(events))
for i, event := range events {
responses[i].TxHash = event.TxHash
responses[i].BlockHeight = event.BlockHeight
responses[i].TxIndex = event.TxIndex
responses[i].WalletAddress = event.WalletAddress
responses[i].Action = protobuf.Action_name[event.Action]
responses[i].ParsedMessage = event.ParsedMessage
responses[i].BlockTimestamp = event.BlockTimestamp
responses[i].BlockHash = event.BlockHash
}
err = ctx.JSON(responses)
if err != nil {
return errors.Wrap(err, "Go fiber cannot parse JSON")
}
return nil
}

View File

@@ -0,0 +1,15 @@
package httphandler
import (
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
)
type handler struct {
nodeSaleDg datagateway.NodeSaleDataGateway
}
func New(datagateway datagateway.NodeSaleDataGateway) *handler {
h := handler{}
h.nodeSaleDg = datagateway
return &h
}

View File

@@ -0,0 +1,26 @@
package httphandler
import (
"github.com/cockroachdb/errors"
"github.com/gofiber/fiber/v2"
)
type infoResponse struct {
IndexedBlockHeight int64 `json:"indexedBlockHeight"`
IndexedBlockHash string `json:"indexedBlockHash"`
}
func (h *handler) infoHandler(ctx *fiber.Ctx) error {
block, err := h.nodeSaleDg.GetLastProcessedBlock(ctx.UserContext())
if err != nil {
return errors.Wrap(err, "Cannot get last processed block")
}
err = ctx.JSON(infoResponse{
IndexedBlockHeight: block.BlockHeight,
IndexedBlockHash: block.BlockHash,
})
if err != nil {
return errors.Wrap(err, "Go fiber cannot parse JSON")
}
return nil
}

View File

@@ -0,0 +1,82 @@
package httphandler
import (
"fmt"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
"github.com/gofiber/fiber/v2"
)
type nodeRequest struct {
DeployId string `query:"deployId"`
OwnerPublicKey string `query:"ownerPublicKey"`
DelegateePublicKey string `query:"delegateePublicKey"`
}
type nodeResponse struct {
DeployId string `json:"deployId"`
NodeId uint32 `json:"nodeId"`
TierIndex int32 `json:"tierIndex"`
DelegatedTo string `json:"delegatedTo"`
OwnerPublicKey string `json:"ownerPublicKey"`
PurchaseTxHash string `json:"purchaseTxHash"`
DelegateTxHash string `json:"delegateTxHash"`
PurchaseBlockHeight int32 `json:"purchaseBlockHeight"`
}
func (h *handler) nodesHandler(ctx *fiber.Ctx) error {
var request nodeRequest
err := ctx.QueryParser(&request)
if err != nil {
return errors.Wrap(err, "cannot parse query")
}
ownerPublicKey := request.OwnerPublicKey
delegateePublicKey := request.DelegateePublicKey
var blockHeight int64
var txIndex int32
count, err := fmt.Sscanf(request.DeployId, "%d-%d", &blockHeight, &txIndex)
if count != 2 || err != nil {
return errs.NewPublicError("Invalid deploy ID")
}
var nodes []entity.Node
if ownerPublicKey == "" {
nodes, err = h.nodeSaleDg.GetNodesByDeployment(ctx.UserContext(), blockHeight, txIndex)
if err != nil {
return errors.Wrap(err, "Can't get nodes from db")
}
} else {
nodes, err = h.nodeSaleDg.GetNodesByPubkey(ctx.UserContext(), datagateway.GetNodesByPubkeyParams{
SaleBlock: blockHeight,
SaleTxIndex: txIndex,
OwnerPublicKey: ownerPublicKey,
DelegatedTo: delegateePublicKey,
})
if err != nil {
return errors.Wrap(err, "Can't get nodes from db")
}
}
responses := make([]nodeResponse, len(nodes))
for i, node := range nodes {
responses[i].DeployId = request.DeployId
responses[i].NodeId = node.NodeID
responses[i].TierIndex = node.TierIndex
responses[i].DelegatedTo = node.DelegatedTo
responses[i].OwnerPublicKey = node.OwnerPublicKey
responses[i].PurchaseTxHash = node.PurchaseTxHash
responses[i].DelegateTxHash = node.DelegateTxHash
responses[i].PurchaseBlockHeight = txIndex
}
err = ctx.JSON(responses)
if err != nil {
return errors.Wrap(err, "Go fiber cannot parse JSON")
}
return nil
}

View File

@@ -0,0 +1,16 @@
package httphandler
import (
"github.com/gofiber/fiber/v2"
)
func (h *handler) Mount(router fiber.Router) error {
r := router.Group("/nodesale/v1")
r.Get("/info", h.infoHandler)
r.Get("/deploy/:deployId", h.deployHandler)
r.Get("/nodes", h.nodesHandler)
r.Get("/events", h.eventsHandler)
return nil
}

View File

@@ -0,0 +1,8 @@
package config
import "github.com/gaze-network/indexer-network/internal/postgres"
type Config struct {
Postgres postgres.Config `mapstructure:"postgres"`
LastBlockDefault int64 `mapstructure:"last_block_default"`
}

View File

@@ -0,0 +1,9 @@
BEGIN;
DROP TABLE IF EXISTS nodes;
DROP TABLE IF EXISTS node_sales;
DROP TABLE IF EXISTS events;
DROP TABLE IF EXISTS blocks;
COMMIT;

View File

@@ -0,0 +1,64 @@
BEGIN;
CREATE TABLE IF NOT EXISTS blocks (
"block_height" BIGINT NOT NULL,
"block_hash" TEXT NOT NULL,
"module" TEXT NOT NULL,
PRIMARY KEY("block_height", "block_hash")
);
CREATE TABLE IF NOT EXISTS events (
"tx_hash" TEXT NOT NULL PRIMARY KEY,
"block_height" BIGINT NOT NULL,
"tx_index" INTEGER NOT NULL,
"wallet_address" TEXT NOT NULL,
"valid" BOOLEAN NOT NULL,
"action" INTEGER NOT NULL,
"raw_message" BYTEA NOT NULL,
"parsed_message" JSONB NOT NULL DEFAULT '{}',
"block_timestamp" TIMESTAMP NOT NULL,
"block_hash" TEXT NOT NULL,
"metadata" JSONB NOT NULL DEFAULT '{}',
"reason" TEXT NOT NULL DEFAULT ''
);
INSERT INTO events("tx_hash", "block_height", "tx_index",
"wallet_address", "valid", "action",
"raw_message", "parsed_message", "block_timestamp",
"block_hash", "metadata")
VALUES ('', -1, -1,
'', false, -1,
'', '{}', NOW(),
'', '{}');
CREATE TABLE IF NOT EXISTS node_sales (
"block_height" BIGINT NOT NULL,
"tx_index" INTEGER NOT NULL,
"name" TEXT NOT NULL,
"starts_at" TIMESTAMP NOT NULL,
"ends_at" TIMESTAMP NOT NULL,
"tiers" JSONB[] NOT NULL,
"seller_public_key" TEXT NOT NULL,
"max_per_address" INTEGER NOT NULL,
"deploy_tx_hash" TEXT NOT NULL REFERENCES events(tx_hash) ON DELETE CASCADE,
"max_discount_percentage" INTEGER NOT NULL,
"seller_wallet" TEXT NOT NULL,
PRIMARY KEY ("block_height", "tx_index")
);
CREATE TABLE IF NOT EXISTS nodes (
"sale_block" BIGINT NOT NULL,
"sale_tx_index" INTEGER NOT NULL,
"node_id" INTEGER NOT NULL,
"tier_index" INTEGER NOT NULL,
"delegated_to" TEXT NOT NULL DEFAULT '',
"owner_public_key" TEXT NOT NULL,
"purchase_tx_hash" TEXT NOT NULL REFERENCES events(tx_hash) ON DELETE CASCADE,
"delegate_tx_hash" TEXT NOT NULL DEFAULT '' REFERENCES events(tx_hash) ON DELETE SET DEFAULT,
PRIMARY KEY("sale_block", "sale_tx_index", "node_id"),
FOREIGN KEY("sale_block", "sale_tx_index") REFERENCES node_sales("block_height", "tx_index")
);
COMMIT;

View File

@@ -0,0 +1,15 @@
-- name: GetLastProcessedBlock :one
SELECT * FROM blocks ORDER BY block_height DESC LIMIT 1;
-- name: GetBlock :one
SELECT * FROM blocks
WHERE "block_height" = $1;
-- name: RemoveBlockFrom :execrows
DELETE FROM blocks
WHERE "block_height" >= @from_block;
-- name: CreateBlock :exec
INSERT INTO blocks ("block_height", "block_hash", "module")
VALUES ($1, $2, $3);

View File

@@ -0,0 +1,14 @@
-- name: RemoveEventsFromBlock :execrows
DELETE FROM events
WHERE "block_height" >= @from_block;
-- name: CreateEvent :exec
INSERT INTO events ("tx_hash", "block_height", "tx_index", "wallet_address", "valid", "action",
"raw_message", "parsed_message", "block_timestamp", "block_hash", "metadata",
"reason")
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12);
-- name: GetEventsByWallet :many
SELECT *
FROM events
WHERE wallet_address = $1;

View File

@@ -0,0 +1,57 @@
-- name: ClearDelegate :execrows
UPDATE nodes
SET "delegated_to" = ''
WHERE "delegate_tx_hash" = '';
-- name: SetDelegates :execrows
UPDATE nodes
SET delegated_to = @delegatee, delegate_tx_hash = $3
WHERE sale_block = $1 AND
sale_tx_index = $2 AND
node_id = ANY (@node_ids::int[]);
-- name: GetNodesByIds :many
SELECT *
FROM nodes
WHERE sale_block = $1 AND
sale_tx_index = $2 AND
node_id = ANY (@node_ids::int[]);
-- name: GetNodesByOwner :many
SELECT *
FROM nodes
WHERE sale_block = $1 AND
sale_tx_index = $2 AND
owner_public_key = $3
ORDER BY tier_index;
-- name: GetNodesByPubkey :many
SELECT nodes.*
FROM nodes JOIN events ON nodes.purchase_tx_hash = events.tx_hash
WHERE sale_block = $1 AND
sale_tx_index = $2 AND
owner_public_key = $3 AND
delegated_to = $4;
-- name: CreateNode :exec
INSERT INTO nodes (sale_block, sale_tx_index, node_id, tier_index, delegated_to, owner_public_key, purchase_tx_hash, delegate_tx_hash)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8);
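-- The next query counts sold nodes per tier: generate_series emits one row per
-- tier index in [@from_tier, @to_tier], so tiers with zero purchases still
-- return a count of 0 after the LEFT JOIN.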
-- name: GetNodeCountByTierIndex :many
SELECT (tiers.tier_index)::int AS tier_index, count(nodes.tier_index)
FROM generate_series(@from_tier::int,@to_tier::int) AS tiers(tier_index)
LEFT JOIN
(SELECT *
FROM nodes
WHERE sale_block = $1 AND
sale_tx_index= $2)
AS nodes ON tiers.tier_index = nodes.tier_index
GROUP BY tiers.tier_index
ORDER BY tiers.tier_index;
-- name: GetNodesByDeployment :many
SELECT *
FROM nodes
WHERE sale_block = $1 AND
sale_tx_index = $2;

View File

@@ -0,0 +1,9 @@
-- name: CreateNodeSale :exec
INSERT INTO node_sales ("block_height", "tx_index", "name", "starts_at", "ends_at", "tiers", "seller_public_key", "max_per_address", "deploy_tx_hash", "max_discount_percentage", "seller_wallet")
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11);
-- name: GetNodeSale :many
SELECT *
FROM node_sales
WHERE block_height = $1 AND
tx_index = $2;

View File

@@ -0,0 +1,3 @@
-- name: ClearEvents :exec
DELETE FROM events
WHERE tx_hash <> '';

File diff suppressed because it is too large

View File

@@ -0,0 +1,77 @@
package datagateway
import (
"context"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
)
type NodeSaleDataGateway interface {
BeginNodeSaleTx(ctx context.Context) (NodeSaleDataGatewayWithTx, error)
CreateBlock(ctx context.Context, arg entity.Block) error
GetBlock(ctx context.Context, blockHeight int64) (*entity.Block, error)
GetLastProcessedBlock(ctx context.Context) (*entity.Block, error)
RemoveBlockFrom(ctx context.Context, fromBlock int64) (int64, error)
RemoveEventsFromBlock(ctx context.Context, fromBlock int64) (int64, error)
ClearDelegate(ctx context.Context) (int64, error)
GetNodesByIds(ctx context.Context, arg GetNodesByIdsParams) ([]entity.Node, error)
CreateEvent(ctx context.Context, arg entity.NodeSaleEvent) error
SetDelegates(ctx context.Context, arg SetDelegatesParams) (int64, error)
CreateNodeSale(ctx context.Context, arg entity.NodeSale) error
GetNodeSale(ctx context.Context, arg GetNodeSaleParams) ([]entity.NodeSale, error)
GetNodesByOwner(ctx context.Context, arg GetNodesByOwnerParams) ([]entity.Node, error)
CreateNode(ctx context.Context, arg entity.Node) error
GetNodeCountByTierIndex(ctx context.Context, arg GetNodeCountByTierIndexParams) ([]GetNodeCountByTierIndexRow, error)
GetNodesByPubkey(ctx context.Context, arg GetNodesByPubkeyParams) ([]entity.Node, error)
GetNodesByDeployment(ctx context.Context, saleBlock int64, saleTxIndex int32) ([]entity.Node, error)
GetEventsByWallet(ctx context.Context, walletAddress string) ([]entity.NodeSaleEvent, error)
}
type NodeSaleDataGatewayWithTx interface {
NodeSaleDataGateway
Tx
}
type GetNodesByIdsParams struct {
SaleBlock uint64
SaleTxIndex uint32
NodeIds []uint32
}
type SetDelegatesParams struct {
SaleBlock uint64
SaleTxIndex int32
Delegatee string
DelegateTxHash string
NodeIds []uint32
}
type GetNodeSaleParams struct {
BlockHeight uint64
TxIndex uint32
}
type GetNodesByOwnerParams struct {
SaleBlock uint64
SaleTxIndex uint32
OwnerPublicKey string
}
type GetNodeCountByTierIndexParams struct {
SaleBlock uint64
SaleTxIndex uint32
FromTier uint32
ToTier uint32
}
type GetNodeCountByTierIndexRow struct {
TierIndex int32
Count int64
}
type GetNodesByPubkeyParams struct {
SaleBlock int64
SaleTxIndex int32
OwnerPublicKey string
DelegatedTo string
}

View File

@@ -0,0 +1,12 @@
package datagateway
import "context"
type Tx interface {
// Commit commits the DB transaction. All changes made after Begin() will be persisted. Calling Commit() will close the current transaction.
// If Commit() is called without a prior Begin(), it must be a no-op.
Commit(ctx context.Context) error
// Rollback rolls back the DB transaction. All changes made after Begin() will be discarded.
// Rollback() must be safe to call even if no transaction is active. Hence, a defer Rollback() is safe, even if Commit() was called prior with non-error conditions.
Rollback(ctx context.Context) error
}
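// withNodeSaleTx is a hypothetical usage sketch (not part of this change) of the
// contract above: Rollback is deferred unconditionally, which is a safe no-op
// after a successful Commit.
func withNodeSaleTx(ctx context.Context, dg NodeSaleDataGateway, fn func(NodeSaleDataGatewayWithTx) error) error {
qtx, err := dg.BeginNodeSaleTx(ctx)
if err != nil {
return err
}
defer qtx.Rollback(ctx) // no-op if Commit already succeeded
if err := fn(qtx); err != nil {
return err
}
return qtx.Commit(ctx)
}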

View File

@@ -0,0 +1,61 @@
package nodesale
import (
"context"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
delegatevalidator "github.com/gaze-network/indexer-network/modules/nodesale/internal/validator/delegate"
)
func (p *Processor) ProcessDelegate(ctx context.Context, qtx datagateway.NodeSaleDataGatewayWithTx, block *types.Block, event NodeSaleEvent) error {
validator := delegatevalidator.New()
delegate := event.EventMessage.Delegate
_, nodes, err := validator.NodesExist(ctx, qtx, delegate.DeployID, delegate.NodeIDs)
if err != nil {
return errors.Wrap(err, "Cannot query")
}
for _, node := range nodes {
valid := validator.EqualXonlyPublicKey(node.OwnerPublicKey, event.TxPubkey)
if !valid {
break
}
}
err = qtx.CreateEvent(ctx, entity.NodeSaleEvent{
TxHash: event.Transaction.TxHash.String(),
TxIndex: int32(event.Transaction.Index),
Action: int32(event.EventMessage.Action),
RawMessage: event.RawData,
ParsedMessage: event.EventJson,
BlockTimestamp: block.Header.Timestamp,
BlockHash: event.Transaction.BlockHash.String(),
BlockHeight: event.Transaction.BlockHeight,
Valid: validator.Valid,
WalletAddress: p.PubkeyToPkHashAddress(event.TxPubkey).EncodeAddress(),
Metadata: nil,
Reason: validator.Reason,
})
if err != nil {
return errors.Wrap(err, "Failed to insert event")
}
if validator.Valid {
_, err = qtx.SetDelegates(ctx, datagateway.SetDelegatesParams{
SaleBlock: delegate.DeployID.Block,
SaleTxIndex: int32(delegate.DeployID.TxIndex),
Delegatee: delegate.DelegateePublicKey,
DelegateTxHash: event.Transaction.TxHash.String(),
NodeIds: delegate.NodeIDs,
})
if err != nil {
return errors.Wrap(err, "Failed to set delegate")
}
}
return nil
}

View File

@@ -0,0 +1,84 @@
package nodesale
import (
"context"
"encoding/hex"
"testing"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway/mocks"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
"github.com/gaze-network/indexer-network/modules/nodesale/protobuf"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func TestDelegate(t *testing.T) {
ctx := context.Background()
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
buyerPrivateKey, _ := btcec.NewPrivateKey()
buyerPubkeyHex := hex.EncodeToString(buyerPrivateKey.PubKey().SerializeCompressed())
delegateePrivateKey, _ := btcec.NewPrivateKey()
delegateePubkeyHex := hex.EncodeToString(delegateePrivateKey.PubKey().SerializeCompressed())
delegateMessage := &protobuf.NodeSaleEvent{
Action: protobuf.Action_ACTION_DELEGATE,
Delegate: &protobuf.ActionDelegate{
DelegateePublicKey: delegateePubkeyHex,
NodeIDs: []uint32{9, 10},
DeployID: &protobuf.ActionID{
Block: uint64(testBlockHeight) - 2,
TxIndex: uint32(testTxIndex) - 2,
},
},
}
event, block := assembleTestEvent(buyerPrivateKey, "131313131313", "131313131313", 0, 0, delegateMessage)
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
return event.Valid == true
})).Return(nil)
mockDgTx.EXPECT().GetNodesByIds(mock.Anything, datagateway.GetNodesByIdsParams{
SaleBlock: delegateMessage.Delegate.DeployID.Block,
SaleTxIndex: delegateMessage.Delegate.DeployID.TxIndex,
NodeIds: []uint32{9, 10},
}).Return([]entity.Node{
{
SaleBlock: delegateMessage.Delegate.DeployID.Block,
SaleTxIndex: delegateMessage.Delegate.DeployID.TxIndex,
NodeID: 9,
TierIndex: 1,
DelegatedTo: "",
OwnerPublicKey: buyerPubkeyHex,
PurchaseTxHash: mock.Anything,
DelegateTxHash: "",
},
{
SaleBlock: delegateMessage.Delegate.DeployID.Block,
SaleTxIndex: delegateMessage.Delegate.DeployID.TxIndex,
NodeID: 10,
TierIndex: 2,
DelegatedTo: "",
OwnerPublicKey: buyerPubkeyHex,
PurchaseTxHash: mock.Anything,
DelegateTxHash: "",
},
}, nil)
mockDgTx.EXPECT().SetDelegates(mock.Anything, datagateway.SetDelegatesParams{
SaleBlock: delegateMessage.Delegate.DeployID.Block,
SaleTxIndex: int32(delegateMessage.Delegate.DeployID.TxIndex),
Delegatee: delegateMessage.Delegate.DelegateePublicKey,
DelegateTxHash: event.Transaction.TxHash.String(),
NodeIds: delegateMessage.Delegate.NodeIDs,
}).Return(2, nil)
err := p.ProcessDelegate(ctx, mockDgTx, block, event)
require.NoError(t, err)
}

View File

@@ -0,0 +1,67 @@
package nodesale
import (
"context"
"time"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/validator"
"google.golang.org/protobuf/encoding/protojson"
)
func (p *Processor) ProcessDeploy(ctx context.Context, qtx datagateway.NodeSaleDataGatewayWithTx, block *types.Block, event NodeSaleEvent) error {
deploy := event.EventMessage.Deploy
validator := validator.New()
validator.EqualXonlyPublicKey(deploy.SellerPublicKey, event.TxPubkey)
err := qtx.CreateEvent(ctx, entity.NodeSaleEvent{
TxHash: event.Transaction.TxHash.String(),
TxIndex: int32(event.Transaction.Index),
Action: int32(event.EventMessage.Action),
RawMessage: event.RawData,
ParsedMessage: event.EventJson,
BlockTimestamp: block.Header.Timestamp,
BlockHash: event.Transaction.BlockHash.String(),
BlockHeight: event.Transaction.BlockHeight,
Valid: validator.Valid,
WalletAddress: p.PubkeyToPkHashAddress(event.TxPubkey).EncodeAddress(),
Metadata: nil,
Reason: validator.Reason,
})
if err != nil {
return errors.Wrap(err, "Failed to insert event")
}
if validator.Valid {
tiers := make([][]byte, len(deploy.Tiers))
for i, tier := range deploy.Tiers {
tierJson, err := protojson.Marshal(tier)
if err != nil {
return errors.Wrap(err, "Failed to parse tiers to json")
}
tiers[i] = tierJson
}
err = qtx.CreateNodeSale(ctx, entity.NodeSale{
BlockHeight: uint64(event.Transaction.BlockHeight),
TxIndex: event.Transaction.Index,
Name: deploy.Name,
StartsAt: time.Unix(int64(deploy.StartsAt), 0),
EndsAt: time.Unix(int64(deploy.EndsAt), 0),
Tiers: tiers,
SellerPublicKey: deploy.SellerPublicKey,
MaxPerAddress: deploy.MaxPerAddress,
DeployTxHash: event.Transaction.TxHash.String(),
MaxDiscountPercentage: int32(deploy.MaxDiscountPercentage),
SellerWallet: deploy.SellerWallet,
})
if err != nil {
return errors.Wrap(err, "Failed to insert NodeSale")
}
}
return nil
}

View File

@@ -0,0 +1,139 @@
package nodesale
import (
"context"
"encoding/hex"
"testing"
"time"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway/mocks"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
"github.com/gaze-network/indexer-network/modules/nodesale/protobuf"
"github.com/samber/lo"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/encoding/protojson"
)
func TestDeployInvalid(t *testing.T) {
ctx := context.Background()
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
prvKey, err := btcec.NewPrivateKey()
require.NoError(t, err)
strangerKey, err := btcec.NewPrivateKey()
require.NoError(t, err)
strangerPubkeyHex := hex.EncodeToString(strangerKey.PubKey().SerializeCompressed())
sellerWallet := p.PubkeyToPkHashAddress(prvKey.PubKey())
message := &protobuf.NodeSaleEvent{
Action: protobuf.Action_ACTION_DEPLOY,
Deploy: &protobuf.ActionDeploy{
Name: t.Name(),
StartsAt: 100,
EndsAt: 200,
Tiers: []*protobuf.Tier{
{
PriceSat: 100,
Limit: 5,
MaxPerAddress: 100,
},
{
PriceSat: 200,
Limit: 5,
MaxPerAddress: 100,
},
},
SellerPublicKey: strangerPubkeyHex,
MaxPerAddress: 100,
MaxDiscountPercentage: 50,
SellerWallet: sellerWallet.EncodeAddress(),
},
}
event, block := assembleTestEvent(prvKey, "0101010101", "0101010101", 0, 0, message)
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
return event.Valid == false
})).Return(nil)
err = p.ProcessDeploy(ctx, mockDgTx, block, event)
require.NoError(t, err)
mockDgTx.AssertNotCalled(t, "CreateNodeSale")
}
func TestDeployValid(t *testing.T) {
ctx := context.Background()
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
privateKey, err := btcec.NewPrivateKey()
require.NoError(t, err)
pubkeyHex := hex.EncodeToString(privateKey.PubKey().SerializeCompressed())
sellerWallet := p.PubkeyToPkHashAddress(privateKey.PubKey())
startAt := time.Now().Add(time.Hour * -1)
endAt := time.Now().Add(time.Hour * 1)
message := &protobuf.NodeSaleEvent{
Action: protobuf.Action_ACTION_DEPLOY,
Deploy: &protobuf.ActionDeploy{
Name: t.Name(),
StartsAt: uint32(startAt.UTC().Unix()),
EndsAt: uint32(endAt.UTC().Unix()),
Tiers: []*protobuf.Tier{
{
PriceSat: 100,
Limit: 5,
MaxPerAddress: 100,
},
{
PriceSat: 200,
Limit: 5,
MaxPerAddress: 100,
},
},
SellerPublicKey: pubkeyHex,
MaxPerAddress: 100,
MaxDiscountPercentage: 50,
SellerWallet: sellerWallet.EncodeAddress(),
},
}
event, block := assembleTestEvent(privateKey, "0202020202", "0202020202", 0, 0, message)
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
return event.Valid == true
})).Return(nil)
tiers := lo.Map(message.Deploy.Tiers, func(tier *protobuf.Tier, _ int) []byte {
tierJson, err := protojson.Marshal(tier)
require.NoError(t, err)
return tierJson
})
mockDgTx.EXPECT().CreateNodeSale(mock.Anything, entity.NodeSale{
BlockHeight: uint64(event.Transaction.BlockHeight),
TxIndex: uint32(event.Transaction.Index),
Name: message.Deploy.Name,
StartsAt: time.Unix(int64(message.Deploy.StartsAt), 0),
EndsAt: time.Unix(int64(message.Deploy.EndsAt), 0),
Tiers: tiers,
SellerPublicKey: message.Deploy.SellerPublicKey,
MaxPerAddress: message.Deploy.MaxPerAddress,
DeployTxHash: event.Transaction.TxHash.String(),
MaxDiscountPercentage: int32(message.Deploy.MaxDiscountPercentage),
SellerWallet: message.Deploy.SellerWallet,
}).Return(nil)
err = p.ProcessDeploy(ctx, mockDgTx, block, event)
require.NoError(t, err)
}

View File

@@ -0,0 +1,55 @@
package entity
import "time"
type Block struct {
BlockHeight int64
BlockHash string
Module string
}
type Node struct {
SaleBlock uint64
SaleTxIndex uint32
NodeID uint32
TierIndex int32
DelegatedTo string
OwnerPublicKey string
PurchaseTxHash string
DelegateTxHash string
}
type NodeSale struct {
BlockHeight uint64
TxIndex uint32
Name string
StartsAt time.Time
EndsAt time.Time
Tiers [][]byte
SellerPublicKey string
MaxPerAddress uint32
DeployTxHash string
MaxDiscountPercentage int32
SellerWallet string
}
type NodeSaleEvent struct {
TxHash string
BlockHeight int64
TxIndex int32
WalletAddress string
Valid bool
Action int32
RawMessage []byte
ParsedMessage []byte
BlockTimestamp time.Time
BlockHash string
Metadata *MetadataEventPurchase
Reason string
}
type MetadataEventPurchase struct {
ExpectedTotalAmountDiscounted uint64
ReportedTotalAmount uint64
PaidTotalAmount uint64
}

View File

@@ -0,0 +1,51 @@
package delegate
import (
"context"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/validator"
"github.com/gaze-network/indexer-network/modules/nodesale/protobuf"
)
type DelegateValidator struct {
validator.Validator
}
func New() *DelegateValidator {
v := validator.New()
return &DelegateValidator{
Validator: *v,
}
}
func (v *DelegateValidator) NodesExist(
ctx context.Context,
qtx datagateway.NodeSaleDataGatewayWithTx,
deployId *protobuf.ActionID,
nodeIds []uint32,
) (bool, []entity.Node, error) {
if !v.Valid {
return false, nil, nil
}
nodes, err := qtx.GetNodesByIds(ctx, datagateway.GetNodesByIdsParams{
SaleBlock: deployId.Block,
SaleTxIndex: deployId.TxIndex,
NodeIds: nodeIds,
})
if err != nil {
v.Valid = false
return v.Valid, nil, errors.Wrap(err, "Failed to get nodes")
}
if len(nodeIds) != len(nodes) {
v.Valid = false
return v.Valid, nil, nil
}
v.Valid = true
return v.Valid, nodes, nil
}

View File

@@ -0,0 +1,6 @@
package validator
const (
INVALID_PUBKEY_FORMAT = "Cannot parse public key"
INVALID_PUBKEY = "Invalid public key"
)

View File

@@ -0,0 +1,17 @@
package purchase
const (
DEPLOYID_NOT_FOUND = "Depoloy ID not found."
PURCHASE_TIMEOUT = "Purchase timeout."
BLOCK_HEIGHT_TIMEOUT = "Block height over timeout block"
INVALID_SIGNATURE_FORMAT = "Cannot parse signature."
INVALID_SIGNATURE = "Invalid Signature."
INVALID_TIER_JSON = "Invalid Tier format"
INVALID_NODE_ID = "Invalid NodeId."
NODE_ALREADY_PURCHASED = "Some nodes have already been purchased."
INVALID_SELLER_ADDR_FORMAT = "Invalid seller address."
INVALID_PAYMENT = "Total amount paid less than reported price"
INSUFFICIENT_FUND = "Insufficient funds"
OVER_LIMIT_PER_ADDR = "Purchase over limit per address."
OVER_LIMIT_PER_TIER = "Purchase over limit per tier."
)

View File

@@ -0,0 +1,283 @@
package purchase
import (
"context"
"encoding/hex"
"slices"
"time"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcec/v2/ecdsa"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/validator"
"github.com/gaze-network/indexer-network/modules/nodesale/protobuf"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/proto"
)
type PurchaseValidator struct {
validator.Validator
}
func New() *PurchaseValidator {
v := validator.New()
return &PurchaseValidator{
Validator: *v,
}
}
func (v *PurchaseValidator) NodeSaleExists(ctx context.Context, qtx datagateway.NodeSaleDataGatewayWithTx, payload *protobuf.PurchasePayload) (bool, *entity.NodeSale, error) {
if !v.Valid {
return false, nil, nil
}
// check node existed
deploys, err := qtx.GetNodeSale(ctx, datagateway.GetNodeSaleParams{
BlockHeight: payload.DeployID.Block,
TxIndex: payload.DeployID.TxIndex,
})
if err != nil {
v.Valid = false
return v.Valid, nil, errors.Wrap(err, "Failed to Get NodeSale")
}
if len(deploys) < 1 {
v.Valid = false
v.Reason = DEPLOYID_NOT_FOUND
return v.Valid, nil, nil
}
v.Valid = true
return v.Valid, &deploys[0], nil
}
func (v *PurchaseValidator) ValidTimestamp(deploy *entity.NodeSale, timestamp time.Time) bool {
if !v.Valid {
return false
}
if timestamp.Before(deploy.StartsAt) ||
timestamp.After(deploy.EndsAt) {
v.Valid = false
v.Reason = PURCHASE_TIMEOUT
return v.Valid
}
v.Valid = true
return v.Valid
}
func (v *PurchaseValidator) WithinTimeoutBlock(timeOutBlock uint64, blockHeight uint64) bool {
if !v.Valid {
return false
}
if timeOutBlock == 0 {
// No timeout
v.Valid = true
return v.Valid
}
if timeOutBlock < blockHeight {
v.Valid = false
v.Reason = BLOCK_HEIGHT_TIMEOUT
return v.Valid
}
v.Valid = true
return v.Valid
}
func (v *PurchaseValidator) VerifySignature(purchase *protobuf.ActionPurchase, deploy *entity.NodeSale) bool {
if !v.Valid {
return false
}
payload := purchase.Payload
payloadBytes, _ := proto.Marshal(payload)
signatureBytes, _ := hex.DecodeString(purchase.SellerSignature)
signature, err := ecdsa.ParseSignature(signatureBytes)
if err != nil {
v.Valid = false
v.Reason = INVALID_SIGNATURE_FORMAT
return v.Valid
}
hash := chainhash.DoubleHashB(payloadBytes)
pubkeyBytes, _ := hex.DecodeString(deploy.SellerPublicKey)
pubKey, err := btcec.ParsePubKey(pubkeyBytes)
if err != nil {
v.Valid = false
v.Reason = INVALID_SIGNATURE
return v.Valid
}
verified := signature.Verify(hash, pubKey)
if !verified {
v.Valid = false
v.Reason = INVALID_SIGNATURE
return v.Valid
}
v.Valid = true
return v.Valid
}
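// signPurchasePayload is a hypothetical seller-side sketch (not part of this
// change) showing how a SellerSignature accepted by VerifySignature above could
// be produced: a DER-encoded ECDSA signature over the double-SHA256 of the
// protobuf-marshaled payload, hex-encoded.
func signPurchasePayload(sellerKey *btcec.PrivateKey, payload *protobuf.PurchasePayload) (string, error) {
payloadBytes, err := proto.Marshal(payload)
if err != nil {
return "", err
}
hash := chainhash.DoubleHashB(payloadBytes)
signature := ecdsa.Sign(sellerKey, hash)
return hex.EncodeToString(signature.Serialize()), nil
}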
type TierMap struct {
Tiers []protobuf.Tier
BuyingTiersCount []uint32
NodeIdToTier map[uint32]int32
}
func (v *PurchaseValidator) ValidTiers(
payload *protobuf.PurchasePayload,
deploy *entity.NodeSale,
) (bool, TierMap) {
if !v.Valid {
return false, TierMap{}
}
tiers := make([]protobuf.Tier, len(deploy.Tiers))
buyingTiersCount := make([]uint32, len(tiers))
nodeIdToTier := make(map[uint32]int32)
for i, tierJson := range deploy.Tiers {
tier := &tiers[i]
err := protojson.Unmarshal(tierJson, tier)
if err != nil {
v.Valid = false
v.Reason = INVALID_TIER_JSON
return v.Valid, TierMap{}
}
}
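// Node IDs are allocated consecutively across tiers by their limits: with
// limits [5, 5], IDs 0-4 belong to tier 0 and IDs 5-9 to tier 1. The walk
// below advances currentTier until the sorted node ID fits under the running
// limit sum, rejecting IDs beyond the last tier.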
slices.Sort(payload.NodeIDs)
var currentTier int32 = -1
var tierSum uint32 = 0
for _, nodeId := range payload.NodeIDs {
for nodeId >= tierSum && currentTier < int32(len(tiers)-1) {
currentTier++
tierSum += tiers[currentTier].Limit
}
if nodeId < tierSum {
buyingTiersCount[currentTier]++
nodeIdToTier[nodeId] = currentTier
} else {
v.Valid = false
v.Reason = INVALID_NODE_ID
return false, TierMap{}
}
}
v.Valid = true
return v.Valid, TierMap{
Tiers: tiers,
BuyingTiersCount: buyingTiersCount,
NodeIdToTier: nodeIdToTier,
}
}
func (v *PurchaseValidator) ValidUnpurchasedNodes(
ctx context.Context,
qtx datagateway.NodeSaleDataGatewayWithTx,
payload *protobuf.PurchasePayload,
) (bool, error) {
if !v.Valid {
return false, nil
}
// valid unpurchased node ID
nodes, err := qtx.GetNodesByIds(ctx, datagateway.GetNodesByIdsParams{
SaleBlock: payload.DeployID.Block,
SaleTxIndex: payload.DeployID.TxIndex,
NodeIds: payload.NodeIDs,
})
if err != nil {
v.Valid = false
return v.Valid, errors.Wrap(err, "Failed to Get nodes")
}
if len(nodes) > 0 {
v.Valid = false
v.Reason = NODE_ALREADY_PURCHASED
return false, nil
}
v.Valid = true
return true, nil
}
func (v *PurchaseValidator) ValidPaidAmount(
payload *protobuf.PurchasePayload,
deploy *entity.NodeSale,
txPaid uint64,
tiers []protobuf.Tier,
buyingTiersCount []uint32,
network *chaincfg.Params,
) (bool, *entity.MetadataEventPurchase) {
if !v.Valid {
return false, nil
}
meta := entity.MetadataEventPurchase{}
meta.PaidTotalAmount = txPaid
meta.ReportedTotalAmount = uint64(payload.TotalAmountSat)
// the total amount paid on-chain must be at least the reported amount
if txPaid < uint64(payload.TotalAmountSat) {
v.Valid = false
v.Reason = INVALID_PAYMENT
return v.Valid, nil
}
// calculate the total undiscounted price of the purchased nodes
var totalPrice uint64 = 0
for i := 0; i < len(tiers); i++ {
totalPrice += uint64(buyingTiersCount[i]) * uint64(tiers[i].PriceSat)
}
// the reported amount must be at least the maximum-discounted total price,
// rounded to the nearest satoshi
maxDiscounted := totalPrice * (100 - uint64(deploy.MaxDiscountPercentage))
remainder := maxDiscounted % 100
maxDiscounted /= 100
if remainder >= 50 {
maxDiscounted++
}
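// Worked example: totalPrice = 333 sat with MaxDiscountPercentage = 50 gives
// 333*50 = 16650, i.e. 166 with remainder 50, which rounds up to a minimum
// acceptable TotalAmountSat of 167 sat.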
meta.ExpectedTotalAmountDiscounted = maxDiscounted
if uint64(payload.TotalAmountSat) < maxDiscounted {
v.Valid = false
v.Reason = INSUFFICIENT_FUND
return v.Valid, nil
}
v.Valid = true
return v.Valid, &meta
}
func (v *PurchaseValidator) WithinLimit(
ctx context.Context,
qtx datagateway.NodeSaleDataGatewayWithTx,
payload *protobuf.PurchasePayload,
deploy *entity.NodeSale,
tiers []protobuf.Tier,
buyingTiersCount []uint32,
) (bool, error) {
if !v.Valid {
return false, nil
}
// check node limit
// get all selled by seller and owned by buyer
buyerOwnedNodes, err := qtx.GetNodesByOwner(ctx, datagateway.GetNodesByOwnerParams{
SaleBlock: deploy.BlockHeight,
SaleTxIndex: deploy.TxIndex,
OwnerPublicKey: payload.BuyerPublicKey,
})
if err != nil {
v.Valid = false
return v.Valid, errors.Wrap(err, "Failed to GetNodesByOwner")
}
if len(buyerOwnedNodes)+len(payload.NodeIDs) > int(deploy.MaxPerAddress) {
v.Valid = false
v.Reason = "Purchase over limit per address."
return v.Valid, nil
}
// check limit
// count each tiers
// check limited for each tier
ownedTiersCount := make([]uint32, len(tiers))
for _, node := range buyerOwnedNodes {
ownedTiersCount[node.TierIndex]++
}
for i := 0; i < len(tiers); i++ {
if ownedTiersCount[i]+buyingTiersCount[i] > tiers[i].MaxPerAddress {
v.Valid = false
v.Reason = "Purchase over limit per tier."
return v.Valid, nil
}
}
v.Valid = true
return v.Valid, nil
}

View File

@@ -0,0 +1,44 @@
package validator
import (
"bytes"
"encoding/hex"
"github.com/btcsuite/btcd/btcec/v2"
)
type Validator struct {
Valid bool
Reason string
}
func New() *Validator {
return &Validator{
Valid: true,
}
}
func (v *Validator) EqualXonlyPublicKey(target string, expected *btcec.PublicKey) bool {
if !v.Valid {
return false
}
targetBytes, err := hex.DecodeString(target)
if err != nil {
v.Valid = false
v.Reason = INVALID_PUBKEY_FORMAT
return v.Valid
}
targetPubKey, err := btcec.ParsePubKey(targetBytes)
if err != nil {
v.Valid = false
v.Reason = INVALID_PUBKEY_FORMAT
return v.Valid
}
xOnlyTargetPubKey := btcec.ToSerialized(targetPubKey).SchnorrSerialized()
xOnlyExpectedPubKey := btcec.ToSerialized(expected).SchnorrSerialized()
v.Valid = bytes.Equal(xOnlyTargetPubKey[:], xOnlyExpectedPubKey[:])
if !v.Valid {
v.Reason = INVALID_PUBKEY
}
return v.Valid
}

View File

@@ -0,0 +1,61 @@
package nodesale
import (
"context"
"fmt"
"github.com/btcsuite/btcd/rpcclient"
"github.com/gaze-network/indexer-network/core/datasources"
"github.com/gaze-network/indexer-network/core/indexer"
"github.com/gaze-network/indexer-network/internal/config"
"github.com/gaze-network/indexer-network/internal/postgres"
"github.com/gaze-network/indexer-network/modules/nodesale/api/httphandler"
repository "github.com/gaze-network/indexer-network/modules/nodesale/repository/postgres"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gofiber/fiber/v2"
"github.com/samber/do/v2"
)
var NODESALE_MAGIC = []byte{0x6e, 0x73, 0x6f, 0x70}
const (
Version = "v0.0.1-alpha"
)
func New(injector do.Injector) (indexer.IndexerWorker, error) {
ctx := do.MustInvoke[context.Context](injector)
conf := do.MustInvoke[config.Config](injector)
btcClient := do.MustInvoke[*rpcclient.Client](injector)
datasource := datasources.NewBitcoinNode(btcClient)
pg, err := postgres.NewPool(ctx, conf.Modules.NodeSale.Postgres)
if err != nil {
return nil, fmt.Errorf("Can't create postgres connection : %w", err)
}
var cleanupFuncs []func(context.Context) error
cleanupFuncs = append(cleanupFuncs, func(ctx context.Context) error {
pg.Close()
return nil
})
repository := repository.NewRepository(pg)
processor := &Processor{
NodeSaleDg: repository,
BtcClient: datasource,
Network: conf.Network,
cleanupFuncs: cleanupFuncs,
lastBlockDefault: conf.Modules.NodeSale.LastBlockDefault,
}
httpServer := do.MustInvoke[*fiber.App](injector)
nodeSaleHandler := httphandler.New(repository)
if err := nodeSaleHandler.Mount(httpServer); err != nil {
return nil, fmt.Errorf("Can't mount nodesale API : %w", err)
}
logger.InfoContext(ctx, "Mounted nodesale HTTP handler")
indexer := indexer.New(processor, datasource)
logger.InfoContext(ctx, "NodeSale module started.")
return indexer, nil
}

View File

@@ -0,0 +1,61 @@
package nodesale
import (
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/txscript"
"github.com/decred/dcrd/dcrec/secp256k1/v4"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/nodesale/protobuf"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/proto"
)
var (
testBlockHeight uint64 = 101
testTxIndex uint32 = 1
)
func assembleTestEvent(privateKey *secp256k1.PrivateKey, blockHashHex, txHashHex string, blockHeight uint64, txIndex uint32, message *protobuf.NodeSaleEvent) (NodeSaleEvent, *types.Block) {
blockHash, _ := chainhash.NewHashFromStr(blockHashHex)
txHash, _ := chainhash.NewHashFromStr(txHashHex)
rawData, _ := proto.Marshal(message)
builder := txscript.NewScriptBuilder()
builder.AddOp(txscript.OP_FALSE)
builder.AddOp(txscript.OP_IF)
builder.AddData(rawData)
builder.AddOp(txscript.OP_ENDIF)
messageJson, _ := protojson.Marshal(message)
if blockHeight == 0 {
blockHeight = testBlockHeight
testBlockHeight++
}
if txIndex == 0 {
txIndex = testTxIndex
testTxIndex++
}
event := NodeSaleEvent{
Transaction: &types.Transaction{
BlockHeight: int64(blockHeight),
BlockHash: *blockHash,
Index: uint32(txIndex),
TxHash: *txHash,
},
RawData: rawData,
EventMessage: message,
EventJson: messageJson,
TxPubkey: privateKey.PubKey(),
}
block := &types.Block{
Header: types.BlockHeader{
Timestamp: time.Now().UTC(),
},
}
return event, block
}

View File

@@ -0,0 +1,303 @@
package nodesale
import (
"bytes"
"context"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/txscript"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/core/indexer"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/proto"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/core/datasources"
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
"github.com/gaze-network/indexer-network/modules/nodesale/protobuf"
)
type NodeSaleEvent struct {
Transaction *types.Transaction
EventMessage *protobuf.NodeSaleEvent
EventJson []byte
TxPubkey *btcec.PublicKey
RawData []byte
InputValue uint64
}
func NewProcessor(repository datagateway.NodeSaleDataGateway,
datasource *datasources.BitcoinNodeDatasource,
network common.Network,
cleanupFuncs []func(context.Context) error,
lastBlockDefault int64,
) *Processor {
return &Processor{
NodeSaleDg: repository,
BtcClient: datasource,
Network: network,
cleanupFuncs: cleanupFuncs,
lastBlockDefault: lastBlockDefault,
}
}
func (p *Processor) Shutdown(ctx context.Context) error {
for _, cleanupFunc := range p.cleanupFuncs {
err := cleanupFunc(ctx)
if err != nil {
return errors.Wrap(err, "cleanup function error")
}
}
return nil
}
type Processor struct {
NodeSaleDg datagateway.NodeSaleDataGateway
BtcClient *datasources.BitcoinNodeDatasource
Network common.Network
cleanupFuncs []func(context.Context) error
lastBlockDefault int64
}
// CurrentBlock implements indexer.Processor.
func (p *Processor) CurrentBlock(ctx context.Context) (types.BlockHeader, error) {
block, err := p.NodeSaleDg.GetLastProcessedBlock(ctx)
if err != nil {
logger.InfoContext(ctx, "Couldn't get last processed block. Start from NODESALE_LAST_BLOCK_DEFAULT.",
slogx.Int64("currentBlock", p.lastBlockDefault))
header, err := p.BtcClient.GetBlockHeader(ctx, p.lastBlockDefault)
if err != nil {
return types.BlockHeader{}, errors.Wrap(err, "Cannot get default block from bitcoin node")
}
return types.BlockHeader{
Hash: header.Hash,
Height: p.lastBlockDefault,
}, nil
}
hash, err := chainhash.NewHashFromStr(block.BlockHash)
if err != nil {
logger.PanicContext(ctx, "Invalid hash format found in Database.")
}
return types.BlockHeader{
Hash: *hash,
Height: block.BlockHeight,
}, nil
}
// GetIndexedBlock implements indexer.Processor.
func (p *Processor) GetIndexedBlock(ctx context.Context, height int64) (types.BlockHeader, error) {
block, err := p.NodeSaleDg.GetBlock(ctx, height)
if err != nil {
return types.BlockHeader{}, errors.Wrapf(err, "Block %d not found", height)
}
hash, err := chainhash.NewHashFromStr(block.BlockHash)
if err != nil {
logger.PanicContext(ctx, "Invalid hash format found in Database.")
}
return types.BlockHeader{
Hash: *hash,
Height: block.BlockHeight,
}, nil
}
// Name implements indexer.Processor.
func (p *Processor) Name() string {
return "nodesale"
}
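// extractNodeSaleData scans a tapscript witness for an inscription-style
// envelope: OP_FALSE OP_IF, a 4-byte push of the "nsop" magic, then a data
// push carrying the protobuf payload. It returns the payload bytes and the
// tapscript internal key when the pattern is found.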
func extractNodeSaleData(witness [][]byte) (data []byte, internalPubkey *btcec.PublicKey, isNodeSale bool) {
tokenizer, controlBlock, isTapScript := extractTapScript(witness)
if !isTapScript {
return []byte{}, nil, false
}
state := 0
for tokenizer.Next() {
switch state {
case 0:
if tokenizer.Opcode() == txscript.OP_0 {
state++
} else {
state = 0
}
case 1:
if tokenizer.Opcode() == txscript.OP_IF {
state++
} else {
state = 0
}
case 2:
if tokenizer.Opcode() == txscript.OP_DATA_4 &&
bytes.Equal(tokenizer.Data(), NODESALE_MAGIC) {
state++
} else {
state = 0
}
case 3:
// Any instruction > txscript.OP_16 is not push data. Note: txscript.OP_PUSHDATAX < txscript.OP_16
if tokenizer.Opcode() <= txscript.OP_16 {
data := tokenizer.Data()
return data, controlBlock.InternalKey, true
}
state = 0
}
}
return []byte{}, nil, false
}
func (p *Processor) parseTransactions(ctx context.Context, transactions []*types.Transaction) ([]NodeSaleEvent, error) {
var events []NodeSaleEvent
for _, t := range transactions {
for _, txIn := range t.TxIn {
data, txPubkey, isNodeSale := extractNodeSaleData(txIn.Witness)
if !isNodeSale {
continue
}
event := &protobuf.NodeSaleEvent{}
err := proto.Unmarshal(data, event)
if err != nil {
logger.WarnContext(ctx, "Invalid Protobuf",
slogx.String("block_hash", t.BlockHash.String()),
slogx.Int("txIndex", int(t.Index)))
continue
}
eventJson, err := protojson.Marshal(event)
if err != nil {
return []NodeSaleEvent{}, errors.Wrap(err, "Failed to parse protobuf to json")
}
prevTx, _, err := p.BtcClient.GetRawTransactionAndHeightByTxHash(ctx, txIn.PreviousOutTxHash)
if err != nil {
return nil, errors.Wrap(err, "Failed to get Previous transaction data")
}
if txIn.PreviousOutIndex >= uint32(len(prevTx.TxOut)) {
return nil, errors.Wrap(err, "Invalid previous transaction from bitcoin")
}
events = append(events, NodeSaleEvent{
Transaction: t,
EventMessage: event,
EventJson: eventJson,
RawData: data,
TxPubkey: txPubkey,
InputValue: uint64(prevTx.TxOut[txIn.PreviousOutIndex].Value),
})
}
}
return events, nil
}
// Process implements indexer.Processor.
func (p *Processor) Process(ctx context.Context, inputs []*types.Block) error {
for _, block := range inputs {
logger.InfoContext(ctx, "NodeSale processing a block",
slogx.Int64("block", block.Header.Height),
slogx.Stringer("hash", block.Header.Hash))
// parse all event from each transaction including reading tx wallet
events, err := p.parseTransactions(ctx, block.Transactions)
if err != nil {
return errors.Wrap(err, "Invalid data from bitcoin client")
}
// open transaction
qtx, err := p.NodeSaleDg.BeginNodeSaleTx(ctx)
if err != nil {
return errors.Wrap(err, "Failed to create transaction")
}
defer func() {
err = qtx.Rollback(ctx)
if err != nil {
logger.PanicContext(ctx, "Failed to rollback db")
}
}()
// write block
err = qtx.CreateBlock(ctx, entity.Block{
BlockHeight: block.Header.Height,
BlockHash: block.Header.Hash.String(),
Module: p.Name(),
})
if err != nil {
return errors.Wrapf(err, "Failed to add block %d", block.Header.Height)
}
// for each events
for _, event := range events {
logger.InfoContext(ctx, "NodeSale processing event",
slogx.Uint32("txIndex", event.Transaction.Index),
slogx.Int64("blockHeight", block.Header.Height),
slogx.Stringer("blockhash", block.Header.Hash),
)
eventMessage := event.EventMessage
switch eventMessage.Action {
case protobuf.Action_ACTION_DEPLOY:
err = p.ProcessDeploy(ctx, qtx, block, event)
if err != nil {
return errors.Wrapf(err, "Failed to deploy at block %d", block.Header.Height)
}
case protobuf.Action_ACTION_DELEGATE:
err = p.ProcessDelegate(ctx, qtx, block, event)
if err != nil {
return errors.Wrapf(err, "Failed to delegate at block %d", block.Header.Height)
}
case protobuf.Action_ACTION_PURCHASE:
err = p.ProcessPurchase(ctx, qtx, block, event)
if err != nil {
return errors.Wrapf(err, "Failed to purchase at block %d", block.Header.Height)
}
default:
logger.DebugContext(ctx, "Invalid event ACTION", slogx.Stringer("txHash", (event.Transaction.TxHash)))
}
}
// close transaction
err = qtx.Commit(ctx)
if err != nil {
return errors.Wrap(err, "Failed to commit transaction")
}
logger.InfoContext(ctx, "NodeSale finished processing block",
slogx.Int64("block", block.Header.Height),
slogx.Stringer("hash", block.Header.Hash))
}
return nil
}
// RevertData implements indexer.Processor.
func (p *Processor) RevertData(ctx context.Context, from int64) error {
qtx, err := p.NodeSaleDg.BeginNodeSaleTx(ctx)
if err != nil {
return errors.Wrap(err, "Failed to create transaction")
}
defer func() { _ = qtx.Rollback(ctx) }()
_, err = qtx.RemoveBlockFrom(ctx, from)
if err != nil {
return errors.Wrap(err, "Failed to remove blocks.")
}
affected, err := qtx.RemoveEventsFromBlock(ctx, from)
if err != nil {
return errors.Wrap(err, "Failed to remove events.")
}
_, err = qtx.ClearDelegate(ctx)
if err != nil {
return errors.Wrap(err, "Failed to clear delegate from nodes")
}
err = qtx.Commit(ctx)
if err != nil {
return errors.Wrap(err, "Failed to commit transaction")
}
logger.InfoContext(ctx, "Events removed",
slogx.Int64("Total removed", affected))
return nil
}
// VerifyStates implements indexer.Processor.
func (p *Processor) VerifyStates(ctx context.Context) error {
panic("unimplemented")
}
var _ indexer.Processor[*types.Block] = (*Processor)(nil)

View File

@@ -0,0 +1,806 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.34.1
// protoc v5.26.1
// source: modules/nodesale/protobuf/nodesale.proto
// protoc modules/nodesale/protobuf/nodesale.proto --go_out=. --go_opt=module=github.com/gaze-network/indexer-network
package protobuf
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type Action int32
const (
Action_ACTION_DEPLOY Action = 0
Action_ACTION_PURCHASE Action = 1
Action_ACTION_DELEGATE Action = 2
)
// Enum value maps for Action.
var (
Action_name = map[int32]string{
0: "ACTION_DEPLOY",
1: "ACTION_PURCHASE",
2: "ACTION_DELEGATE",
}
Action_value = map[string]int32{
"ACTION_DEPLOY": 0,
"ACTION_PURCHASE": 1,
"ACTION_DELEGATE": 2,
}
)
func (x Action) Enum() *Action {
p := new(Action)
*p = x
return p
}
func (x Action) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (Action) Descriptor() protoreflect.EnumDescriptor {
return file_modules_nodesale_protobuf_nodesale_proto_enumTypes[0].Descriptor()
}
func (Action) Type() protoreflect.EnumType {
return &file_modules_nodesale_protobuf_nodesale_proto_enumTypes[0]
}
func (x Action) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use Action.Descriptor instead.
func (Action) EnumDescriptor() ([]byte, []int) {
return file_modules_nodesale_protobuf_nodesale_proto_rawDescGZIP(), []int{0}
}
type NodeSaleEvent struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Action Action `protobuf:"varint,1,opt,name=action,proto3,enum=nodesale.Action" json:"action,omitempty"`
Deploy *ActionDeploy `protobuf:"bytes,2,opt,name=deploy,proto3,oneof" json:"deploy,omitempty"`
Purchase *ActionPurchase `protobuf:"bytes,3,opt,name=purchase,proto3,oneof" json:"purchase,omitempty"`
Delegate *ActionDelegate `protobuf:"bytes,4,opt,name=delegate,proto3,oneof" json:"delegate,omitempty"`
}
func (x *NodeSaleEvent) Reset() {
*x = NodeSaleEvent{}
if protoimpl.UnsafeEnabled {
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *NodeSaleEvent) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NodeSaleEvent) ProtoMessage() {}
func (x *NodeSaleEvent) ProtoReflect() protoreflect.Message {
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NodeSaleEvent.ProtoReflect.Descriptor instead.
func (*NodeSaleEvent) Descriptor() ([]byte, []int) {
return file_modules_nodesale_protobuf_nodesale_proto_rawDescGZIP(), []int{0}
}
func (x *NodeSaleEvent) GetAction() Action {
if x != nil {
return x.Action
}
return Action_ACTION_DEPLOY
}
func (x *NodeSaleEvent) GetDeploy() *ActionDeploy {
if x != nil {
return x.Deploy
}
return nil
}
func (x *NodeSaleEvent) GetPurchase() *ActionPurchase {
if x != nil {
return x.Purchase
}
return nil
}
func (x *NodeSaleEvent) GetDelegate() *ActionDelegate {
if x != nil {
return x.Delegate
}
return nil
}
type ActionDeploy struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
StartsAt uint32 `protobuf:"varint,2,opt,name=startsAt,proto3" json:"startsAt,omitempty"`
EndsAt uint32 `protobuf:"varint,3,opt,name=endsAt,proto3" json:"endsAt,omitempty"`
Tiers []*Tier `protobuf:"bytes,4,rep,name=tiers,proto3" json:"tiers,omitempty"`
SellerPublicKey string `protobuf:"bytes,5,opt,name=sellerPublicKey,proto3" json:"sellerPublicKey,omitempty"`
MaxPerAddress uint32 `protobuf:"varint,6,opt,name=maxPerAddress,proto3" json:"maxPerAddress,omitempty"`
MaxDiscountPercentage uint32 `protobuf:"varint,7,opt,name=maxDiscountPercentage,proto3" json:"maxDiscountPercentage,omitempty"`
SellerWallet string `protobuf:"bytes,8,opt,name=sellerWallet,proto3" json:"sellerWallet,omitempty"`
}
func (x *ActionDeploy) Reset() {
*x = ActionDeploy{}
if protoimpl.UnsafeEnabled {
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ActionDeploy) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ActionDeploy) ProtoMessage() {}
func (x *ActionDeploy) ProtoReflect() protoreflect.Message {
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ActionDeploy.ProtoReflect.Descriptor instead.
func (*ActionDeploy) Descriptor() ([]byte, []int) {
return file_modules_nodesale_protobuf_nodesale_proto_rawDescGZIP(), []int{1}
}
func (x *ActionDeploy) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *ActionDeploy) GetStartsAt() uint32 {
if x != nil {
return x.StartsAt
}
return 0
}
func (x *ActionDeploy) GetEndsAt() uint32 {
if x != nil {
return x.EndsAt
}
return 0
}
func (x *ActionDeploy) GetTiers() []*Tier {
if x != nil {
return x.Tiers
}
return nil
}
func (x *ActionDeploy) GetSellerPublicKey() string {
if x != nil {
return x.SellerPublicKey
}
return ""
}
func (x *ActionDeploy) GetMaxPerAddress() uint32 {
if x != nil {
return x.MaxPerAddress
}
return 0
}
func (x *ActionDeploy) GetMaxDiscountPercentage() uint32 {
if x != nil {
return x.MaxDiscountPercentage
}
return 0
}
func (x *ActionDeploy) GetSellerWallet() string {
if x != nil {
return x.SellerWallet
}
return ""
}
type Tier struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
PriceSat uint32 `protobuf:"varint,1,opt,name=priceSat,proto3" json:"priceSat,omitempty"`
Limit uint32 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"`
MaxPerAddress uint32 `protobuf:"varint,3,opt,name=maxPerAddress,proto3" json:"maxPerAddress,omitempty"`
}
func (x *Tier) Reset() {
*x = Tier{}
if protoimpl.UnsafeEnabled {
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Tier) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Tier) ProtoMessage() {}
func (x *Tier) ProtoReflect() protoreflect.Message {
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Tier.ProtoReflect.Descriptor instead.
func (*Tier) Descriptor() ([]byte, []int) {
return file_modules_nodesale_protobuf_nodesale_proto_rawDescGZIP(), []int{2}
}
func (x *Tier) GetPriceSat() uint32 {
if x != nil {
return x.PriceSat
}
return 0
}
func (x *Tier) GetLimit() uint32 {
if x != nil {
return x.Limit
}
return 0
}
func (x *Tier) GetMaxPerAddress() uint32 {
if x != nil {
return x.MaxPerAddress
}
return 0
}
type ActionPurchase struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Payload *PurchasePayload `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"`
SellerSignature string `protobuf:"bytes,2,opt,name=sellerSignature,proto3" json:"sellerSignature,omitempty"`
}
func (x *ActionPurchase) Reset() {
*x = ActionPurchase{}
if protoimpl.UnsafeEnabled {
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ActionPurchase) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ActionPurchase) ProtoMessage() {}
func (x *ActionPurchase) ProtoReflect() protoreflect.Message {
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ActionPurchase.ProtoReflect.Descriptor instead.
func (*ActionPurchase) Descriptor() ([]byte, []int) {
return file_modules_nodesale_protobuf_nodesale_proto_rawDescGZIP(), []int{3}
}
func (x *ActionPurchase) GetPayload() *PurchasePayload {
if x != nil {
return x.Payload
}
return nil
}
func (x *ActionPurchase) GetSellerSignature() string {
if x != nil {
return x.SellerSignature
}
return ""
}
type PurchasePayload struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
DeployID *ActionID `protobuf:"bytes,1,opt,name=deployID,proto3" json:"deployID,omitempty"`
BuyerPublicKey string `protobuf:"bytes,2,opt,name=buyerPublicKey,proto3" json:"buyerPublicKey,omitempty"`
NodeIDs []uint32 `protobuf:"varint,3,rep,packed,name=nodeIDs,proto3" json:"nodeIDs,omitempty"`
TotalAmountSat int64 `protobuf:"varint,4,opt,name=totalAmountSat,proto3" json:"totalAmountSat,omitempty"`
TimeOutBlock uint64 `protobuf:"varint,5,opt,name=timeOutBlock,proto3" json:"timeOutBlock,omitempty"`
}
func (x *PurchasePayload) Reset() {
*x = PurchasePayload{}
if protoimpl.UnsafeEnabled {
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *PurchasePayload) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PurchasePayload) ProtoMessage() {}
func (x *PurchasePayload) ProtoReflect() protoreflect.Message {
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PurchasePayload.ProtoReflect.Descriptor instead.
func (*PurchasePayload) Descriptor() ([]byte, []int) {
return file_modules_nodesale_protobuf_nodesale_proto_rawDescGZIP(), []int{4}
}
func (x *PurchasePayload) GetDeployID() *ActionID {
if x != nil {
return x.DeployID
}
return nil
}
func (x *PurchasePayload) GetBuyerPublicKey() string {
if x != nil {
return x.BuyerPublicKey
}
return ""
}
func (x *PurchasePayload) GetNodeIDs() []uint32 {
if x != nil {
return x.NodeIDs
}
return nil
}
func (x *PurchasePayload) GetTotalAmountSat() int64 {
if x != nil {
return x.TotalAmountSat
}
return 0
}
func (x *PurchasePayload) GetTimeOutBlock() uint64 {
if x != nil {
return x.TimeOutBlock
}
return 0
}
type ActionID struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Block uint64 `protobuf:"varint,1,opt,name=block,proto3" json:"block,omitempty"`
TxIndex uint32 `protobuf:"varint,2,opt,name=txIndex,proto3" json:"txIndex,omitempty"`
}
func (x *ActionID) Reset() {
*x = ActionID{}
if protoimpl.UnsafeEnabled {
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ActionID) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ActionID) ProtoMessage() {}
func (x *ActionID) ProtoReflect() protoreflect.Message {
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[5]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ActionID.ProtoReflect.Descriptor instead.
func (*ActionID) Descriptor() ([]byte, []int) {
return file_modules_nodesale_protobuf_nodesale_proto_rawDescGZIP(), []int{5}
}
func (x *ActionID) GetBlock() uint64 {
if x != nil {
return x.Block
}
return 0
}
func (x *ActionID) GetTxIndex() uint32 {
if x != nil {
return x.TxIndex
}
return 0
}
type ActionDelegate struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
DelegateePublicKey string `protobuf:"bytes,1,opt,name=delegateePublicKey,proto3" json:"delegateePublicKey,omitempty"`
NodeIDs []uint32 `protobuf:"varint,2,rep,packed,name=nodeIDs,proto3" json:"nodeIDs,omitempty"`
DeployID *ActionID `protobuf:"bytes,3,opt,name=deployID,proto3" json:"deployID,omitempty"`
}
func (x *ActionDelegate) Reset() {
*x = ActionDelegate{}
if protoimpl.UnsafeEnabled {
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ActionDelegate) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ActionDelegate) ProtoMessage() {}
func (x *ActionDelegate) ProtoReflect() protoreflect.Message {
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[6]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ActionDelegate.ProtoReflect.Descriptor instead.
func (*ActionDelegate) Descriptor() ([]byte, []int) {
return file_modules_nodesale_protobuf_nodesale_proto_rawDescGZIP(), []int{6}
}
func (x *ActionDelegate) GetDelegateePublicKey() string {
if x != nil {
return x.DelegateePublicKey
}
return ""
}
func (x *ActionDelegate) GetNodeIDs() []uint32 {
if x != nil {
return x.NodeIDs
}
return nil
}
func (x *ActionDelegate) GetDeployID() *ActionID {
if x != nil {
return x.DeployID
}
return nil
}
var File_modules_nodesale_protobuf_nodesale_proto protoreflect.FileDescriptor
var file_modules_nodesale_protobuf_nodesale_proto_rawDesc = []byte{
0x0a, 0x28, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x61,
0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x6e, 0x6f, 0x64, 0x65,
0x73, 0x61, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x6e, 0x6f, 0x64, 0x65,
0x73, 0x61, 0x6c, 0x65, 0x22, 0x89, 0x02, 0x0a, 0x0d, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x61, 0x6c,
0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x61, 0x6c,
0x65, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
0x12, 0x33, 0x0a, 0x06, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x16, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x61, 0x6c, 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69,
0x6f, 0x6e, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x48, 0x00, 0x52, 0x06, 0x64, 0x65, 0x70, 0x6c,
0x6f, 0x79, 0x88, 0x01, 0x01, 0x12, 0x39, 0x0a, 0x08, 0x70, 0x75, 0x72, 0x63, 0x68, 0x61, 0x73,
0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x61,
0x6c, 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x75, 0x72, 0x63, 0x68, 0x61, 0x73,
0x65, 0x48, 0x01, 0x52, 0x08, 0x70, 0x75, 0x72, 0x63, 0x68, 0x61, 0x73, 0x65, 0x88, 0x01, 0x01,
0x12, 0x39, 0x0a, 0x08, 0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x61, 0x6c, 0x65, 0x2e, 0x41, 0x63,
0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x48, 0x02, 0x52, 0x08,
0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f,
0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x70, 0x75, 0x72, 0x63, 0x68,
0x61, 0x73, 0x65, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65,
0x22, 0xa6, 0x02, 0x0a, 0x0c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x6c, 0x6f,
0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, 0x73, 0x41,
0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, 0x73, 0x41,
0x74, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x64, 0x73, 0x41, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28,
0x0d, 0x52, 0x06, 0x65, 0x6e, 0x64, 0x73, 0x41, 0x74, 0x12, 0x24, 0x0a, 0x05, 0x74, 0x69, 0x65,
0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x73,
0x61, 0x6c, 0x65, 0x2e, 0x54, 0x69, 0x65, 0x72, 0x52, 0x05, 0x74, 0x69, 0x65, 0x72, 0x73, 0x12,
0x28, 0x0a, 0x0f, 0x73, 0x65, 0x6c, 0x6c, 0x65, 0x72, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b,
0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x65, 0x6c, 0x6c, 0x65, 0x72,
0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x0d, 0x6d, 0x61, 0x78,
0x50, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d,
0x52, 0x0d, 0x6d, 0x61, 0x78, 0x50, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12,
0x34, 0x0a, 0x15, 0x6d, 0x61, 0x78, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x65,
0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x15,
0x6d, 0x61, 0x78, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x65, 0x72, 0x63, 0x65,
0x6e, 0x74, 0x61, 0x67, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x73, 0x65, 0x6c, 0x6c, 0x65, 0x72, 0x57,
0x61, 0x6c, 0x6c, 0x65, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x65, 0x6c,
0x6c, 0x65, 0x72, 0x57, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x22, 0x5e, 0x0a, 0x04, 0x54, 0x69, 0x65,
0x72, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x63, 0x65, 0x53, 0x61, 0x74, 0x18, 0x01, 0x20,
0x01, 0x28, 0x0d, 0x52, 0x08, 0x70, 0x72, 0x69, 0x63, 0x65, 0x53, 0x61, 0x74, 0x12, 0x14, 0x0a,
0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6c, 0x69,
0x6d, 0x69, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x6d, 0x61, 0x78, 0x50, 0x65, 0x72, 0x41, 0x64, 0x64,
0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x50,
0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x6f, 0x0a, 0x0e, 0x41, 0x63, 0x74,
0x69, 0x6f, 0x6e, 0x50, 0x75, 0x72, 0x63, 0x68, 0x61, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x70,
0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6e,
0x6f, 0x64, 0x65, 0x73, 0x61, 0x6c, 0x65, 0x2e, 0x50, 0x75, 0x72, 0x63, 0x68, 0x61, 0x73, 0x65,
0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64,
0x12, 0x28, 0x0a, 0x0f, 0x73, 0x65, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74,
0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x65, 0x6c, 0x6c, 0x65,
0x72, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0xcf, 0x01, 0x0a, 0x0f, 0x50,
0x75, 0x72, 0x63, 0x68, 0x61, 0x73, 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x2e,
0x0a, 0x08, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x12, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x61, 0x6c, 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69,
0x6f, 0x6e, 0x49, 0x44, 0x52, 0x08, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x49, 0x44, 0x12, 0x26,
0x0a, 0x0e, 0x62, 0x75, 0x79, 0x65, 0x72, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79,
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x62, 0x75, 0x79, 0x65, 0x72, 0x50, 0x75, 0x62,
0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x44,
0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x73,
0x12, 0x26, 0x0a, 0x0e, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x41, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x53,
0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x41,
0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x61, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65,
0x4f, 0x75, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c,
0x74, 0x69, 0x6d, 0x65, 0x4f, 0x75, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x3a, 0x0a, 0x08,
0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63,
0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x18,
0x0a, 0x07, 0x74, 0x78, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52,
0x07, 0x74, 0x78, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x22, 0x8a, 0x01, 0x0a, 0x0e, 0x41, 0x63, 0x74,
0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x64,
0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65,
0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74,
0x65, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x6e,
0x6f, 0x64, 0x65, 0x49, 0x44, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x07, 0x6e, 0x6f,
0x64, 0x65, 0x49, 0x44, 0x73, 0x12, 0x2e, 0x0a, 0x08, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x49,
0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x61,
0x6c, 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x52, 0x08, 0x64, 0x65, 0x70,
0x6c, 0x6f, 0x79, 0x49, 0x44, 0x2a, 0x45, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12,
0x11, 0x0a, 0x0d, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x50, 0x4c, 0x4f, 0x59,
0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x55, 0x52,
0x43, 0x48, 0x41, 0x53, 0x45, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x41, 0x43, 0x54, 0x49, 0x4f,
0x4e, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x47, 0x41, 0x54, 0x45, 0x10, 0x02, 0x42, 0x43, 0x5a, 0x41,
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x61, 0x7a, 0x65, 0x2d,
0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2d,
0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x2f,
0x6e, 0x6f, 0x64, 0x65, 0x73, 0x61, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_modules_nodesale_protobuf_nodesale_proto_rawDescOnce sync.Once
file_modules_nodesale_protobuf_nodesale_proto_rawDescData = file_modules_nodesale_protobuf_nodesale_proto_rawDesc
)
func file_modules_nodesale_protobuf_nodesale_proto_rawDescGZIP() []byte {
file_modules_nodesale_protobuf_nodesale_proto_rawDescOnce.Do(func() {
file_modules_nodesale_protobuf_nodesale_proto_rawDescData = protoimpl.X.CompressGZIP(file_modules_nodesale_protobuf_nodesale_proto_rawDescData)
})
return file_modules_nodesale_protobuf_nodesale_proto_rawDescData
}
var file_modules_nodesale_protobuf_nodesale_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_modules_nodesale_protobuf_nodesale_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
var file_modules_nodesale_protobuf_nodesale_proto_goTypes = []interface{}{
(Action)(0), // 0: nodesale.Action
(*NodeSaleEvent)(nil), // 1: nodesale.NodeSaleEvent
(*ActionDeploy)(nil), // 2: nodesale.ActionDeploy
(*Tier)(nil), // 3: nodesale.Tier
(*ActionPurchase)(nil), // 4: nodesale.ActionPurchase
(*PurchasePayload)(nil), // 5: nodesale.PurchasePayload
(*ActionID)(nil), // 6: nodesale.ActionID
(*ActionDelegate)(nil), // 7: nodesale.ActionDelegate
}
var file_modules_nodesale_protobuf_nodesale_proto_depIdxs = []int32{
0, // 0: nodesale.NodeSaleEvent.action:type_name -> nodesale.Action
2, // 1: nodesale.NodeSaleEvent.deploy:type_name -> nodesale.ActionDeploy
4, // 2: nodesale.NodeSaleEvent.purchase:type_name -> nodesale.ActionPurchase
7, // 3: nodesale.NodeSaleEvent.delegate:type_name -> nodesale.ActionDelegate
3, // 4: nodesale.ActionDeploy.tiers:type_name -> nodesale.Tier
5, // 5: nodesale.ActionPurchase.payload:type_name -> nodesale.PurchasePayload
6, // 6: nodesale.PurchasePayload.deployID:type_name -> nodesale.ActionID
6, // 7: nodesale.ActionDelegate.deployID:type_name -> nodesale.ActionID
8, // [8:8] is the sub-list for method output_type
8, // [8:8] is the sub-list for method input_type
8, // [8:8] is the sub-list for extension type_name
8, // [8:8] is the sub-list for extension extendee
0, // [0:8] is the sub-list for field type_name
}
func init() { file_modules_nodesale_protobuf_nodesale_proto_init() }
func file_modules_nodesale_protobuf_nodesale_proto_init() {
if File_modules_nodesale_protobuf_nodesale_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_modules_nodesale_protobuf_nodesale_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*NodeSaleEvent); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_modules_nodesale_protobuf_nodesale_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ActionDeploy); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_modules_nodesale_protobuf_nodesale_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Tier); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_modules_nodesale_protobuf_nodesale_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ActionPurchase); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_modules_nodesale_protobuf_nodesale_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PurchasePayload); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_modules_nodesale_protobuf_nodesale_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ActionID); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_modules_nodesale_protobuf_nodesale_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ActionDelegate); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
file_modules_nodesale_protobuf_nodesale_proto_msgTypes[0].OneofWrappers = []interface{}{}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_modules_nodesale_protobuf_nodesale_proto_rawDesc,
NumEnums: 1,
NumMessages: 7,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_modules_nodesale_protobuf_nodesale_proto_goTypes,
DependencyIndexes: file_modules_nodesale_protobuf_nodesale_proto_depIdxs,
EnumInfos: file_modules_nodesale_protobuf_nodesale_proto_enumTypes,
MessageInfos: file_modules_nodesale_protobuf_nodesale_proto_msgTypes,
}.Build()
File_modules_nodesale_protobuf_nodesale_proto = out.File
file_modules_nodesale_protobuf_nodesale_proto_rawDesc = nil
file_modules_nodesale_protobuf_nodesale_proto_goTypes = nil
file_modules_nodesale_protobuf_nodesale_proto_depIdxs = nil
}


@@ -0,0 +1,60 @@
syntax = "proto3";
// protoc modules/nodesale/protobuf/nodesale.proto --go_out=. --go_opt=module=github.com/gaze-network/indexer-network
package nodesale;
option go_package = "github.com/gaze-network/indexer-network/modules/nodesale/protobuf";
enum Action {
ACTION_DEPLOY = 0;
ACTION_PURCHASE = 1;
ACTION_DELEGATE = 2;
}
message NodeSaleEvent {
Action action = 1;
optional ActionDeploy deploy = 2;
optional ActionPurchase purchase = 3;
optional ActionDelegate delegate = 4;
}
message ActionDeploy {
string name = 1;
uint32 startsAt = 2;
uint32 endsAt = 3;
repeated Tier tiers = 4;
string sellerPublicKey = 5;
uint32 maxPerAddress = 6;
uint32 maxDiscountPercentage = 7;
string sellerWallet = 8;
}
message Tier {
uint32 priceSat = 1;
uint32 limit = 2;
uint32 maxPerAddress = 3;
}
message ActionPurchase {
PurchasePayload payload = 1;
string sellerSignature = 2;
}
message PurchasePayload {
ActionID deployID = 1;
string buyerPublicKey = 2;
repeated uint32 nodeIDs = 3;
int64 totalAmountSat = 4;
uint64 timeOutBlock = 5;
}
message ActionID {
uint64 block = 1;
uint32 txIndex = 2;
}
message ActionDelegate {
string delegateePublicKey = 1;
repeated uint32 nodeIDs = 2;
ActionID deployID = 3;
}
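The generated Go types above mirror this schema. As a hedged, standalone sketch (not part of the repository; every value below is illustrative), a deploy event could be built and serialized with the generated package like this:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"

	"github.com/gaze-network/indexer-network/modules/nodesale/protobuf"
)

func main() {
	// Assemble a deploy action; startsAt/endsAt are uint32 in the schema and are
	// presumably unix timestamps (an assumption made here for illustration only).
	event := &protobuf.NodeSaleEvent{
		Action: protobuf.Action_ACTION_DEPLOY,
		Deploy: &protobuf.ActionDeploy{
			Name:     "example-sale", // illustrative name
			StartsAt: 1700000000,
			EndsAt:   1700600000,
			Tiers: []*protobuf.Tier{
				{PriceSat: 100, Limit: 5, MaxPerAddress: 100},
				{PriceSat: 200, Limit: 4, MaxPerAddress: 2},
			},
			SellerPublicKey:       "<compressed pubkey hex>", // placeholder
			MaxPerAddress:         100,
			MaxDiscountPercentage: 50,
			SellerWallet:          "<seller address>", // placeholder
		},
	}

	raw, err := proto.Marshal(event)
	if err != nil {
		panic(err)
	}
	fmt.Printf("encoded NodeSaleEvent: %d bytes\n", len(raw))
}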


@@ -0,0 +1,12 @@
package nodesale
import (
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcutil"
)
func (p *Processor) PubkeyToPkHashAddress(pubKey *btcec.PublicKey) btcutil.Address {
addrPubKey, _ := btcutil.NewAddressPubKey(pubKey.SerializeCompressed(), p.Network.ChainParams())
addrPubKeyHash := addrPubKey.AddressPubKeyHash()
return addrPubKeyHash
}
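Note that the error from btcutil.NewAddressPubKey is discarded above; for the compressed serialization of a valid key this should not fail in practice, but a checked variant (hypothetical, not in the original file) would look like:

func (p *Processor) pubkeyToPkHashAddressChecked(pubKey *btcec.PublicKey) (btcutil.Address, error) {
	addrPubKey, err := btcutil.NewAddressPubKey(pubKey.SerializeCompressed(), p.Network.ChainParams())
	if err != nil {
		// Only reachable if the serialized key bytes cannot be parsed.
		return nil, err
	}
	return addrPubKey.AddressPubKeyHash(), nil
}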


@@ -0,0 +1,87 @@
package nodesale
import (
"context"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
purchasevalidator "github.com/gaze-network/indexer-network/modules/nodesale/internal/validator/purchase"
)
func (p *Processor) ProcessPurchase(ctx context.Context, qtx datagateway.NodeSaleDataGatewayWithTx, block *types.Block, event NodeSaleEvent) error {
purchase := event.EventMessage.Purchase
payload := purchase.Payload
validator := purchasevalidator.New()
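// The purchase validator accumulates a Valid flag and a Reason string across the
// checks below; a failing check marks the event invalid rather than aborting, so
// the event is still recorded via CreateEvent further down with Valid=false.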
validator.EqualXonlyPublicKey(payload.BuyerPublicKey, event.TxPubkey)
_, deploy, err := validator.NodeSaleExists(ctx, qtx, payload)
if err != nil {
return errors.Wrap(err, "failed to query node sale")
}
validator.ValidTimestamp(deploy, block.Header.Timestamp)
validator.WithinTimeoutBlock(payload.TimeOutBlock, uint64(event.Transaction.BlockHeight))
validator.VerifySignature(purchase, deploy)
_, tierMap := validator.ValidTiers(payload, deploy)
tiers := tierMap.Tiers
buyingTiersCount := tierMap.BuyingTiersCount
nodeIdToTier := tierMap.NodeIdToTier
_, err = validator.ValidUnpurchasedNodes(ctx, qtx, payload)
if err != nil {
return errors.Wrap(err, "failed to query purchased nodes")
}
_, meta := validator.ValidPaidAmount(payload, deploy, event.InputValue, tiers, buyingTiersCount, p.Network.ChainParams())
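// ValidPaidAmount also yields metadata describing the payment check; it is
// persisted on the event record below through the Metadata field.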
_, err = validator.WithinLimit(ctx, qtx, payload, deploy, tiers, buyingTiersCount)
if err != nil {
return errors.Wrap(err, "failed to query nodes for address limit check")
}
err = qtx.CreateEvent(ctx, entity.NodeSaleEvent{
TxHash: event.Transaction.TxHash.String(),
TxIndex: int32(event.Transaction.Index),
Action: int32(event.EventMessage.Action),
RawMessage: event.RawData,
ParsedMessage: event.EventJson,
BlockTimestamp: block.Header.Timestamp,
BlockHash: event.Transaction.BlockHash.String(),
BlockHeight: event.Transaction.BlockHeight,
Valid: validator.Valid,
WalletAddress: p.PubkeyToPkHashAddress(event.TxPubkey).EncodeAddress(),
Metadata: meta,
Reason: validator.Reason,
})
if err != nil {
return errors.Wrap(err, "Failed to insert event")
}
if validator.Valid {
// add to node
for _, nodeId := range payload.NodeIDs {
err := qtx.CreateNode(ctx, entity.Node{
SaleBlock: deploy.BlockHeight,
SaleTxIndex: deploy.TxIndex,
NodeID: nodeId,
TierIndex: nodeIdToTier[nodeId],
DelegatedTo: "",
OwnerPublicKey: payload.BuyerPublicKey,
PurchaseTxHash: event.Transaction.TxHash.String(),
DelegateTxHash: "",
})
if err != nil {
return errors.Wrap(err, "Failed to insert node")
}
}
}
return nil
}
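The tests in the next file construct the seller signature by proto-marshaling the PurchasePayload, double-SHA256 hashing the bytes, and signing with ECDSA. The helper below is a hypothetical, standalone sketch of the matching verification step (an assumption about what the validator's VerifySignature checks; the real validator may hash the original raw payload bytes instead, since re-marshaling is not guaranteed to be byte-identical):

package nodesale

import (
	"encoding/hex"

	"github.com/btcsuite/btcd/btcec/v2"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa"
	"google.golang.org/protobuf/proto"

	"github.com/gaze-network/indexer-network/modules/nodesale/protobuf"
)

// verifySellerSignature mirrors the construction used in the tests: marshal the
// payload, double-SHA256 it, and verify the DER-encoded signature against the
// seller's compressed public key.
func verifySellerSignature(payload *protobuf.PurchasePayload, sellerSignatureHex, sellerPubkeyHex string) (bool, error) {
	payloadBytes, err := proto.Marshal(payload)
	if err != nil {
		return false, err
	}
	hash := chainhash.DoubleHashB(payloadBytes)

	sigBytes, err := hex.DecodeString(sellerSignatureHex)
	if err != nil {
		return false, err
	}
	sig, err := ecdsa.ParseDERSignature(sigBytes)
	if err != nil {
		return false, err
	}

	pubKeyBytes, err := hex.DecodeString(sellerPubkeyHex)
	if err != nil {
		return false, err
	}
	pubKey, err := btcec.ParsePubKey(pubKeyBytes)
	if err != nil {
		return false, err
	}
	return sig.Verify(hash, pubKey), nil
}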


@@ -0,0 +1,902 @@
package nodesale
import (
"context"
"encoding/hex"
"testing"
"time"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway/mocks"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/validator"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/validator/purchase"
"github.com/gaze-network/indexer-network/modules/nodesale/protobuf"
"github.com/samber/lo"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/proto"
)
func TestInvalidPurchase(t *testing.T) {
ctx := context.Background()
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
buyerPrivateKey, err := btcec.NewPrivateKey()
require.NoError(t, err)
buyerPubkeyHex := hex.EncodeToString(buyerPrivateKey.PubKey().SerializeCompressed())
message := &protobuf.NodeSaleEvent{
Action: protobuf.Action_ACTION_PURCHASE,
Purchase: &protobuf.ActionPurchase{
Payload: &protobuf.PurchasePayload{
DeployID: &protobuf.ActionID{
Block: 111,
TxIndex: 1,
},
NodeIDs: []uint32{1, 2},
BuyerPublicKey: buyerPubkeyHex,
TotalAmountSat: 500,
TimeOutBlock: uint64(testBlockHeight) + 5,
},
},
}
event, block := assembleTestEvent(buyerPrivateKey, "030303030303", "030303030303", 0, 0, message)
mockDgTx.EXPECT().GetNodeSale(mock.Anything, mock.Anything).Return(nil, nil)
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
return event.Valid == false
})).Return(nil)
err = p.ProcessPurchase(ctx, mockDgTx, block, event)
require.NoError(t, err)
mockDgTx.AssertNotCalled(t, "CreateNode")
}
func TestInvalidBuyerKey(t *testing.T) {
ctx := context.Background()
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
strangerPrivateKey, _ := btcec.NewPrivateKey()
strangerPrivateKeyHex := hex.EncodeToString(strangerPrivateKey.PubKey().SerializeCompressed())
buyerPrivateKey, _ := btcec.NewPrivateKey()
message := &protobuf.NodeSaleEvent{
Action: protobuf.Action_ACTION_PURCHASE,
Purchase: &protobuf.ActionPurchase{
Payload: &protobuf.PurchasePayload{
DeployID: &protobuf.ActionID{
Block: 100,
TxIndex: 1,
},
NodeIDs: []uint32{1, 2},
BuyerPublicKey: strangerPrivateKeyHex,
TotalAmountSat: 200,
TimeOutBlock: uint64(testBlockHeight) + 5,
},
},
}
event, block := assembleTestEvent(buyerPrivateKey, "0707070707", "0707070707", 0, 0, message)
block.Header.Timestamp = time.Now().UTC()
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
return event.Valid == false && event.Reason == validator.INVALID_PUBKEY
})).Return(nil)
err := p.ProcessPurchase(ctx, mockDgTx, block, event)
require.NoError(t, err)
mockDgTx.AssertNotCalled(t, "CreateNode")
}
func TestInvalidTimestamp(t *testing.T) {
ctx := context.Background()
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
sellerPrivateKey, err := btcec.NewPrivateKey()
require.NoError(t, err)
sellerPubkeyHex := hex.EncodeToString(sellerPrivateKey.PubKey().SerializeCompressed())
sellerWallet := p.PubkeyToPkHashAddress(sellerPrivateKey.PubKey())
startAt := time.Now().Add(time.Hour * -1)
endAt := time.Now().Add(time.Hour * 1)
tiers := lo.Map([]*protobuf.Tier{
{
PriceSat: 100,
Limit: 5,
MaxPerAddress: 100,
},
{
PriceSat: 200,
Limit: 5,
MaxPerAddress: 100,
},
}, func(tier *protobuf.Tier, _ int) []byte {
tierJson, err := protojson.Marshal(tier)
require.NoError(t, err)
return tierJson
})
mockDgTx.EXPECT().GetNodeSale(mock.Anything, datagateway.GetNodeSaleParams{
BlockHeight: 100,
TxIndex: 1,
}).Return([]entity.NodeSale{
{
BlockHeight: 100,
TxIndex: 1,
Name: t.Name(),
StartsAt: startAt,
EndsAt: endAt,
Tiers: tiers,
SellerPublicKey: sellerPubkeyHex,
MaxPerAddress: 100,
DeployTxHash: "040404040404",
MaxDiscountPercentage: 50,
SellerWallet: sellerWallet.EncodeAddress(),
},
}, nil)
buyerPrivateKey, _ := btcec.NewPrivateKey()
buyerPubkeyHex := hex.EncodeToString(buyerPrivateKey.PubKey().SerializeCompressed())
message := &protobuf.NodeSaleEvent{
Action: protobuf.Action_ACTION_PURCHASE,
Purchase: &protobuf.ActionPurchase{
Payload: &protobuf.PurchasePayload{
DeployID: &protobuf.ActionID{
Block: 100,
TxIndex: 1,
},
NodeIDs: []uint32{1, 2},
BuyerPublicKey: buyerPubkeyHex,
TotalAmountSat: 200,
TimeOutBlock: uint64(testBlockHeight) + 5,
},
},
}
event, block := assembleTestEvent(buyerPrivateKey, "050505050505", "050505050505", 0, 0, message)
block.Header.Timestamp = time.Now().UTC().Add(time.Hour * 2)
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
return event.Valid == false && event.Reason == purchase.PURCHASE_TIMEOUT
})).Return(nil)
err = p.ProcessPurchase(ctx, mockDgTx, block, event)
require.NoError(t, err)
mockDgTx.AssertNotCalled(t, "CreateNode")
}
func TestTimeOut(t *testing.T) {
ctx := context.Background()
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
sellerPrivateKey, _ := btcec.NewPrivateKey()
sellerPubkeyHex := hex.EncodeToString(sellerPrivateKey.PubKey().SerializeCompressed())
sellerWallet := p.PubkeyToPkHashAddress(sellerPrivateKey.PubKey())
startAt := time.Now().Add(time.Hour * -1)
endAt := time.Now().Add(time.Hour * 1)
tiers := lo.Map([]*protobuf.Tier{
{
PriceSat: 100,
Limit: 5,
MaxPerAddress: 100,
},
{
PriceSat: 200,
Limit: 5,
MaxPerAddress: 100,
},
}, func(tier *protobuf.Tier, _ int) []byte {
tierJson, err := protojson.Marshal(tier)
require.NoError(t, err)
return tierJson
})
mockDgTx.EXPECT().GetNodeSale(mock.Anything, datagateway.GetNodeSaleParams{
BlockHeight: 100,
TxIndex: 1,
}).Return([]entity.NodeSale{
{
BlockHeight: 100,
TxIndex: 1,
Name: t.Name(),
StartsAt: startAt,
EndsAt: endAt,
Tiers: tiers,
SellerPublicKey: sellerPubkeyHex,
MaxPerAddress: 100,
DeployTxHash: "040404040404",
MaxDiscountPercentage: 50,
SellerWallet: sellerWallet.EncodeAddress(),
},
}, nil)
buyerPrivateKey, _ := btcec.NewPrivateKey()
buyerPubkeyHex := hex.EncodeToString(buyerPrivateKey.PubKey().SerializeCompressed())
message := &protobuf.NodeSaleEvent{
Action: protobuf.Action_ACTION_PURCHASE,
Purchase: &protobuf.ActionPurchase{
Payload: &protobuf.PurchasePayload{
DeployID: &protobuf.ActionID{
Block: 100,
TxIndex: 1,
},
NodeIDs: []uint32{1, 2},
BuyerPublicKey: buyerPubkeyHex,
TimeOutBlock: uint64(testBlockHeight) - 5,
TotalAmountSat: 200,
},
},
}
event, block := assembleTestEvent(buyerPrivateKey, "090909090909", "090909090909", 0, 0, message)
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
return event.Valid == false && event.Reason == purchase.BLOCK_HEIGHT_TIMEOUT
})).Return(nil)
err := p.ProcessPurchase(ctx, mockDgTx, block, event)
require.NoError(t, err)
mockDgTx.AssertNotCalled(t, "CreateNode")
}
func TestSignatureInvalid(t *testing.T) {
ctx := context.Background()
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
sellerPrivateKey, _ := btcec.NewPrivateKey()
sellerPubkeyHex := hex.EncodeToString(sellerPrivateKey.PubKey().SerializeCompressed())
sellerWallet := p.PubkeyToPkHashAddress(sellerPrivateKey.PubKey())
startAt := time.Now().Add(time.Hour * -1)
endAt := time.Now().Add(time.Hour * 1)
tiers := lo.Map([]*protobuf.Tier{
{
PriceSat: 100,
Limit: 5,
MaxPerAddress: 100,
},
{
PriceSat: 200,
Limit: 5,
MaxPerAddress: 100,
},
}, func(tier *protobuf.Tier, _ int) []byte {
tierJson, err := protojson.Marshal(tier)
require.NoError(t, err)
return tierJson
})
mockDgTx.EXPECT().GetNodeSale(mock.Anything, datagateway.GetNodeSaleParams{
BlockHeight: 100,
TxIndex: 1,
}).Return([]entity.NodeSale{
{
BlockHeight: 100,
TxIndex: 1,
Name: t.Name(),
StartsAt: startAt,
EndsAt: endAt,
Tiers: tiers,
SellerPublicKey: sellerPubkeyHex,
MaxPerAddress: 100,
DeployTxHash: "040404040404",
MaxDiscountPercentage: 50,
SellerWallet: sellerWallet.EncodeAddress(),
},
}, nil)
buyerPrivateKey, _ := btcec.NewPrivateKey()
buyerPubkeyHex := hex.EncodeToString(buyerPrivateKey.PubKey().SerializeCompressed())
payload := &protobuf.PurchasePayload{
DeployID: &protobuf.ActionID{
Block: 100,
TxIndex: 1,
},
NodeIDs: []uint32{1, 2},
BuyerPublicKey: buyerPubkeyHex,
TimeOutBlock: testBlockHeight + 5,
}
payloadBytes, _ := proto.Marshal(payload)
payloadHash := chainhash.DoubleHashB(payloadBytes)
signature := ecdsa.Sign(buyerPrivateKey, payloadHash[:])
signatureHex := hex.EncodeToString(signature.Serialize())
message := &protobuf.NodeSaleEvent{
Action: protobuf.Action_ACTION_PURCHASE,
Purchase: &protobuf.ActionPurchase{
Payload: payload,
SellerSignature: signatureHex,
},
}
event, block := assembleTestEvent(buyerPrivateKey, "0B0B0B", "0B0B0B", 0, 0, message)
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
return event.Valid == false && event.Reason == purchase.INVALID_SIGNATURE
})).Return(nil)
err := p.ProcessPurchase(ctx, mockDgTx, block, event)
require.NoError(t, err)
mockDgTx.AssertNotCalled(t, "CreateNode")
}
func TestValidPurchase(t *testing.T) {
ctx := context.Background()
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
sellerPrivateKey, _ := btcec.NewPrivateKey()
sellerPubkeyHex := hex.EncodeToString(sellerPrivateKey.PubKey().SerializeCompressed())
sellerWallet := p.PubkeyToPkHashAddress(sellerPrivateKey.PubKey())
startAt := time.Now().Add(time.Hour * -1)
endAt := time.Now().Add(time.Hour * 1)
tiers := lo.Map([]*protobuf.Tier{
{
PriceSat: 100,
Limit: 5,
MaxPerAddress: 100,
},
{
PriceSat: 200,
Limit: 4,
MaxPerAddress: 2,
},
{
PriceSat: 400,
Limit: 3,
MaxPerAddress: 100,
},
}, func(tier *protobuf.Tier, _ int) []byte {
tierJson, err := protojson.Marshal(tier)
require.NoError(t, err)
return tierJson
})
mockDgTx.EXPECT().GetNodeSale(mock.Anything, datagateway.GetNodeSaleParams{
BlockHeight: 100,
TxIndex: 1,
}).Return([]entity.NodeSale{
{
BlockHeight: 100,
TxIndex: 1,
Name: t.Name(),
StartsAt: startAt,
EndsAt: endAt,
Tiers: tiers,
SellerPublicKey: sellerPubkeyHex,
MaxPerAddress: 100,
DeployTxHash: "040404040404",
MaxDiscountPercentage: 50,
SellerWallet: sellerWallet.EncodeAddress(),
},
}, nil)
mockDgTx.EXPECT().GetNodesByIds(mock.Anything, mock.Anything).Return(nil, nil)
mockDgTx.EXPECT().GetNodesByOwner(mock.Anything, mock.Anything).Return(nil, nil)
buyerPrivateKey, _ := btcec.NewPrivateKey()
buyerPubkeyHex := hex.EncodeToString(buyerPrivateKey.PubKey().SerializeCompressed())
payload := &protobuf.PurchasePayload{
DeployID: &protobuf.ActionID{
Block: 100,
TxIndex: 1,
},
BuyerPublicKey: buyerPubkeyHex,
TimeOutBlock: uint64(testBlockHeight) + 5,
NodeIDs: []uint32{0, 5, 6, 9},
TotalAmountSat: 500,
}
payloadBytes, _ := proto.Marshal(payload)
payloadHash := chainhash.DoubleHashB(payloadBytes)
signature := ecdsa.Sign(sellerPrivateKey, payloadHash[:])
signatureHex := hex.EncodeToString(signature.Serialize())
message := &protobuf.NodeSaleEvent{
Action: protobuf.Action_ACTION_PURCHASE,
Purchase: &protobuf.ActionPurchase{
Payload: payload,
SellerSignature: signatureHex,
},
}
event, block := assembleTestEvent(buyerPrivateKey, "0D0D0D0D", "0D0D0D0D", 0, 0, message)
event.InputValue = 500
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
return event.Valid == true && event.Reason == ""
})).Return(nil)
mockDgTx.EXPECT().CreateNode(mock.Anything, mock.MatchedBy(func(node entity.Node) bool {
return node.NodeID == 0 &&
node.TierIndex == 0 &&
node.OwnerPublicKey == buyerPubkeyHex &&
node.PurchaseTxHash == event.Transaction.TxHash.String() &&
node.SaleBlock == 100 &&
node.SaleTxIndex == 1
})).Return(nil)
mockDgTx.EXPECT().CreateNode(mock.Anything, mock.MatchedBy(func(node entity.Node) bool {
return node.NodeID == 5 &&
node.TierIndex == 1 &&
node.OwnerPublicKey == buyerPubkeyHex &&
node.PurchaseTxHash == event.Transaction.TxHash.String() &&
node.SaleBlock == 100 &&
node.SaleTxIndex == 1
})).Return(nil)
mockDgTx.EXPECT().CreateNode(mock.Anything, mock.MatchedBy(func(node entity.Node) bool {
return node.NodeID == 6 &&
node.TierIndex == 1 &&
node.OwnerPublicKey == buyerPubkeyHex &&
node.PurchaseTxHash == event.Transaction.TxHash.String() &&
node.SaleBlock == 100 &&
node.SaleTxIndex == 1
})).Return(nil)
mockDgTx.EXPECT().CreateNode(mock.Anything, mock.MatchedBy(func(node entity.Node) bool {
return node.NodeID == 9 &&
node.TierIndex == 2 &&
node.OwnerPublicKey == buyerPubkeyHex &&
node.PurchaseTxHash == event.Transaction.TxHash.String() &&
node.SaleBlock == 100 &&
node.SaleTxIndex == 1
})).Return(nil)
err := p.ProcessPurchase(ctx, mockDgTx, block, event)
require.NoError(t, err)
}
func TestMismatchPayment(t *testing.T) {
ctx := context.Background()
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
sellerPrivateKey, _ := btcec.NewPrivateKey()
sellerPubkeyHex := hex.EncodeToString(sellerPrivateKey.PubKey().SerializeCompressed())
sellerWallet := p.PubkeyToPkHashAddress(sellerPrivateKey.PubKey())
startAt := time.Now().Add(time.Hour * -1)
endAt := time.Now().Add(time.Hour * 1)
tiers := lo.Map([]*protobuf.Tier{
{
PriceSat: 100,
Limit: 5,
MaxPerAddress: 100,
},
{
PriceSat: 200,
Limit: 4,
MaxPerAddress: 2,
},
{
PriceSat: 400,
Limit: 3,
MaxPerAddress: 100,
},
}, func(tier *protobuf.Tier, _ int) []byte {
tierJson, err := protojson.Marshal(tier)
require.NoError(t, err)
return tierJson
})
mockDgTx.EXPECT().GetNodeSale(mock.Anything, datagateway.GetNodeSaleParams{
BlockHeight: 100,
TxIndex: 1,
}).Return([]entity.NodeSale{
{
BlockHeight: 100,
TxIndex: 1,
Name: t.Name(),
StartsAt: startAt,
EndsAt: endAt,
Tiers: tiers,
SellerPublicKey: sellerPubkeyHex,
MaxPerAddress: 100,
DeployTxHash: "040404040404",
MaxDiscountPercentage: 50,
SellerWallet: sellerWallet.EncodeAddress(),
},
}, nil)
mockDgTx.EXPECT().GetNodesByIds(mock.Anything, mock.Anything).Return(nil, nil)
buyerPrivateKey, _ := btcec.NewPrivateKey()
buyerPubkeyHex := hex.EncodeToString(buyerPrivateKey.PubKey().SerializeCompressed())
payload := &protobuf.PurchasePayload{
DeployID: &protobuf.ActionID{
Block: 100,
TxIndex: 1,
},
BuyerPublicKey: buyerPubkeyHex,
TimeOutBlock: uint64(testBlockHeight) + 5,
NodeIDs: []uint32{0, 5, 6, 9},
TotalAmountSat: 500,
}
payloadBytes, _ := proto.Marshal(payload)
payloadHash := chainhash.DoubleHashB(payloadBytes)
signature := ecdsa.Sign(sellerPrivateKey, payloadHash[:])
signatureHex := hex.EncodeToString(signature.Serialize())
message := &protobuf.NodeSaleEvent{
Action: protobuf.Action_ACTION_PURCHASE,
Purchase: &protobuf.ActionPurchase{
Payload: payload,
SellerSignature: signatureHex,
},
}
event, block := assembleTestEvent(buyerPrivateKey, "0D0D0D0D", "0D0D0D0D", 0, 0, message)
event.InputValue = 400
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
return event.Valid == false && event.Reason == purchase.INVALID_PAYMENT
})).Return(nil)
err := p.ProcessPurchase(ctx, mockDgTx, block, event)
require.NoError(t, err)
}
func TestInsufficientFund(t *testing.T) {
ctx := context.Background()
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
sellerPrivateKey, _ := btcec.NewPrivateKey()
sellerPubkeyHex := hex.EncodeToString(sellerPrivateKey.PubKey().SerializeCompressed())
sellerWallet := p.PubkeyToPkHashAddress(sellerPrivateKey.PubKey())
startAt := time.Now().Add(time.Hour * -1)
endAt := time.Now().Add(time.Hour * 1)
tiers := lo.Map([]*protobuf.Tier{
{
PriceSat: 100,
Limit: 5,
MaxPerAddress: 100,
},
{
PriceSat: 200,
Limit: 4,
MaxPerAddress: 2,
},
{
PriceSat: 400,
Limit: 3,
MaxPerAddress: 100,
},
}, func(tier *protobuf.Tier, _ int) []byte {
tierJson, err := protojson.Marshal(tier)
require.NoError(t, err)
return tierJson
})
mockDgTx.EXPECT().GetNodeSale(mock.Anything, datagateway.GetNodeSaleParams{
BlockHeight: 100,
TxIndex: 1,
}).Return([]entity.NodeSale{
{
BlockHeight: 100,
TxIndex: 1,
Name: t.Name(),
StartsAt: startAt,
EndsAt: endAt,
Tiers: tiers,
SellerPublicKey: sellerPubkeyHex,
MaxPerAddress: 100,
DeployTxHash: "040404040404",
MaxDiscountPercentage: 50,
SellerWallet: sellerWallet.EncodeAddress(),
},
}, nil)
mockDgTx.EXPECT().GetNodesByIds(mock.Anything, mock.Anything).Return(nil, nil)
buyerPrivateKey, _ := btcec.NewPrivateKey()
buyerPubkeyHex := hex.EncodeToString(buyerPrivateKey.PubKey().SerializeCompressed())
payload := &protobuf.PurchasePayload{
DeployID: &protobuf.ActionID{
Block: 100,
TxIndex: 1,
},
BuyerPublicKey: buyerPubkeyHex,
TimeOutBlock: uint64(testBlockHeight) + 5,
NodeIDs: []uint32{0, 5, 6, 9},
TotalAmountSat: 200,
}
payloadBytes, _ := proto.Marshal(payload)
payloadHash := chainhash.DoubleHashB(payloadBytes)
signature := ecdsa.Sign(sellerPrivateKey, payloadHash[:])
signatureHex := hex.EncodeToString(signature.Serialize())
message := &protobuf.NodeSaleEvent{
Action: protobuf.Action_ACTION_PURCHASE,
Purchase: &protobuf.ActionPurchase{
Payload: payload,
SellerSignature: signatureHex,
},
}
event, block := assembleTestEvent(buyerPrivateKey, "0D0D0D0D", "0D0D0D0D", 0, 0, message)
event.InputValue = 200
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
return event.Valid == false && event.Reason == purchase.INSUFFICIENT_FUND
})).Return(nil)
err := p.ProcessPurchase(ctx, mockDgTx, block, event)
require.NoError(t, err)
}
func TestBuyingLimit(t *testing.T) {
ctx := context.Background()
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
sellerPrivateKey, _ := btcec.NewPrivateKey()
sellerPubkeyHex := hex.EncodeToString(sellerPrivateKey.PubKey().SerializeCompressed())
sellerWallet := p.PubkeyToPkHashAddress(sellerPrivateKey.PubKey())
startAt := time.Now().Add(time.Hour * -1)
endAt := time.Now().Add(time.Hour * 1)
tiers := lo.Map([]*protobuf.Tier{
{
PriceSat: 100,
Limit: 5,
MaxPerAddress: 100,
},
{
PriceSat: 200,
Limit: 4,
MaxPerAddress: 2,
},
{
PriceSat: 400,
Limit: 50,
MaxPerAddress: 100,
},
}, func(tier *protobuf.Tier, _ int) []byte {
tierJson, err := protojson.Marshal(tier)
require.NoError(t, err)
return tierJson
})
mockDgTx.EXPECT().GetNodeSale(mock.Anything, datagateway.GetNodeSaleParams{
BlockHeight: 100,
TxIndex: 1,
}).Return([]entity.NodeSale{
{
BlockHeight: 100,
TxIndex: 1,
Name: t.Name(),
StartsAt: startAt,
EndsAt: endAt,
Tiers: tiers,
SellerPublicKey: sellerPubkeyHex,
MaxPerAddress: 2,
DeployTxHash: "040404040404",
MaxDiscountPercentage: 50,
SellerWallet: sellerWallet.EncodeAddress(),
},
}, nil)
buyerPrivateKey, _ := btcec.NewPrivateKey()
buyerPubkeyHex := hex.EncodeToString(buyerPrivateKey.PubKey().SerializeCompressed())
mockDgTx.EXPECT().GetNodesByIds(mock.Anything, mock.Anything).Return(nil, nil)
mockDgTx.EXPECT().GetNodesByOwner(mock.Anything, datagateway.GetNodesByOwnerParams{
SaleBlock: 100,
SaleTxIndex: 1,
OwnerPublicKey: buyerPubkeyHex,
}).Return([]entity.Node{
{
SaleBlock: 100,
SaleTxIndex: 1,
NodeID: 9,
TierIndex: 2,
OwnerPublicKey: buyerPubkeyHex,
},
{
SaleBlock: 100,
SaleTxIndex: 1,
NodeID: 10,
TierIndex: 2,
OwnerPublicKey: buyerPubkeyHex,
},
}, nil)
payload := &protobuf.PurchasePayload{
DeployID: &protobuf.ActionID{
Block: 100,
TxIndex: 1,
},
BuyerPublicKey: buyerPubkeyHex,
TimeOutBlock: uint64(testBlockHeight) + 5,
NodeIDs: []uint32{11},
TotalAmountSat: 600,
}
payloadBytes, _ := proto.Marshal(payload)
payloadHash := chainhash.DoubleHashB(payloadBytes)
signature := ecdsa.Sign(sellerPrivateKey, payloadHash[:])
signatureHex := hex.EncodeToString(signature.Serialize())
message := &protobuf.NodeSaleEvent{
Action: protobuf.Action_ACTION_PURCHASE,
Purchase: &protobuf.ActionPurchase{
Payload: payload,
SellerSignature: signatureHex,
},
}
event, block := assembleTestEvent(buyerPrivateKey, "22222222", "22222222", 0, 0, message)
event.InputValue = 600
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
return event.Valid == false && event.Reason == purchase.OVER_LIMIT_PER_ADDR
})).Return(nil)
err := p.ProcessPurchase(ctx, mockDgTx, block, event)
require.NoError(t, err)
mockDgTx.AssertNotCalled(t, "CreateNode")
}
func TestBuyingTierLimit(t *testing.T) {
ctx := context.Background()
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
sellerPrivateKey, _ := btcec.NewPrivateKey()
sellerPubkeyHex := hex.EncodeToString(sellerPrivateKey.PubKey().SerializeCompressed())
sellerWallet := p.PubkeyToPkHashAddress(sellerPrivateKey.PubKey())
startAt := time.Now().Add(time.Hour * -1)
endAt := time.Now().Add(time.Hour * 1)
tiers := lo.Map([]*protobuf.Tier{
{
PriceSat: 100,
Limit: 5,
MaxPerAddress: 100,
},
{
PriceSat: 200,
Limit: 4,
MaxPerAddress: 2,
},
{
PriceSat: 400,
Limit: 50,
MaxPerAddress: 3,
},
}, func(tier *protobuf.Tier, _ int) []byte {
tierJson, err := protojson.Marshal(tier)
require.NoError(t, err)
return tierJson
})
mockDgTx.EXPECT().GetNodeSale(mock.Anything, datagateway.GetNodeSaleParams{
BlockHeight: 100,
TxIndex: 1,
}).Return([]entity.NodeSale{
{
BlockHeight: 100,
TxIndex: 1,
Name: t.Name(),
StartsAt: startAt,
EndsAt: endAt,
Tiers: tiers,
SellerPublicKey: sellerPubkeyHex,
MaxPerAddress: 100,
DeployTxHash: "040404040404",
MaxDiscountPercentage: 50,
SellerWallet: sellerWallet.EncodeAddress(),
},
}, nil)
buyerPrivateKey, _ := btcec.NewPrivateKey()
buyerPubkeyHex := hex.EncodeToString(buyerPrivateKey.PubKey().SerializeCompressed())
mockDgTx.EXPECT().GetNodesByIds(mock.Anything, mock.Anything).Return(nil, nil)
mockDgTx.EXPECT().GetNodesByOwner(mock.Anything, datagateway.GetNodesByOwnerParams{
SaleBlock: 100,
SaleTxIndex: 1,
OwnerPublicKey: buyerPubkeyHex,
}).Return([]entity.Node{
{
SaleBlock: 100,
SaleTxIndex: 1,
NodeID: 9,
TierIndex: 2,
OwnerPublicKey: buyerPubkeyHex,
},
{
SaleBlock: 100,
SaleTxIndex: 1,
NodeID: 10,
TierIndex: 2,
OwnerPublicKey: buyerPubkeyHex,
},
{
SaleBlock: 100,
SaleTxIndex: 1,
NodeID: 11,
TierIndex: 2,
OwnerPublicKey: buyerPubkeyHex,
},
}, nil)
payload := &protobuf.PurchasePayload{
DeployID: &protobuf.ActionID{
Block: 100,
TxIndex: 1,
},
BuyerPublicKey: buyerPubkeyHex,
TimeOutBlock: uint64(testBlockHeight) + 5,
NodeIDs: []uint32{12, 13, 14},
TotalAmountSat: 600,
}
payloadBytes, _ := proto.Marshal(payload)
payloadHash := chainhash.DoubleHashB(payloadBytes)
signature := ecdsa.Sign(sellerPrivateKey, payloadHash[:])
signatureHex := hex.EncodeToString(signature.Serialize())
message := &protobuf.NodeSaleEvent{
Action: protobuf.Action_ACTION_PURCHASE,
Purchase: &protobuf.ActionPurchase{
Payload: payload,
SellerSignature: signatureHex,
},
}
event, block := assembleTestEvent(buyerPrivateKey, "10101010", "10101010", 0, 0, message)
event.InputValue = 600
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
return event.Valid == false && event.Reason == purchase.OVER_LIMIT_PER_TIER
})).Return(nil)
err := p.ProcessPurchase(ctx, mockDgTx, block, event)
require.NoError(t, err)
}


@@ -0,0 +1,62 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.27.0
// source: blocks.sql
package gen
import (
"context"
)
const createBlock = `-- name: CreateBlock :exec
INSERT INTO blocks ("block_height", "block_hash", "module")
VALUES ($1, $2, $3)
`
type CreateBlockParams struct {
BlockHeight int64
BlockHash string
Module string
}
func (q *Queries) CreateBlock(ctx context.Context, arg CreateBlockParams) error {
_, err := q.db.Exec(ctx, createBlock, arg.BlockHeight, arg.BlockHash, arg.Module)
return err
}
const getBlock = `-- name: GetBlock :one
SELECT block_height, block_hash, module FROM blocks
WHERE "block_height" = $1
`
func (q *Queries) GetBlock(ctx context.Context, blockHeight int64) (Block, error) {
row := q.db.QueryRow(ctx, getBlock, blockHeight)
var i Block
err := row.Scan(&i.BlockHeight, &i.BlockHash, &i.Module)
return i, err
}
const getLastProcessedBlock = `-- name: GetLastProcessedBlock :one
SELECT block_height, block_hash, module FROM blocks ORDER BY block_height DESC LIMIT 1
`
func (q *Queries) GetLastProcessedBlock(ctx context.Context) (Block, error) {
row := q.db.QueryRow(ctx, getLastProcessedBlock)
var i Block
err := row.Scan(&i.BlockHeight, &i.BlockHash, &i.Module)
return i, err
}
const removeBlockFrom = `-- name: RemoveBlockFrom :execrows
DELETE FROM blocks
WHERE "block_height" >= $1
`
func (q *Queries) RemoveBlockFrom(ctx context.Context, fromBlock int64) (int64, error) {
result, err := q.db.Exec(ctx, removeBlockFrom, fromBlock)
if err != nil {
return 0, err
}
return result.RowsAffected(), nil
}


@@ -0,0 +1,32 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.27.0
package gen
import (
"context"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgconn"
)
type DBTX interface {
Exec(context.Context, string, ...interface{}) (pgconn.CommandTag, error)
Query(context.Context, string, ...interface{}) (pgx.Rows, error)
QueryRow(context.Context, string, ...interface{}) pgx.Row
}
func New(db DBTX) *Queries {
return &Queries{db: db}
}
type Queries struct {
db DBTX
}
func (q *Queries) WithTx(tx pgx.Tx) *Queries {
return &Queries{
db: tx,
}
}
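Both *pgxpool.Pool and pgx.Tx satisfy the DBTX interface above, so the same generated Queries value can run directly against a pool or inside a transaction via WithTx. A minimal usage sketch (the connection string and values are placeholders):

package main

import (
	"context"

	"github.com/jackc/pgx/v5/pgxpool"

	"github.com/gaze-network/indexer-network/modules/nodesale/repository/postgres/gen"
)

func main() {
	ctx := context.Background()

	// Placeholder DSN; substitute a real connection string.
	pool, err := pgxpool.New(ctx, "postgres://user:pass@localhost:5432/indexer")
	if err != nil {
		panic(err)
	}
	defer pool.Close()

	queries := gen.New(pool) // *pgxpool.Pool satisfies DBTX

	// Record a processed block inside a transaction.
	tx, err := pool.Begin(ctx)
	if err != nil {
		panic(err)
	}
	defer tx.Rollback(ctx) // no-op if the transaction was committed

	qtx := queries.WithTx(tx)
	err = qtx.CreateBlock(ctx, gen.CreateBlockParams{
		BlockHeight: 840000, // illustrative height
		BlockHash:   "<block hash>",
		Module:      "nodesale",
	})
	if err != nil {
		panic(err)
	}
	if err := tx.Commit(ctx); err != nil {
		panic(err)
	}
}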


@@ -0,0 +1,104 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.27.0
// source: events.sql
package gen
import (
"context"
"github.com/jackc/pgx/v5/pgtype"
)
const createEvent = `-- name: CreateEvent :exec
INSERT INTO events ("tx_hash", "block_height", "tx_index", "wallet_address", "valid", "action",
"raw_message", "parsed_message", "block_timestamp", "block_hash", "metadata",
"reason")
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)
`
type CreateEventParams struct {
TxHash string
BlockHeight int64
TxIndex int32
WalletAddress string
Valid bool
Action int32
RawMessage []byte
ParsedMessage []byte
BlockTimestamp pgtype.Timestamp
BlockHash string
Metadata []byte
Reason string
}
func (q *Queries) CreateEvent(ctx context.Context, arg CreateEventParams) error {
_, err := q.db.Exec(ctx, createEvent,
arg.TxHash,
arg.BlockHeight,
arg.TxIndex,
arg.WalletAddress,
arg.Valid,
arg.Action,
arg.RawMessage,
arg.ParsedMessage,
arg.BlockTimestamp,
arg.BlockHash,
arg.Metadata,
arg.Reason,
)
return err
}
const getEventsByWallet = `-- name: GetEventsByWallet :many
SELECT tx_hash, block_height, tx_index, wallet_address, valid, action, raw_message, parsed_message, block_timestamp, block_hash, metadata, reason
FROM events
WHERE wallet_address = $1
`
func (q *Queries) GetEventsByWallet(ctx context.Context, walletAddress string) ([]Event, error) {
rows, err := q.db.Query(ctx, getEventsByWallet, walletAddress)
if err != nil {
return nil, err
}
defer rows.Close()
var items []Event
for rows.Next() {
var i Event
if err := rows.Scan(
&i.TxHash,
&i.BlockHeight,
&i.TxIndex,
&i.WalletAddress,
&i.Valid,
&i.Action,
&i.RawMessage,
&i.ParsedMessage,
&i.BlockTimestamp,
&i.BlockHash,
&i.Metadata,
&i.Reason,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const removeEventsFromBlock = `-- name: RemoveEventsFromBlock :execrows
DELETE FROM events
WHERE "block_height" >= $1
`
func (q *Queries) RemoveEventsFromBlock(ctx context.Context, fromBlock int64) (int64, error) {
result, err := q.db.Exec(ctx, removeEventsFromBlock, fromBlock)
if err != nil {
return 0, err
}
return result.RowsAffected(), nil
}


@@ -0,0 +1,55 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.27.0
package gen
import (
"github.com/jackc/pgx/v5/pgtype"
)
type Block struct {
BlockHeight int64
BlockHash string
Module string
}
type Event struct {
TxHash string
BlockHeight int64
TxIndex int32
WalletAddress string
Valid bool
Action int32
RawMessage []byte
ParsedMessage []byte
BlockTimestamp pgtype.Timestamp
BlockHash string
Metadata []byte
Reason string
}
type Node struct {
SaleBlock int64
SaleTxIndex int32
NodeID int32
TierIndex int32
DelegatedTo string
OwnerPublicKey string
PurchaseTxHash string
DelegateTxHash string
}
type NodeSale struct {
BlockHeight int64
TxIndex int32
Name string
StartsAt pgtype.Timestamp
EndsAt pgtype.Timestamp
Tiers [][]byte
SellerPublicKey string
MaxPerAddress int32
DeployTxHash string
MaxDiscountPercentage int32
SellerWallet string
}


@@ -0,0 +1,312 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.27.0
// source: nodes.sql
package gen
import (
"context"
)
const clearDelegate = `-- name: ClearDelegate :execrows
UPDATE nodes
SET "delegated_to" = ''
WHERE "delegate_tx_hash" = ''
`
func (q *Queries) ClearDelegate(ctx context.Context) (int64, error) {
result, err := q.db.Exec(ctx, clearDelegate)
if err != nil {
return 0, err
}
return result.RowsAffected(), nil
}
const createNode = `-- name: CreateNode :exec
INSERT INTO nodes (sale_block, sale_tx_index, node_id, tier_index, delegated_to, owner_public_key, purchase_tx_hash, delegate_tx_hash)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
`
type CreateNodeParams struct {
SaleBlock int64
SaleTxIndex int32
NodeID int32
TierIndex int32
DelegatedTo string
OwnerPublicKey string
PurchaseTxHash string
DelegateTxHash string
}
func (q *Queries) CreateNode(ctx context.Context, arg CreateNodeParams) error {
_, err := q.db.Exec(ctx, createNode,
arg.SaleBlock,
arg.SaleTxIndex,
arg.NodeID,
arg.TierIndex,
arg.DelegatedTo,
arg.OwnerPublicKey,
arg.PurchaseTxHash,
arg.DelegateTxHash,
)
return err
}
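// The query below left-joins a generate_series over the requested tier index
// range, so tiers with no purchased nodes still appear in the result with a
// count of zero, keeping the rows aligned with the deployment's tier list.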
const getNodeCountByTierIndex = `-- name: GetNodeCountByTierIndex :many
SELECT (tiers.tier_index)::int AS tier_index, count(nodes.tier_index)
FROM generate_series($3::int,$4::int) AS tiers(tier_index)
LEFT JOIN
(SELECT sale_block, sale_tx_index, node_id, tier_index, delegated_to, owner_public_key, purchase_tx_hash, delegate_tx_hash
FROM nodes
WHERE sale_block = $1 AND
sale_tx_index= $2)
AS nodes ON tiers.tier_index = nodes.tier_index
GROUP BY tiers.tier_index
ORDER BY tiers.tier_index
`
type GetNodeCountByTierIndexParams struct {
SaleBlock int64
SaleTxIndex int32
FromTier int32
ToTier int32
}
type GetNodeCountByTierIndexRow struct {
TierIndex int32
Count int64
}
func (q *Queries) GetNodeCountByTierIndex(ctx context.Context, arg GetNodeCountByTierIndexParams) ([]GetNodeCountByTierIndexRow, error) {
rows, err := q.db.Query(ctx, getNodeCountByTierIndex,
arg.SaleBlock,
arg.SaleTxIndex,
arg.FromTier,
arg.ToTier,
)
if err != nil {
return nil, err
}
defer rows.Close()
var items []GetNodeCountByTierIndexRow
for rows.Next() {
var i GetNodeCountByTierIndexRow
if err := rows.Scan(&i.TierIndex, &i.Count); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const getNodesByDeployment = `-- name: GetNodesByDeployment :many
SELECT sale_block, sale_tx_index, node_id, tier_index, delegated_to, owner_public_key, purchase_tx_hash, delegate_tx_hash
FROM nodes
WHERE sale_block = $1 AND
sale_tx_index = $2
`
type GetNodesByDeploymentParams struct {
SaleBlock int64
SaleTxIndex int32
}
func (q *Queries) GetNodesByDeployment(ctx context.Context, arg GetNodesByDeploymentParams) ([]Node, error) {
rows, err := q.db.Query(ctx, getNodesByDeployment, arg.SaleBlock, arg.SaleTxIndex)
if err != nil {
return nil, err
}
defer rows.Close()
var items []Node
for rows.Next() {
var i Node
if err := rows.Scan(
&i.SaleBlock,
&i.SaleTxIndex,
&i.NodeID,
&i.TierIndex,
&i.DelegatedTo,
&i.OwnerPublicKey,
&i.PurchaseTxHash,
&i.DelegateTxHash,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const getNodesByIds = `-- name: GetNodesByIds :many
SELECT sale_block, sale_tx_index, node_id, tier_index, delegated_to, owner_public_key, purchase_tx_hash, delegate_tx_hash
FROM nodes
WHERE sale_block = $1 AND
sale_tx_index = $2 AND
node_id = ANY ($3::int[])
`
type GetNodesByIdsParams struct {
SaleBlock int64
SaleTxIndex int32
NodeIds []int32
}
func (q *Queries) GetNodesByIds(ctx context.Context, arg GetNodesByIdsParams) ([]Node, error) {
rows, err := q.db.Query(ctx, getNodesByIds, arg.SaleBlock, arg.SaleTxIndex, arg.NodeIds)
if err != nil {
return nil, err
}
defer rows.Close()
var items []Node
for rows.Next() {
var i Node
if err := rows.Scan(
&i.SaleBlock,
&i.SaleTxIndex,
&i.NodeID,
&i.TierIndex,
&i.DelegatedTo,
&i.OwnerPublicKey,
&i.PurchaseTxHash,
&i.DelegateTxHash,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const getNodesByOwner = `-- name: GetNodesByOwner :many
SELECT sale_block, sale_tx_index, node_id, tier_index, delegated_to, owner_public_key, purchase_tx_hash, delegate_tx_hash
FROM nodes
WHERE sale_block = $1 AND
sale_tx_index = $2 AND
owner_public_key = $3
ORDER BY tier_index
`
type GetNodesByOwnerParams struct {
SaleBlock int64
SaleTxIndex int32
OwnerPublicKey string
}
func (q *Queries) GetNodesByOwner(ctx context.Context, arg GetNodesByOwnerParams) ([]Node, error) {
rows, err := q.db.Query(ctx, getNodesByOwner, arg.SaleBlock, arg.SaleTxIndex, arg.OwnerPublicKey)
if err != nil {
return nil, err
}
defer rows.Close()
var items []Node
for rows.Next() {
var i Node
if err := rows.Scan(
&i.SaleBlock,
&i.SaleTxIndex,
&i.NodeID,
&i.TierIndex,
&i.DelegatedTo,
&i.OwnerPublicKey,
&i.PurchaseTxHash,
&i.DelegateTxHash,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const getNodesByPubkey = `-- name: GetNodesByPubkey :many
SELECT nodes.sale_block, nodes.sale_tx_index, nodes.node_id, nodes.tier_index, nodes.delegated_to, nodes.owner_public_key, nodes.purchase_tx_hash, nodes.delegate_tx_hash
FROM nodes JOIN events ON nodes.purchase_tx_hash = events.tx_hash
WHERE sale_block = $1 AND
sale_tx_index = $2 AND
owner_public_key = $3 AND
delegated_to = $4
`
type GetNodesByPubkeyParams struct {
SaleBlock int64
SaleTxIndex int32
OwnerPublicKey string
DelegatedTo string
}
func (q *Queries) GetNodesByPubkey(ctx context.Context, arg GetNodesByPubkeyParams) ([]Node, error) {
rows, err := q.db.Query(ctx, getNodesByPubkey,
arg.SaleBlock,
arg.SaleTxIndex,
arg.OwnerPublicKey,
arg.DelegatedTo,
)
if err != nil {
return nil, err
}
defer rows.Close()
var items []Node
for rows.Next() {
var i Node
if err := rows.Scan(
&i.SaleBlock,
&i.SaleTxIndex,
&i.NodeID,
&i.TierIndex,
&i.DelegatedTo,
&i.OwnerPublicKey,
&i.PurchaseTxHash,
&i.DelegateTxHash,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const setDelegates = `-- name: SetDelegates :execrows
UPDATE nodes
SET delegated_to = $4, delegate_tx_hash = $3
WHERE sale_block = $1 AND
sale_tx_index = $2 AND
node_id = ANY ($5::int[])
`
type SetDelegatesParams struct {
SaleBlock int64
SaleTxIndex int32
DelegateTxHash string
Delegatee string
NodeIds []int32
}
func (q *Queries) SetDelegates(ctx context.Context, arg SetDelegatesParams) (int64, error) {
result, err := q.db.Exec(ctx, setDelegates,
arg.SaleBlock,
arg.SaleTxIndex,
arg.DelegateTxHash,
arg.Delegatee,
arg.NodeIds,
)
if err != nil {
return 0, err
}
return result.RowsAffected(), nil
}
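
The generated wrappers above are thin pgx shims: each takes a typed params struct, and the []int32 NodeIds field is passed straight through for pgx to encode as the int[] bound to ANY($3::int[]). A minimal caller sketch, assuming a *pgxpool.Pool satisfies the sqlc-generated DBTX interface (the connection string, main function, and parameter values are illustrative, not from the source):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/gaze-network/indexer-network/modules/nodesale/repository/postgres/gen"
	"github.com/jackc/pgx/v5/pgxpool"
)

func main() {
	ctx := context.Background()
	pool, err := pgxpool.New(ctx, "postgres://localhost:5432/indexer") // illustrative DSN
	if err != nil {
		log.Fatal(err)
	}
	defer pool.Close()

	queries := gen.New(pool)
	nodes, err := queries.GetNodesByIds(ctx, gen.GetNodesByIdsParams{
		SaleBlock:   840000, // example deployment key, not from the source
		SaleTxIndex: 1,
		NodeIds:     []int32{1, 2, 3}, // pgx encodes this slice as the int[] for ANY($3::int[])
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, n := range nodes {
		fmt.Println(n.NodeID, n.OwnerPublicKey, n.DelegatedTo)
	}
}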


@@ -0,0 +1,92 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.27.0
// source: nodesales.sql
package gen
import (
"context"
"github.com/jackc/pgx/v5/pgtype"
)
const createNodeSale = `-- name: CreateNodeSale :exec
INSERT INTO node_sales ("block_height", "tx_index", "name", "starts_at", "ends_at", "tiers", "seller_public_key", "max_per_address", "deploy_tx_hash", "max_discount_percentage", "seller_wallet")
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)
`
type CreateNodeSaleParams struct {
BlockHeight int64
TxIndex int32
Name string
StartsAt pgtype.Timestamp
EndsAt pgtype.Timestamp
Tiers [][]byte
SellerPublicKey string
MaxPerAddress int32
DeployTxHash string
MaxDiscountPercentage int32
SellerWallet string
}
func (q *Queries) CreateNodeSale(ctx context.Context, arg CreateNodeSaleParams) error {
_, err := q.db.Exec(ctx, createNodeSale,
arg.BlockHeight,
arg.TxIndex,
arg.Name,
arg.StartsAt,
arg.EndsAt,
arg.Tiers,
arg.SellerPublicKey,
arg.MaxPerAddress,
arg.DeployTxHash,
arg.MaxDiscountPercentage,
arg.SellerWallet,
)
return err
}
const getNodeSale = `-- name: GetNodeSale :many
SELECT block_height, tx_index, name, starts_at, ends_at, tiers, seller_public_key, max_per_address, deploy_tx_hash, max_discount_percentage, seller_wallet
FROM node_sales
WHERE block_height = $1 AND
tx_index = $2
`
type GetNodeSaleParams struct {
BlockHeight int64
TxIndex int32
}
func (q *Queries) GetNodeSale(ctx context.Context, arg GetNodeSaleParams) ([]NodeSale, error) {
rows, err := q.db.Query(ctx, getNodeSale, arg.BlockHeight, arg.TxIndex)
if err != nil {
return nil, err
}
defer rows.Close()
var items []NodeSale
for rows.Next() {
var i NodeSale
if err := rows.Scan(
&i.BlockHeight,
&i.TxIndex,
&i.Name,
&i.StartsAt,
&i.EndsAt,
&i.Tiers,
&i.SellerPublicKey,
&i.MaxPerAddress,
&i.DeployTxHash,
&i.MaxDiscountPercentage,
&i.SellerWallet,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}


@@ -0,0 +1,20 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.27.0
// source: test.sql
package gen
import (
"context"
)
const clearEvents = `-- name: ClearEvents :exec
DELETE FROM events
WHERE tx_hash <> ''
`
func (q *Queries) ClearEvents(ctx context.Context) error {
_, err := q.db.Exec(ctx, clearEvents)
return err
}


@@ -0,0 +1,74 @@
package postgres
import (
"encoding/json"
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
"github.com/gaze-network/indexer-network/modules/nodesale/repository/postgres/gen"
"github.com/samber/lo"
)
func mapNodes(nodes []gen.Node) []entity.Node {
return lo.Map(nodes, func(item gen.Node, index int) entity.Node {
return entity.Node{
SaleBlock: uint64(item.SaleBlock),
SaleTxIndex: uint32(item.SaleTxIndex),
NodeID: uint32(item.NodeID),
TierIndex: item.TierIndex,
DelegatedTo: item.DelegatedTo,
OwnerPublicKey: item.OwnerPublicKey,
PurchaseTxHash: item.PurchaseTxHash,
DelegateTxHash: item.DelegateTxHash,
}
})
}
func mapNodeSales(nodeSales []gen.NodeSale) []entity.NodeSale {
return lo.Map(nodeSales, func(item gen.NodeSale, index int) entity.NodeSale {
return entity.NodeSale{
BlockHeight: uint64(item.BlockHeight),
TxIndex: uint32(item.TxIndex),
Name: item.Name,
StartsAt: item.StartsAt.Time,
EndsAt: item.EndsAt.Time,
Tiers: item.Tiers,
SellerPublicKey: item.SellerPublicKey,
MaxPerAddress: uint32(item.MaxPerAddress),
DeployTxHash: item.DeployTxHash,
MaxDiscountPercentage: item.MaxDiscountPercentage,
SellerWallet: item.SellerWallet,
}
})
}
func mapNodeCountByTierIndexRows(nodeCount []gen.GetNodeCountByTierIndexRow) []datagateway.GetNodeCountByTierIndexRow {
return lo.Map(nodeCount, func(item gen.GetNodeCountByTierIndexRow, index int) datagateway.GetNodeCountByTierIndexRow {
return datagateway.GetNodeCountByTierIndexRow{
TierIndex: item.TierIndex,
}
})
}
func mapNodeSalesEvents(events []gen.Event) []entity.NodeSaleEvent {
return lo.Map(events, func(item gen.Event, index int) entity.NodeSaleEvent {
var meta entity.MetadataEventPurchase
err := json.Unmarshal(item.Metadata, &meta)
if err != nil {
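// fall back to empty purchase metadata when the stored JSON cannot be parsed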
meta = entity.MetadataEventPurchase{}
}
return entity.NodeSaleEvent{
TxHash: item.TxHash,
BlockHeight: item.BlockHeight,
TxIndex: item.TxIndex,
WalletAddress: item.WalletAddress,
Valid: item.Valid,
Action: item.Action,
RawMessage: item.RawMessage,
ParsedMessage: item.ParsedMessage,
BlockTimestamp: item.BlockTimestamp.Time.UTC(),
BlockHash: item.BlockHash,
Metadata: &meta,
}
})
}


@@ -0,0 +1,247 @@
package postgres
import (
"context"
"encoding/json"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/internal/postgres"
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
"github.com/gaze-network/indexer-network/modules/nodesale/repository/postgres/gen"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgtype"
"github.com/samber/lo"
)
type Repository struct {
db postgres.DB
queries *gen.Queries
tx pgx.Tx
}
func NewRepository(db postgres.DB) *Repository {
return &Repository{
db: db,
queries: gen.New(db),
}
}
func (repo *Repository) CreateBlock(ctx context.Context, arg entity.Block) error {
err := repo.queries.CreateBlock(ctx, gen.CreateBlockParams{
BlockHeight: arg.BlockHeight,
BlockHash: arg.BlockHash,
Module: arg.Module,
})
if err != nil {
return errors.Wrap(err, "Cannot Add block")
}
return nil
}
func (repo *Repository) GetBlock(ctx context.Context, blockHeight int64) (*entity.Block, error) {
block, err := repo.queries.GetBlock(ctx, blockHeight)
if err != nil {
return nil, errors.Wrap(err, "Cannot get block")
}
return &entity.Block{
BlockHeight: block.BlockHeight,
BlockHash: block.BlockHash,
Module: block.Module,
}, nil
}
func (repo *Repository) GetLastProcessedBlock(ctx context.Context) (*entity.Block, error) {
block, err := repo.queries.GetLastProcessedBlock(ctx)
if err != nil {
return nil, errors.Wrap(err, "Cannot get last processed block")
}
return &entity.Block{
BlockHeight: block.BlockHeight,
BlockHash: block.BlockHash,
Module: block.Module,
}, nil
}
func (repo *Repository) RemoveBlockFrom(ctx context.Context, fromBlock int64) (int64, error) {
affected, err := repo.queries.RemoveBlockFrom(ctx, fromBlock)
if err != nil {
return 0, errors.Wrap(err, "Cannot remove blocks")
}
return affected, nil
}
func (repo *Repository) RemoveEventsFromBlock(ctx context.Context, fromBlock int64) (int64, error) {
affected, err := repo.queries.RemoveEventsFromBlock(ctx, fromBlock)
if err != nil {
return 0, errors.Wrap(err, "Cannot remove events")
}
return affected, nil
}
func (repo *Repository) ClearDelegate(ctx context.Context) (int64, error) {
affected, err := repo.queries.ClearDelegate(ctx)
if err != nil {
return 0, errors.Wrap(err, "Cannot clear delegate")
}
return affected, nil
}
func (repo *Repository) GetNodesByIds(ctx context.Context, arg datagateway.GetNodesByIdsParams) ([]entity.Node, error) {
nodes, err := repo.queries.GetNodesByIds(ctx, gen.GetNodesByIdsParams{
SaleBlock: int64(arg.SaleBlock),
SaleTxIndex: int32(arg.SaleTxIndex),
NodeIds: lo.Map(arg.NodeIds, func(item uint32, index int) int32 { return int32(item) }),
})
if err != nil {
return nil, errors.Wrap(err, "Cannot get nodes")
}
return mapNodes(nodes), nil
}
func (repo *Repository) CreateEvent(ctx context.Context, arg entity.NodeSaleEvent) error {
metaDataBytes := []byte("{}")
if arg.Metadata != nil {
metaDataBytes, _ = json.Marshal(arg.Metadata)
}
err := repo.queries.CreateEvent(ctx, gen.CreateEventParams{
TxHash: arg.TxHash,
BlockHeight: arg.BlockHeight,
TxIndex: arg.TxIndex,
WalletAddress: arg.WalletAddress,
Valid: arg.Valid,
Action: arg.Action,
RawMessage: arg.RawMessage,
ParsedMessage: arg.ParsedMessage,
BlockTimestamp: pgtype.Timestamp{Time: arg.BlockTimestamp.UTC(), Valid: true},
BlockHash: arg.BlockHash,
Metadata: metaDataBytes,
Reason: arg.Reason,
})
if err != nil {
return errors.Wrap(err, "Cannot add event")
}
return nil
}
func (repo *Repository) SetDelegates(ctx context.Context, arg datagateway.SetDelegatesParams) (int64, error) {
affected, err := repo.queries.SetDelegates(ctx, gen.SetDelegatesParams{
SaleBlock: int64(arg.SaleBlock),
SaleTxIndex: arg.SaleTxIndex,
Delegatee: arg.Delegatee,
DelegateTxHash: arg.DelegateTxHash,
NodeIds: lo.Map(arg.NodeIds, func(item uint32, index int) int32 { return int32(item) }),
})
if err != nil {
return 0, errors.Wrap(err, "Cannot set delegate")
}
return affected, nil
}
func (repo *Repository) CreateNodeSale(ctx context.Context, arg entity.NodeSale) error {
err := repo.queries.CreateNodeSale(ctx, gen.CreateNodeSaleParams{
BlockHeight: int64(arg.BlockHeight),
TxIndex: int32(arg.TxIndex),
Name: arg.Name,
StartsAt: pgtype.Timestamp{Time: arg.StartsAt.UTC(), Valid: true},
EndsAt: pgtype.Timestamp{Time: arg.EndsAt.UTC(), Valid: true},
Tiers: arg.Tiers,
SellerPublicKey: arg.SellerPublicKey,
MaxPerAddress: int32(arg.MaxPerAddress),
DeployTxHash: arg.DeployTxHash,
MaxDiscountPercentage: arg.MaxDiscountPercentage,
SellerWallet: arg.SellerWallet,
})
if err != nil {
return errors.Wrap(err, "Cannot add NodeSale")
}
return nil
}
func (repo *Repository) GetNodeSale(ctx context.Context, arg datagateway.GetNodeSaleParams) ([]entity.NodeSale, error) {
nodeSales, err := repo.queries.GetNodeSale(ctx, gen.GetNodeSaleParams{
BlockHeight: int64(arg.BlockHeight),
TxIndex: int32(arg.TxIndex),
})
if err != nil {
return nil, errors.Wrap(err, "Cannot get NodeSale")
}
return mapNodeSales(nodeSales), nil
}
func (repo *Repository) GetNodesByOwner(ctx context.Context, arg datagateway.GetNodesByOwnerParams) ([]entity.Node, error) {
nodes, err := repo.queries.GetNodesByOwner(ctx, gen.GetNodesByOwnerParams{
SaleBlock: int64(arg.SaleBlock),
SaleTxIndex: int32(arg.SaleTxIndex),
OwnerPublicKey: arg.OwnerPublicKey,
})
if err != nil {
return nil, errors.Wrap(err, "Cannot get nodes by owner")
}
return mapNodes(nodes), nil
}
func (repo *Repository) CreateNode(ctx context.Context, arg entity.Node) error {
err := repo.queries.CreateNode(ctx, gen.CreateNodeParams{
SaleBlock: int64(arg.SaleBlock),
SaleTxIndex: int32(arg.SaleTxIndex),
NodeID: int32(arg.NodeID),
TierIndex: arg.TierIndex,
DelegatedTo: arg.DelegatedTo,
OwnerPublicKey: arg.OwnerPublicKey,
PurchaseTxHash: arg.PurchaseTxHash,
DelegateTxHash: arg.DelegateTxHash,
})
if err != nil {
return errors.Wrap(err, "Cannot add node")
}
return nil
}
func (repo *Repository) GetNodeCountByTierIndex(ctx context.Context, arg datagateway.GetNodeCountByTierIndexParams) ([]datagateway.GetNodeCountByTierIndexRow, error) {
nodeCount, err := repo.queries.GetNodeCountByTierIndex(ctx, gen.GetNodeCountByTierIndexParams{
SaleBlock: int64(arg.SaleBlock),
SaleTxIndex: int32(arg.SaleTxIndex),
FromTier: int32(arg.FromTier),
ToTier: int32(arg.ToTier),
})
if err != nil {
return nil, errors.Wrap(err, "Cannot get node count by tier index")
}
return mapNodeCountByTierIndexRows(nodeCount), nil
}
func (repo *Repository) GetNodesByPubkey(ctx context.Context, arg datagateway.GetNodesByPubkeyParams) ([]entity.Node, error) {
nodes, err := repo.queries.GetNodesByPubkey(ctx, gen.GetNodesByPubkeyParams{
SaleBlock: arg.SaleBlock,
SaleTxIndex: arg.SaleTxIndex,
OwnerPublicKey: arg.OwnerPublicKey,
DelegatedTo: arg.DelegatedTo,
})
if err != nil {
return nil, errors.Wrap(err, "Cannot get nodes by public key")
}
return mapNodes(nodes), nil
}
func (repo *Repository) GetEventsByWallet(ctx context.Context, walletAddress string) ([]entity.NodeSaleEvent, error) {
events, err := repo.queries.GetEventsByWallet(ctx, walletAddress)
if err != nil {
return nil, errors.Wrap(err, "cannot get events by wallet")
}
return mapNodeSalesEvents(events), nil
}
func (repo *Repository) GetNodesByDeployment(ctx context.Context, saleBlock int64, saleTxIndex int32) ([]entity.Node, error) {
nodes, err := repo.queries.GetNodesByDeployment(ctx, gen.GetNodesByDeploymentParams{
SaleBlock: saleBlock,
SaleTxIndex: saleTxIndex,
})
if err != nil {
return nil, errors.Wrap(err, "cannot get nodes by deploy")
}
return mapNodes(nodes), nil
}


@@ -0,0 +1,62 @@
package postgres
import (
"context"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/jackc/pgx/v5"
)
var ErrTxAlreadyExists = errors.New("Transaction already exists. Call Commit() or Rollback() first.")
func (r *Repository) begin(ctx context.Context) (*Repository, error) {
if r.tx != nil {
return nil, errors.WithStack(ErrTxAlreadyExists)
}
tx, err := r.db.Begin(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to begin transaction")
}
return &Repository{
db: r.db,
queries: r.queries.WithTx(tx),
tx: tx,
}, nil
}
func (r *Repository) BeginNodeSaleTx(ctx context.Context) (datagateway.NodeSaleDataGatewayWithTx, error) {
repo, err := r.begin(ctx)
if err != nil {
return nil, errors.WithStack(err)
}
return repo, nil
}
func (r *Repository) Commit(ctx context.Context) error {
if r.tx == nil {
return nil
}
err := r.tx.Commit(ctx)
if err != nil {
return errors.Wrap(err, "failed to commit transaction")
}
r.tx = nil
return nil
}
func (r *Repository) Rollback(ctx context.Context) error {
if r.tx == nil {
return nil
}
err := r.tx.Rollback(ctx)
if err != nil && !errors.Is(err, pgx.ErrTxClosed) {
return errors.Wrap(err, "failed to rollback transaction")
}
if err == nil {
logger.DebugContext(ctx, "rolled back transaction")
}
r.tx = nil
return nil
}
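
begin returns a copy of the repository bound to the new pgx transaction, so the original stays usable, while Commit and Rollback turn into no-ops once the tx field is cleared. A sketch of the intended call pattern, assuming NodeSaleDataGatewayWithTx exposes the repository methods together with Commit and Rollback; the processBlockTx wrapper itself is hypothetical:

package postgres

import (
	"context"

	"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
)

// processBlockTx is a hypothetical caller illustrating the transaction lifecycle.
func processBlockTx(ctx context.Context, repo *Repository, event entity.NodeSaleEvent) error {
	txRepo, err := repo.BeginNodeSaleTx(ctx)
	if err != nil {
		return err
	}
	// Safe to defer unconditionally: after a successful Commit the tx field is
	// nil, and a tx already closed by pgx (pgx.ErrTxClosed) is ignored as well.
	defer txRepo.Rollback(ctx)

	if err := txRepo.CreateEvent(ctx, event); err != nil {
		return err
	}
	return txRepo.Commit(ctx)
}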


@@ -0,0 +1,25 @@
package nodesale
import "github.com/btcsuite/btcd/txscript"
func extractTapScript(witness [][]byte) (tokenizer txscript.ScriptTokenizer, controlBlock *txscript.ControlBlock, isTapScript bool) {
witness = removeAnnexFromWitness(witness)
if len(witness) < 2 {
return txscript.ScriptTokenizer{}, nil, false
}
script := witness[len(witness)-2]
rawControl := witness[len(witness)-1]
parsedControl, err := txscript.ParseControlBlock(rawControl)
if err != nil {
return txscript.ScriptTokenizer{}, nil, false
}
return txscript.MakeScriptTokenizer(0, script), parsedControl, true
}
func removeAnnexFromWitness(witness [][]byte) [][]byte {
if len(witness) >= 2 && len(witness[len(witness)-1]) > 0 && witness[len(witness)-1][0] == txscript.TaprootAnnexTag {
return witness[:len(witness)-1]
}
return witness
}
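
extractTapScript drops an optional annex, takes the last two witness items as script and control block, and returns a tokenizer positioned at the start of the revealed tapscript. A small consumer sketch that walks the script; the dumpTapScript helper and its output format are illustrative only:

package nodesale

import (
	"encoding/hex"
	"fmt"
)

// dumpTapScript is a hypothetical helper that prints each element of the
// revealed tapscript, e.g. to inspect an OP_FALSE OP_IF data envelope.
func dumpTapScript(witness [][]byte) {
	tokenizer, controlBlock, ok := extractTapScript(witness)
	if !ok {
		fmt.Println("not a tapscript spend")
		return
	}
	fmt.Printf("internal key: %x\n", controlBlock.InternalKey.SerializeCompressed())
	for tokenizer.Next() {
		if data := tokenizer.Data(); len(data) > 0 {
			fmt.Printf("push %s\n", hex.EncodeToString(data))
			continue
		}
		fmt.Printf("opcode 0x%02x\n", tokenizer.Opcode())
	}
	if err := tokenizer.Err(); err != nil {
		fmt.Println("malformed script:", err)
	}
}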


@@ -1,29 +1,49 @@
package httphandler
import (
"slices"
"net/url"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/uint128"
"github.com/gofiber/fiber/v2"
"github.com/samber/lo"
)
type getBalancesByAddressRequest struct {
type getBalancesRequest struct {
paginationRequest
Wallet string `params:"wallet"`
Id string `query:"id"`
BlockHeight uint64 `query:"blockHeight"`
}
func (r getBalancesByAddressRequest) Validate() error {
const (
getBalancesMaxLimit = 5000
getBalancesDefaultLimit = 100
)
func (r *getBalancesRequest) Validate() error {
var errList []error
if r.Wallet == "" {
errList = append(errList, errors.New("'wallet' is required"))
}
if r.Id != "" && !isRuneIdOrRuneName(r.Id) {
errList = append(errList, errors.New("'id' is not valid rune id or rune name"))
if r.Id != "" {
id, err := url.QueryUnescape(r.Id)
if err != nil {
return errors.WithStack(err)
}
r.Id = id
if !isRuneIdOrRuneName(r.Id) {
errList = append(errList, errors.Errorf("id '%s' is not valid rune id or rune name", r.Id))
}
}
if r.Limit < 0 {
errList = append(errList, errors.New("'limit' must be non-negative"))
}
if r.Limit > getBalancesMaxLimit {
errList = append(errList, errors.Errorf("'limit' cannot exceed %d", getBalancesMaxLimit))
}
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}
@@ -36,15 +56,15 @@ type balance struct {
Decimals uint8 `json:"decimals"`
}
type getBalancesByAddressResult struct {
type getBalancesResult struct {
List []balance `json:"list"`
BlockHeight uint64 `json:"blockHeight"`
}
type getBalancesByAddressResponse = HttpResponse[getBalancesByAddressResult]
type getBalancesResponse = HttpResponse[getBalancesResult]
func (h *HttpHandler) GetBalancesByAddress(ctx *fiber.Ctx) (err error) {
var req getBalancesByAddressRequest
func (h *HttpHandler) GetBalances(ctx *fiber.Ctx) (err error) {
var req getBalancesRequest
if err := ctx.ParamsParser(&req); err != nil {
return errors.WithStack(err)
}
@@ -54,6 +74,9 @@ func (h *HttpHandler) GetBalancesByAddress(ctx *fiber.Ctx) (err error) {
if err := req.Validate(); err != nil {
return errors.WithStack(err)
}
if err := req.ParseDefault(); err != nil {
return errors.WithStack(err)
}
pkScript, ok := resolvePkScript(h.network, req.Wallet)
if !ok {
@@ -64,49 +87,52 @@ func (h *HttpHandler) GetBalancesByAddress(ctx *fiber.Ctx) (err error) {
if blockHeight == 0 {
blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
if err != nil {
if errors.Is(err, errs.NotFound) {
return errs.NewPublicError("latest block not found")
}
return errors.Wrap(err, "error during GetLatestBlock")
}
blockHeight = uint64(blockHeader.Height)
}
balances, err := h.usecase.GetBalancesByPkScript(ctx.UserContext(), pkScript, blockHeight)
balances, err := h.usecase.GetBalancesByPkScript(ctx.UserContext(), pkScript, blockHeight, req.Limit, req.Offset)
if err != nil {
if errors.Is(err, errs.NotFound) {
return errs.NewPublicError("balances not found")
}
return errors.Wrap(err, "error during GetBalancesByPkScript")
}
runeId, ok := h.resolveRuneId(ctx.UserContext(), req.Id)
if ok {
// filter out balances that don't match the requested rune id
for key := range balances {
if key != runeId {
delete(balances, key)
}
}
balances = lo.Filter(balances, func(b *entity.Balance, _ int) bool {
return b.RuneId == runeId
})
}
balanceRuneIds := lo.Keys(balances)
balanceRuneIds := lo.Map(balances, func(b *entity.Balance, _ int) runes.RuneId {
return b.RuneId
})
runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx.UserContext(), balanceRuneIds)
if err != nil {
return errors.Wrap(err, "error during GetRuneEntryByRuneIdBatch")
}
balanceList := make([]balance, 0, len(balances))
for id, b := range balances {
runeEntry := runeEntries[id]
for _, b := range balances {
runeEntry := runeEntries[b.RuneId]
balanceList = append(balanceList, balance{
Amount: b.Amount,
Id: id,
Id: b.RuneId,
Name: runeEntry.SpacedRune,
Symbol: string(runeEntry.Symbol),
Decimals: runeEntry.Divisibility,
})
}
slices.SortFunc(balanceList, func(i, j balance) int {
return j.Amount.Cmp(i.Amount)
})
resp := getBalancesByAddressResponse{
Result: &getBalancesByAddressResult{
resp := getBalancesResponse{
Result: &getBalancesResult{
BlockHeight: blockHeight,
List: balanceList,
},


@@ -3,10 +3,11 @@ package httphandler
import (
"context"
"fmt"
"slices"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gofiber/fiber/v2"
"github.com/samber/lo"
"golang.org/x/sync/errgroup"
@@ -16,33 +17,49 @@ type getBalanceQuery struct {
Wallet string `json:"wallet"`
Id string `json:"id"`
BlockHeight uint64 `json:"blockHeight"`
Limit int32 `json:"limit"`
Offset int32 `json:"offset"`
}
type getBalancesByAddressBatchRequest struct {
type getBalancesBatchRequest struct {
Queries []getBalanceQuery `json:"queries"`
}
func (r getBalancesByAddressBatchRequest) Validate() error {
const getBalancesBatchMaxQueries = 100
func (r getBalancesBatchRequest) Validate() error {
var errList []error
for _, query := range r.Queries {
if len(r.Queries) == 0 {
errList = append(errList, errors.New("at least one query is required"))
}
if len(r.Queries) > getBalancesBatchMaxQueries {
errList = append(errList, errors.Errorf("cannot exceed %d queries", getBalancesBatchMaxQueries))
}
for i, query := range r.Queries {
if query.Wallet == "" {
errList = append(errList, errors.Errorf("queries[%d]: 'wallet' is required"))
errList = append(errList, errors.Errorf("queries[%d]: 'wallet' is required", i))
}
if query.Id != "" && !isRuneIdOrRuneName(query.Id) {
errList = append(errList, errors.Errorf("queries[%d]: 'id' is not valid rune id or rune name"))
errList = append(errList, errors.Errorf("queries[%d]: id '%s' is not valid rune id or rune name", i, query.Id))
}
if query.Limit < 0 {
errList = append(errList, errors.Errorf("queries[%d]: 'limit' must be non-negative", i))
}
if query.Limit > getBalancesMaxLimit {
errList = append(errList, errors.Errorf("queries[%d]: 'limit' cannot exceed %d", i, getBalancesMaxLimit))
}
}
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}
type getBalancesByAddressBatchResult struct {
List []*getBalancesByAddressResult `json:"list"`
type getBalancesBatchResult struct {
List []*getBalancesResult `json:"list"`
}
type getBalancesByAddressBatchResponse = HttpResponse[getBalancesByAddressBatchResult]
type getBalancesBatchResponse = HttpResponse[getBalancesBatchResult]
func (h *HttpHandler) GetBalancesByAddressBatch(ctx *fiber.Ctx) (err error) {
var req getBalancesByAddressBatchRequest
func (h *HttpHandler) GetBalancesBatch(ctx *fiber.Ctx) (err error) {
var req getBalancesBatchRequest
if err := ctx.BodyParser(&req); err != nil {
return errors.WithStack(err)
}
@@ -53,11 +70,14 @@ func (h *HttpHandler) GetBalancesByAddressBatch(ctx *fiber.Ctx) (err error) {
var latestBlockHeight uint64
blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
if err != nil {
if errors.Is(err, errs.NotFound) {
return errs.NewPublicError("latest block not found")
}
return errors.Wrap(err, "error during GetLatestBlock")
}
latestBlockHeight = uint64(blockHeader.Height)
processQuery := func(ctx context.Context, query getBalanceQuery, queryIndex int) (*getBalancesByAddressResult, error) {
processQuery := func(ctx context.Context, query getBalanceQuery, queryIndex int) (*getBalancesResult, error) {
pkScript, ok := resolvePkScript(h.network, query.Wallet)
if !ok {
return nil, errs.NewPublicError(fmt.Sprintf("unable to resolve pkscript from \"queries[%d].wallet\"", queryIndex))
@@ -68,50 +88,57 @@ func (h *HttpHandler) GetBalancesByAddressBatch(ctx *fiber.Ctx) (err error) {
blockHeight = latestBlockHeight
}
balances, err := h.usecase.GetBalancesByPkScript(ctx, pkScript, blockHeight)
if query.Limit == 0 {
query.Limit = getBalancesDefaultLimit
}
balances, err := h.usecase.GetBalancesByPkScript(ctx, pkScript, blockHeight, query.Limit, query.Offset)
if err != nil {
if errors.Is(err, errs.NotFound) {
return nil, errs.NewPublicError("balances not found")
}
return nil, errors.Wrap(err, "error during GetBalancesByPkScript")
}
runeId, ok := h.resolveRuneId(ctx, query.Id)
if ok {
// filter out balances that don't match the requested rune id
for key := range balances {
if key != runeId {
delete(balances, key)
}
}
balances = lo.Filter(balances, func(b *entity.Balance, _ int) bool {
return b.RuneId == runeId
})
}
balanceRuneIds := lo.Keys(balances)
balanceRuneIds := lo.Map(balances, func(b *entity.Balance, _ int) runes.RuneId {
return b.RuneId
})
runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx, balanceRuneIds)
if err != nil {
if errors.Is(err, errs.NotFound) {
return nil, errs.NewPublicError("rune not found")
}
return nil, errors.Wrap(err, "error during GetRuneEntryByRuneIdBatch")
}
balanceList := make([]balance, 0, len(balances))
for id, b := range balances {
runeEntry := runeEntries[id]
for _, b := range balances {
runeEntry := runeEntries[b.RuneId]
balanceList = append(balanceList, balance{
Amount: b.Amount,
Id: id,
Id: b.RuneId,
Name: runeEntry.SpacedRune,
Symbol: string(runeEntry.Symbol),
Decimals: runeEntry.Divisibility,
})
}
slices.SortFunc(balanceList, func(i, j balance) int {
return j.Amount.Cmp(i.Amount)
})
result := getBalancesByAddressResult{
result := getBalancesResult{
BlockHeight: blockHeight,
List: balanceList,
}
return &result, nil
}
results := make([]*getBalancesByAddressResult, len(req.Queries))
results := make([]*getBalancesResult, len(req.Queries))
eg, ectx := errgroup.WithContext(ctx.UserContext())
for i, query := range req.Queries {
i := i
@@ -129,8 +156,8 @@ func (h *HttpHandler) GetBalancesByAddressBatch(ctx *fiber.Ctx) (err error) {
return errors.WithStack(err)
}
resp := getBalancesByAddressBatchResponse{
Result: &getBalancesByAddressBatchResult{
resp := getBalancesBatchResponse{
Result: &getBalancesBatchResult{
List: results,
},
}


@@ -1,28 +1,12 @@
package httphandler
import (
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/runes/constants"
"github.com/gofiber/fiber/v2"
)
var startingBlockHeader = map[common.Network]types.BlockHeader{
common.NetworkMainnet: {
Height: 839999,
Hash: *utils.Must(chainhash.NewHashFromStr("0000000000000000000172014ba58d66455762add0512355ad651207918494ab")),
PrevBlock: *utils.Must(chainhash.NewHashFromStr("00000000000000000001dcce6ce7c8a45872cafd1fb04732b447a14a91832591")),
},
common.NetworkTestnet: {
Height: 2583200,
Hash: *utils.Must(chainhash.NewHashFromStr("000000000006c5f0dfcd9e0e81f27f97a87aef82087ffe69cd3c390325bb6541")),
PrevBlock: *utils.Must(chainhash.NewHashFromStr("00000000000668f3bafac992f53424774515440cb47e1cb9e73af3f496139e28")),
},
}
type getCurrentBlockResult struct {
Hash string `json:"hash"`
Height int64 `json:"height"`
@@ -36,7 +20,7 @@ func (h *HttpHandler) GetCurrentBlock(ctx *fiber.Ctx) (err error) {
if !errors.Is(err, errs.NotFound) {
return errors.Wrap(err, "error during GetLatestBlock")
}
blockHeader = startingBlockHeader[h.network]
blockHeader = constants.StartingBlockHeader[h.network]
}
resp := getCurrentBlockResponse{


@@ -1,10 +1,15 @@
package httphandler
import (
"bytes"
"encoding/hex"
"fmt"
"net/url"
"slices"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/uint128"
"github.com/gofiber/fiber/v2"
@@ -12,14 +17,30 @@ import (
)
type getHoldersRequest struct {
paginationRequest
Id string `params:"id"`
BlockHeight uint64 `query:"blockHeight"`
}
func (r getHoldersRequest) Validate() error {
const (
getHoldersMaxLimit = 1000
)
func (r *getHoldersRequest) Validate() error {
var errList []error
id, err := url.QueryUnescape(r.Id)
if err != nil {
return errors.WithStack(err)
}
r.Id = id
if !isRuneIdOrRuneName(r.Id) {
errList = append(errList, errors.New("'id' is not valid rune id or rune name"))
errList = append(errList, errors.Errorf("id '%s' is not valid rune id or rune name", r.Id))
}
if r.Limit < 0 {
errList = append(errList, errors.New("'limit' must be non-negative"))
}
if r.Limit > getHoldersMaxLimit {
errList = append(errList, errors.Errorf("'limit' cannot exceed %d", getHoldersMaxLimit))
}
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}
@@ -35,6 +56,7 @@ type getHoldersResult struct {
BlockHeight uint64 `json:"blockHeight"`
TotalSupply uint128.Uint128 `json:"totalSupply"`
MintedAmount uint128.Uint128 `json:"mintedAmount"`
Decimals uint8 `json:"decimals"`
List []holdingBalance `json:"list"`
}
@@ -51,6 +73,9 @@ func (h *HttpHandler) GetHolders(ctx *fiber.Ctx) (err error) {
if err := req.Validate(); err != nil {
return errors.WithStack(err)
}
if err := req.ParseDefault(); err != nil {
return errors.WithStack(err)
}
blockHeight := req.BlockHeight
if blockHeight == 0 {
@@ -66,16 +91,22 @@ func (h *HttpHandler) GetHolders(ctx *fiber.Ctx) (err error) {
var ok bool
runeId, ok = h.resolveRuneId(ctx.UserContext(), req.Id)
if !ok {
return errs.NewPublicError("unable to resolve rune id from \"id\"")
return errs.NewPublicError(fmt.Sprintf("unable to resolve rune id \"%s\" from \"id\"", req.Id))
}
}
runeEntry, err := h.usecase.GetRuneEntryByRuneIdAndHeight(ctx.UserContext(), runeId, blockHeight)
if err != nil {
return errors.Wrap(err, "error during GetHoldersByHeight")
if errors.Is(err, errs.NotFound) {
return errs.NewPublicError("rune not found")
}
return errors.Wrap(err, "error during GetRuneEntryByRuneIdAndHeight")
}
holdingBalances, err := h.usecase.GetBalancesByRuneId(ctx.UserContext(), runeId, blockHeight)
holdingBalances, err := h.usecase.GetBalancesByRuneId(ctx.UserContext(), runeId, blockHeight, req.Limit, req.Offset)
if err != nil {
if errors.Is(err, errs.NotFound) {
return errs.NewPublicError("balances not found")
}
return errors.Wrap(err, "error during GetBalancesByRuneId")
}
@@ -101,11 +132,20 @@ func (h *HttpHandler) GetHolders(ctx *fiber.Ctx) (err error) {
})
}
// sort by amount descending, then pk script ascending
slices.SortFunc(holdingBalances, func(b1, b2 *entity.Balance) int {
if b1.Amount.Cmp(b2.Amount) == 0 {
return bytes.Compare(b1.PkScript, b2.PkScript)
}
return b2.Amount.Cmp(b1.Amount)
})
resp := getHoldersResponse{
Result: &getHoldersResult{
BlockHeight: blockHeight,
TotalSupply: totalSupply,
MintedAmount: mintedAmount,
Decimals: runeEntry.Divisibility,
List: list,
},
}


@@ -1,11 +1,12 @@
package httphandler
import (
"slices"
"fmt"
"net/url"
"strings"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/uint128"
"github.com/gofiber/fiber/v2"
@@ -13,15 +14,29 @@ import (
)
type getTokenInfoRequest struct {
Id string `params:"id"`
BlockHeight uint64 `query:"blockHeight"`
Id string `params:"id"`
BlockHeight uint64 `query:"blockHeight"`
AdditionalFieldsRaw string `query:"additionalFields"` // comma-separated list of additional fields
AdditionalFields []string
}
func (r getTokenInfoRequest) Validate() error {
func (r *getTokenInfoRequest) Validate() error {
var errList []error
if !isRuneIdOrRuneName(r.Id) {
errList = append(errList, errors.New("'id' is not valid rune id or rune name"))
id, err := url.QueryUnescape(r.Id)
if err != nil {
return errors.WithStack(err)
}
r.Id = id
if !isRuneIdOrRuneName(r.Id) {
errList = append(errList, errors.Errorf("id '%s' is not valid rune id or rune name", r.Id))
}
if r.AdditionalFieldsRaw == "" {
// temporarily set default value for backward compatibility
r.AdditionalFieldsRaw = "holdersCount" // TODO: remove this default value after all clients are updated
}
r.AdditionalFields = strings.Split(r.AdditionalFieldsRaw, ",")
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}
@@ -35,17 +50,19 @@ type entryTerms struct {
}
type entry struct {
Divisibility uint8 `json:"divisibility"`
Premine uint128.Uint128 `json:"premine"`
Rune runes.Rune `json:"rune"`
Spacers uint32 `json:"spacers"`
Symbol string `json:"symbol"`
Terms entryTerms `json:"terms"`
Turbo bool `json:"turbo"`
Divisibility uint8 `json:"divisibility"`
Premine uint128.Uint128 `json:"premine"`
Rune runes.Rune `json:"rune"`
Spacers uint32 `json:"spacers"`
Symbol string `json:"symbol"`
Terms entryTerms `json:"terms"`
Turbo bool `json:"turbo"`
EtchingTxHash string `json:"etchingTxHash"`
}
type tokenInfoExtend struct {
Entry entry `json:"entry"`
HoldersCount *int64 `json:"holdersCount,omitempty"`
Entry entry `json:"entry"`
}
type getTokenInfoResult struct {
@@ -57,11 +74,11 @@ type getTokenInfoResult struct {
MintedAmount uint128.Uint128 `json:"mintedAmount"`
BurnedAmount uint128.Uint128 `json:"burnedAmount"`
Decimals uint8 `json:"decimals"`
DeployedAt uint64 `json:"deployedAt"` // unix timestamp
DeployedAt int64 `json:"deployedAt"` // unix timestamp
DeployedAtHeight uint64 `json:"deployedAtHeight"`
CompletedAt *uint64 `json:"completedAt"` // unix timestamp
CompletedAt *int64 `json:"completedAt"` // unix timestamp
CompletedAtHeight *uint64 `json:"completedAtHeight"`
HoldersCount int `json:"holdersCount"`
HoldersCount int64 `json:"holdersCount"` // deprecated // TODO: remove later
Extend tokenInfoExtend `json:"extend"`
}
@@ -83,6 +100,9 @@ func (h *HttpHandler) GetTokenInfo(ctx *fiber.Ctx) (err error) {
if blockHeight == 0 {
blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
if err != nil {
if errors.Is(err, errs.NotFound) {
return errs.NewPublicError("latest block not found")
}
return errors.Wrap(err, "error during GetLatestBlock")
}
blockHeight = uint64(blockHeader.Height)
@@ -93,73 +113,87 @@ func (h *HttpHandler) GetTokenInfo(ctx *fiber.Ctx) (err error) {
var ok bool
runeId, ok = h.resolveRuneId(ctx.UserContext(), req.Id)
if !ok {
return errs.NewPublicError("unable to resolve rune id from \"id\"")
return errs.NewPublicError(fmt.Sprintf("unable to resolve rune id \"%s\" from \"id\"", req.Id))
}
}
runeEntry, err := h.usecase.GetRuneEntryByRuneIdAndHeight(ctx.UserContext(), runeId, blockHeight)
if err != nil {
return errors.Wrap(err, "error during GetTokenInfoByHeight")
if errors.Is(err, errs.NotFound) {
return errs.NewPublicError("rune not found")
}
return errors.Wrap(err, "error during GetRuneEntryByRuneIdAndHeight")
}
holdingBalances, err := h.usecase.GetBalancesByRuneId(ctx.UserContext(), runeId, blockHeight)
if err != nil {
return errors.Wrap(err, "error during GetBalancesByRuneId")
var holdersCountPtr *int64
if lo.Contains(req.AdditionalFields, "holdersCount") {
holdersCount, err := h.usecase.GetTotalHoldersByRuneId(ctx.UserContext(), runeId, blockHeight)
if err != nil {
if errors.Is(err, errs.NotFound) {
return errs.NewPublicError("rune not found")
}
return errors.Wrap(err, "error during GetBalancesByRuneId")
}
holdersCountPtr = &holdersCount
}
holdingBalances = lo.Filter(holdingBalances, func(b *entity.Balance, _ int) bool {
return !b.Amount.IsZero()
})
// sort by amount descending
slices.SortFunc(holdingBalances, func(i, j *entity.Balance) int {
return j.Amount.Cmp(i.Amount)
})
totalSupply, err := runeEntry.Supply()
result, err := createTokenInfoResult(runeEntry, holdersCountPtr)
if err != nil {
return errors.Wrap(err, "cannot get total supply of rune")
return errors.Wrap(err, "error during createTokenInfoResult")
}
mintedAmount, err := runeEntry.MintedAmount()
if err != nil {
return errors.Wrap(err, "cannot get minted amount of rune")
}
circulatingSupply := mintedAmount.Sub(runeEntry.BurnedAmount)
terms := lo.FromPtr(runeEntry.Terms)
resp := getTokenInfoResponse{
Result: &getTokenInfoResult{
Id: runeId,
Name: runeEntry.SpacedRune,
Symbol: string(runeEntry.Symbol),
TotalSupply: totalSupply,
CirculatingSupply: circulatingSupply,
MintedAmount: mintedAmount,
BurnedAmount: runeEntry.BurnedAmount,
Decimals: runeEntry.Divisibility,
DeployedAt: uint64(runeEntry.EtchedAt.Unix()),
DeployedAtHeight: runeEntry.EtchingBlock,
CompletedAt: lo.Ternary(runeEntry.CompletedAt.IsZero(), nil, lo.ToPtr(uint64(runeEntry.CompletedAt.Unix()))),
CompletedAtHeight: runeEntry.CompletedAtHeight,
HoldersCount: len(holdingBalances),
Extend: tokenInfoExtend{
Entry: entry{
Divisibility: runeEntry.Divisibility,
Premine: runeEntry.Premine,
Rune: runeEntry.SpacedRune.Rune,
Spacers: runeEntry.SpacedRune.Spacers,
Symbol: string(runeEntry.Symbol),
Terms: entryTerms{
Amount: lo.FromPtr(terms.Amount),
Cap: lo.FromPtr(terms.Cap),
HeightStart: terms.HeightStart,
HeightEnd: terms.HeightEnd,
OffsetStart: terms.OffsetStart,
OffsetEnd: terms.OffsetEnd,
},
Turbo: runeEntry.Turbo,
},
},
},
Result: result,
}
return errors.WithStack(ctx.JSON(resp))
}
func createTokenInfoResult(runeEntry *runes.RuneEntry, holdersCount *int64) (*getTokenInfoResult, error) {
totalSupply, err := runeEntry.Supply()
if err != nil {
return nil, errors.Wrap(err, "cannot get total supply of rune")
}
mintedAmount, err := runeEntry.MintedAmount()
if err != nil {
return nil, errors.Wrap(err, "cannot get minted amount of rune")
}
circulatingSupply := mintedAmount.Sub(runeEntry.BurnedAmount)
terms := lo.FromPtr(runeEntry.Terms)
return &getTokenInfoResult{
Id: runeEntry.RuneId,
Name: runeEntry.SpacedRune,
Symbol: string(runeEntry.Symbol),
TotalSupply: totalSupply,
CirculatingSupply: circulatingSupply,
MintedAmount: mintedAmount,
BurnedAmount: runeEntry.BurnedAmount,
Decimals: runeEntry.Divisibility,
DeployedAt: runeEntry.EtchedAt.Unix(),
DeployedAtHeight: runeEntry.EtchingBlock,
CompletedAt: lo.Ternary(runeEntry.CompletedAt.IsZero(), nil, lo.ToPtr(runeEntry.CompletedAt.Unix())),
CompletedAtHeight: runeEntry.CompletedAtHeight,
HoldersCount: lo.FromPtr(holdersCount),
Extend: tokenInfoExtend{
HoldersCount: holdersCount,
Entry: entry{
Divisibility: runeEntry.Divisibility,
Premine: runeEntry.Premine,
Rune: runeEntry.SpacedRune.Rune,
Spacers: runeEntry.SpacedRune.Spacers,
Symbol: string(runeEntry.Symbol),
Terms: entryTerms{
Amount: lo.FromPtr(terms.Amount),
Cap: lo.FromPtr(terms.Cap),
HeightStart: terms.HeightStart,
HeightEnd: terms.HeightEnd,
OffsetStart: terms.OffsetStart,
OffsetEnd: terms.OffsetEnd,
},
Turbo: runeEntry.Turbo,
EtchingTxHash: runeEntry.EtchingTxHash.String(),
},
},
}, nil
}


@@ -0,0 +1,118 @@
package httphandler
import (
"fmt"
"net/url"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gofiber/fiber/v2"
"github.com/samber/lo"
)
type getTokenInfoBatchRequest struct {
Ids []string `json:"ids"`
BlockHeight uint64 `json:"blockHeight"`
AdditionalFields []string `json:"additionalFields"`
}
const getTokenInfoBatchMaxQueries = 100
func (r *getTokenInfoBatchRequest) Validate() error {
var errList []error
if len(r.Ids) == 0 {
errList = append(errList, errors.New("ids cannot be empty"))
}
if len(r.Ids) > getTokenInfoBatchMaxQueries {
errList = append(errList, errors.Errorf("cannot query more than %d ids", getTokenInfoBatchMaxQueries))
}
for i := range r.Ids {
id, err := url.QueryUnescape(r.Ids[i])
if err != nil {
return errors.WithStack(err)
}
r.Ids[i] = id
if !isRuneIdOrRuneName(r.Ids[i]) {
errList = append(errList, errors.Errorf("ids[%d]: id '%s' is not valid rune id or rune name", i, r.Ids[i]))
}
}
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}
type getTokenInfoBatchResult struct {
List []*getTokenInfoResult `json:"list"`
}
type getTokenInfoBatchResponse = HttpResponse[getTokenInfoBatchResult]
func (h *HttpHandler) GetTokenInfoBatch(ctx *fiber.Ctx) (err error) {
var req getTokenInfoBatchRequest
if err := ctx.BodyParser(&req); err != nil {
return errors.WithStack(err)
}
if err := req.Validate(); err != nil {
return errors.WithStack(err)
}
blockHeight := req.BlockHeight
if blockHeight == 0 {
blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
if err != nil {
if errors.Is(err, errs.NotFound) {
return errs.NewPublicError("latest block not found")
}
return errors.Wrap(err, "error during GetLatestBlock")
}
blockHeight = uint64(blockHeader.Height)
}
runeIds := make([]runes.RuneId, 0)
for i, id := range req.Ids {
runeId, ok := h.resolveRuneId(ctx.UserContext(), id)
if !ok {
return errs.NewPublicError(fmt.Sprintf("unable to resolve rune id \"%s\" from \"ids[%d]\"", id, i))
}
runeIds = append(runeIds, runeId)
}
runeEntries, err := h.usecase.GetRuneEntryByRuneIdAndHeightBatch(ctx.UserContext(), runeIds, blockHeight)
if err != nil {
return errors.Wrap(err, "error during GetRuneEntryByRuneIdAndHeightBatch")
}
holdersCounts := make(map[runes.RuneId]int64)
if lo.Contains(req.AdditionalFields, "holdersCount") {
holdersCounts, err = h.usecase.GetTotalHoldersByRuneIds(ctx.UserContext(), runeIds, blockHeight)
if err != nil {
return errors.Wrap(err, "error during GetBalancesByRuneId")
}
}
results := make([]*getTokenInfoResult, 0, len(runeIds))
for _, runeId := range runeIds {
runeEntry, ok := runeEntries[runeId]
if !ok {
return errs.NewPublicError(fmt.Sprintf("rune not found: %s", runeId))
}
var holdersCount *int64
if lo.Contains(req.AdditionalFields, "holdersCount") {
holdersCount = lo.ToPtr(holdersCounts[runeId])
}
result, err := createTokenInfoResult(runeEntry, holdersCount)
if err != nil {
return errors.Wrap(err, "error during createTokenInfoResult")
}
results = append(results, result)
}
resp := getTokenInfoBatchResponse{
Result: &getTokenInfoBatchResult{
List: results,
},
}
return errors.WithStack(ctx.JSON(resp))
}


@@ -0,0 +1,150 @@
package httphandler
import (
"fmt"
"strings"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gofiber/fiber/v2"
"github.com/samber/lo"
)
const (
getTokensMaxLimit = 1000
)
type GetTokensScope string
const (
GetTokensScopeAll GetTokensScope = "all"
GetTokensScopeOngoing GetTokensScope = "ongoing"
)
func (s GetTokensScope) IsValid() bool {
switch s {
case GetTokensScopeAll, GetTokensScopeOngoing:
return true
}
return false
}
type getTokensRequest struct {
paginationRequest
Search string `query:"search"`
BlockHeight uint64 `query:"blockHeight"`
Scope GetTokensScope `query:"scope"`
AdditionalFieldsRaw string `query:"additionalFields"` // comma-separated list of additional fields
AdditionalFields []string
}
func (r *getTokensRequest) Validate() error {
var errList []error
if err := r.paginationRequest.Validate(); err != nil {
errList = append(errList, err)
}
if r.Limit > getTokensMaxLimit {
errList = append(errList, errors.Errorf("limit must be less than or equal to 1000"))
}
if r.Scope != "" && !r.Scope.IsValid() {
errList = append(errList, errors.Errorf("invalid scope: %s", r.Scope))
}
if r.AdditionalFieldsRaw == "" {
// temporarily set default value for backward compatibility
r.AdditionalFieldsRaw = "holdersCount" // TODO: remove this default value after all clients are updated
}
r.AdditionalFields = strings.Split(r.AdditionalFieldsRaw, ",")
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}
func (req *getTokensRequest) ParseDefault() error {
if err := req.paginationRequest.ParseDefault(); err != nil {
return errors.WithStack(err)
}
if req.Scope == "" {
req.Scope = GetTokensScopeAll
}
return nil
}
type getTokensResult struct {
List []*getTokenInfoResult `json:"list"`
}
type getTokensResponse = HttpResponse[getTokensResult]
func (h *HttpHandler) GetTokens(ctx *fiber.Ctx) (err error) {
var req getTokensRequest
if err := ctx.QueryParser(&req); err != nil {
return errors.WithStack(err)
}
if err := req.Validate(); err != nil {
return errors.WithStack(err)
}
if err := req.ParseDefault(); err != nil {
return errors.WithStack(err)
}
blockHeight := req.BlockHeight
if blockHeight == 0 {
blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
if err != nil {
if errors.Is(err, errs.NotFound) {
return errs.NewPublicError("latest block not found")
}
return errors.Wrap(err, "error during GetLatestBlock")
}
blockHeight = uint64(blockHeader.Height)
}
// remove spacers
search := strings.Replace(strings.Replace(req.Search, "•", "", -1), ".", "", -1)
var entries []*runes.RuneEntry
switch req.Scope {
case GetTokensScopeAll:
entries, err = h.usecase.GetRuneEntries(ctx.UserContext(), search, blockHeight, req.Limit, req.Offset)
if err != nil {
return errors.Wrap(err, "error during GetRuneEntryList")
}
case GetTokensScopeOngoing:
entries, err = h.usecase.GetOngoingRuneEntries(ctx.UserContext(), search, blockHeight, req.Limit, req.Offset)
if err != nil {
return errors.Wrap(err, "error during GetRuneEntryList")
}
default:
return errs.NewPublicError(fmt.Sprintf("invalid scope: %s", req.Scope))
}
runeIds := lo.Map(entries, func(item *runes.RuneEntry, _ int) runes.RuneId { return item.RuneId })
holdersCounts := make(map[runes.RuneId]int64)
if lo.Contains(req.AdditionalFields, "holdersCount") {
holdersCounts, err = h.usecase.GetTotalHoldersByRuneIds(ctx.UserContext(), runeIds, blockHeight)
if err != nil {
return errors.Wrap(err, "error during GetTotalHoldersByRuneIds")
}
}
results := make([]*getTokenInfoResult, 0, len(entries))
for _, ent := range entries {
var holdersCount *int64
if lo.Contains(req.AdditionalFields, "holdersCount") {
holdersCount = lo.ToPtr(holdersCounts[ent.RuneId])
}
result, err := createTokenInfoResult(ent, holdersCount)
if err != nil {
return errors.Wrap(err, "error during createTokenInfoResult")
}
results = append(results, result)
}
return errors.WithStack(ctx.JSON(getTokensResponse{
Result: &getTokensResult{
List: results,
},
}))
}
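
The handler strips both the canonical • spacer and the . alias from the search term before the lookup, so spaced and unspaced rune names resolve to the same entries. A tiny standalone sketch of that normalization (the rune name used is just an example):

package main

import (
	"fmt"
	"strings"
)

// normalizeRuneSearch mirrors the handler's spacer stripping.
func normalizeRuneSearch(s string) string {
	return strings.Replace(strings.Replace(s, "•", "", -1), ".", "", -1)
}

func main() {
	fmt.Println(normalizeRuneSearch("UNCOMMON•GOODS")) // UNCOMMONGOODS
	fmt.Println(normalizeRuneSearch("UNCOMMON.GOODS")) // UNCOMMONGOODS
}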


@@ -0,0 +1,171 @@
package httphandler
import (
"encoding/hex"
"fmt"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gofiber/fiber/v2"
"github.com/samber/lo"
)
type getTransactionByHashRequest struct {
Hash string `params:"hash"`
}
func (r getTransactionByHashRequest) Validate() error {
var errList []error
if len(r.Hash) == 0 {
errList = append(errList, errs.NewPublicError("hash is required"))
}
if len(r.Hash) > chainhash.MaxHashStringSize {
errList = append(errList, errs.NewPublicError(fmt.Sprintf("hash length must be less than or equal to %d bytes", chainhash.MaxHashStringSize)))
}
if len(errList) == 0 {
return nil
}
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}
type getTransactionByHashResponse = HttpResponse[transaction]
func (h *HttpHandler) GetTransactionByHash(ctx *fiber.Ctx) (err error) {
var req getTransactionByHashRequest
if err := ctx.ParamsParser(&req); err != nil {
return errors.WithStack(err)
}
if err := req.Validate(); err != nil {
return errors.WithStack(err)
}
hash, err := chainhash.NewHashFromStr(req.Hash)
if err != nil {
return errs.NewPublicError("invalid transaction hash")
}
tx, err := h.usecase.GetRuneTransaction(ctx.UserContext(), *hash)
if err != nil {
if errors.Is(err, errs.NotFound) {
return fiber.NewError(fiber.StatusNotFound, "transaction not found")
}
return errors.Wrap(err, "error during GetRuneTransaction")
}
allRuneIds := make(map[runes.RuneId]struct{})
for id := range tx.Mints {
allRuneIds[id] = struct{}{}
}
for id := range tx.Burns {
allRuneIds[id] = struct{}{}
}
for _, input := range tx.Inputs {
allRuneIds[input.RuneId] = struct{}{}
}
for _, output := range tx.Outputs {
allRuneIds[output.RuneId] = struct{}{}
}
runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx.UserContext(), lo.Keys(allRuneIds))
if err != nil {
return errors.Wrap(err, "error during GetRuneEntryByRuneIdBatch")
}
respTx := &transaction{
TxHash: tx.Hash,
BlockHeight: tx.BlockHeight,
Index: tx.Index,
Timestamp: tx.Timestamp.Unix(),
Inputs: make([]txInputOutput, 0, len(tx.Inputs)),
Outputs: make([]txInputOutput, 0, len(tx.Outputs)),
Mints: make(map[string]amountWithDecimal, len(tx.Mints)),
Burns: make(map[string]amountWithDecimal, len(tx.Burns)),
Extend: runeTransactionExtend{
RuneEtched: tx.RuneEtched,
Runestone: nil,
},
}
for _, input := range tx.Inputs {
address := addressFromPkScript(input.PkScript, h.network)
respTx.Inputs = append(respTx.Inputs, txInputOutput{
PkScript: hex.EncodeToString(input.PkScript),
Address: address,
Id: input.RuneId,
Amount: input.Amount,
Decimals: runeEntries[input.RuneId].Divisibility,
Index: input.Index,
})
}
for _, output := range tx.Outputs {
address := addressFromPkScript(output.PkScript, h.network)
respTx.Outputs = append(respTx.Outputs, txInputOutput{
PkScript: hex.EncodeToString(output.PkScript),
Address: address,
Id: output.RuneId,
Amount: output.Amount,
Decimals: runeEntries[output.RuneId].Divisibility,
Index: output.Index,
})
}
for id, amount := range tx.Mints {
respTx.Mints[id.String()] = amountWithDecimal{
Amount: amount,
Decimals: runeEntries[id].Divisibility,
}
}
for id, amount := range tx.Burns {
respTx.Burns[id.String()] = amountWithDecimal{
Amount: amount,
Decimals: runeEntries[id].Divisibility,
}
}
if tx.Runestone != nil {
var e *etching
if tx.Runestone.Etching != nil {
var symbol *string
if tx.Runestone.Etching.Symbol != nil {
symbol = lo.ToPtr(string(*tx.Runestone.Etching.Symbol))
}
var t *terms
if tx.Runestone.Etching.Terms != nil {
t = &terms{
Amount: tx.Runestone.Etching.Terms.Amount,
Cap: tx.Runestone.Etching.Terms.Cap,
HeightStart: tx.Runestone.Etching.Terms.HeightStart,
HeightEnd: tx.Runestone.Etching.Terms.HeightEnd,
OffsetStart: tx.Runestone.Etching.Terms.OffsetStart,
OffsetEnd: tx.Runestone.Etching.Terms.OffsetEnd,
}
}
e = &etching{
Divisibility: tx.Runestone.Etching.Divisibility,
Premine: tx.Runestone.Etching.Premine,
Rune: tx.Runestone.Etching.Rune,
Spacers: tx.Runestone.Etching.Spacers,
Symbol: symbol,
Terms: t,
Turbo: tx.Runestone.Etching.Turbo,
}
}
respTx.Extend.Runestone = &runestone{
Cenotaph: tx.Runestone.Cenotaph,
Flaws: lo.Ternary(tx.Runestone.Cenotaph, tx.Runestone.Flaws.CollectAsString(), nil),
Etching: e,
Edicts: lo.Map(tx.Runestone.Edicts, func(ed runes.Edict, _ int) edict {
return edict{
Id: ed.Id,
Amount: ed.Amount,
Output: ed.Output,
}
}),
Mint: tx.Runestone.Mint,
Pointer: tx.Runestone.Pointer,
}
}
return errors.WithStack(ctx.JSON(getTransactionByHashResponse{
Result: respTx,
}))
}


@@ -1,7 +1,10 @@
package httphandler
import (
"cmp"
"encoding/hex"
"fmt"
"net/url"
"slices"
"github.com/btcsuite/btcd/chaincfg/chainhash"
@@ -14,15 +17,40 @@ import (
)
type getTransactionsRequest struct {
Wallet string `query:"wallet"`
Id string `query:"id"`
BlockHeight uint64 `query:"blockHeight"`
paginationRequest
Wallet string `query:"wallet"`
Id string `query:"id"`
FromBlock int64 `query:"fromBlock"`
ToBlock int64 `query:"toBlock"`
}
func (r getTransactionsRequest) Validate() error {
const (
getTransactionsMaxLimit = 3000
)
func (r *getTransactionsRequest) Validate() error {
var errList []error
if r.Id != "" && !isRuneIdOrRuneName(r.Id) {
errList = append(errList, errors.New("'id' is not valid rune id or rune name"))
if r.Id != "" {
id, err := url.QueryUnescape(r.Id)
if err != nil {
return errors.WithStack(err)
}
r.Id = id
if !isRuneIdOrRuneName(r.Id) {
errList = append(errList, errors.Errorf("id '%s' is not valid rune id or rune name", r.Id))
}
}
if r.FromBlock < -1 {
errList = append(errList, errors.Errorf("invalid fromBlock range"))
}
if r.ToBlock < -1 {
errList = append(errList, errors.Errorf("invalid toBlock range"))
}
if r.Limit < 0 {
errList = append(errList, errors.New("'limit' must be non-negative"))
}
if r.Limit > getTransactionsMaxLimit {
errList = append(errList, errors.Errorf("'limit' cannot exceed %d", getTransactionsMaxLimit))
}
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}
@@ -106,6 +134,9 @@ func (h *HttpHandler) GetTransactions(ctx *fiber.Ctx) (err error) {
if err := req.Validate(); err != nil {
return errors.WithStack(err)
}
if err := req.ParseDefault(); err != nil {
return errors.WithStack(err)
}
var pkScript []byte
if req.Wallet != "" {
@@ -121,43 +152,65 @@ func (h *HttpHandler) GetTransactions(ctx *fiber.Ctx) (err error) {
var ok bool
runeId, ok = h.resolveRuneId(ctx.UserContext(), req.Id)
if !ok {
return errs.NewPublicError("unable to resolve rune id from \"id\"")
return errs.NewPublicError(fmt.Sprintf("unable to resolve rune id \"%s\" from \"id\"", req.Id))
}
}
blockHeight := req.BlockHeight
// set blockHeight to the latest block height if blockHeight, pkScript, and runeId are not provided
if blockHeight == 0 && pkScript == nil && runeId == (runes.RuneId{}) {
// default to latest block
if req.ToBlock == 0 {
req.ToBlock = -1
}
// get latest block height if block height is -1
if req.FromBlock == -1 || req.ToBlock == -1 {
blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
if err != nil {
if errors.Is(err, errs.NotFound) {
return errs.NewPublicError("latest block not found")
}
return errors.Wrap(err, "error during GetLatestBlock")
}
blockHeight = uint64(blockHeader.Height)
if req.FromBlock == -1 {
req.FromBlock = blockHeader.Height
}
if req.ToBlock == -1 {
req.ToBlock = blockHeader.Height
}
}
txs, err := h.usecase.GetRuneTransactions(ctx.UserContext(), pkScript, runeId, blockHeight)
// validate block height range
if req.FromBlock > req.ToBlock {
return errs.NewPublicError(fmt.Sprintf("fromBlock must be less than or equal to toBlock, got fromBlock=%d, toBlock=%d", req.FromBlock, req.ToBlock))
}
txs, err := h.usecase.GetRuneTransactions(ctx.UserContext(), pkScript, runeId, uint64(req.FromBlock), uint64(req.ToBlock), req.Limit, req.Offset)
if err != nil {
if errors.Is(err, errs.NotFound) {
return errs.NewPublicError("transactions not found")
}
return errors.Wrap(err, "error during GetRuneTransactions")
}
var allRuneIds []runes.RuneId
allRuneIds := make(map[runes.RuneId]struct{})
for _, tx := range txs {
for id := range tx.Mints {
allRuneIds = append(allRuneIds, id)
allRuneIds[id] = struct{}{}
}
for id := range tx.Burns {
allRuneIds = append(allRuneIds, id)
allRuneIds[id] = struct{}{}
}
for _, input := range tx.Inputs {
allRuneIds = append(allRuneIds, input.RuneId)
allRuneIds[input.RuneId] = struct{}{}
}
for _, output := range tx.Outputs {
allRuneIds = append(allRuneIds, output.RuneId)
allRuneIds[output.RuneId] = struct{}{}
}
}
allRuneIds = lo.Uniq(allRuneIds)
runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx.UserContext(), allRuneIds)
runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx.UserContext(), lo.Keys(allRuneIds))
if err != nil {
if errors.Is(err, errs.NotFound) {
return errs.NewPublicError("rune entries not found")
}
return errors.Wrap(err, "error during GetRuneEntryByRuneIdBatch")
}
@@ -256,12 +309,12 @@ func (h *HttpHandler) GetTransactions(ctx *fiber.Ctx) (err error) {
}
txList = append(txList, respTx)
}
// sort by block height ASC, then index ASC
// sort by block height DESC, then index DESC
slices.SortFunc(txList, func(t1, t2 transaction) int {
if t1.BlockHeight != t2.BlockHeight {
return int(t1.BlockHeight - t2.BlockHeight)
return cmp.Compare(t2.BlockHeight, t1.BlockHeight)
}
return int(t1.Index - t2.Index)
return cmp.Compare(t2.Index, t1.Index)
})
resp := getTransactionsResponse{


@@ -1,8 +1,9 @@
package httphandler
import (
"net/url"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
@@ -12,19 +13,37 @@ import (
"github.com/samber/lo"
)
type getUTXOsByAddressRequest struct {
type getUTXOsRequest struct {
paginationRequest
Wallet string `params:"wallet"`
Id string `query:"id"`
BlockHeight uint64 `query:"blockHeight"`
}
func (r getUTXOsByAddressRequest) Validate() error {
const (
getUTXOsMaxLimit = 3000
)
func (r *getUTXOsRequest) Validate() error {
var errList []error
if r.Wallet == "" {
errList = append(errList, errors.New("'wallet' is required"))
}
if r.Id != "" && !isRuneIdOrRuneName(r.Id) {
errList = append(errList, errors.New("'id' is not valid rune id or rune name"))
if r.Id != "" {
id, err := url.QueryUnescape(r.Id)
if err != nil {
return errors.WithStack(err)
}
r.Id = id
if !isRuneIdOrRuneName(r.Id) {
errList = append(errList, errors.Errorf("id '%s' is not valid rune id or rune name", r.Id))
}
}
if r.Limit < 0 {
errList = append(errList, errors.New("'limit' must be non-negative"))
}
if r.Limit > getUTXOsMaxLimit {
errList = append(errList, errors.Errorf("'limit' cannot exceed %d", getUTXOsMaxLimit))
}
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}
@@ -41,21 +60,22 @@ type utxoExtend struct {
Runes []runeBalance `json:"runes"`
}
type utxo struct {
type utxoItem struct {
TxHash chainhash.Hash `json:"txHash"`
OutputIndex uint32 `json:"outputIndex"`
Sats int64 `json:"sats"`
Extend utxoExtend `json:"extend"`
}
type getUTXOsByAddressResult struct {
List []utxo `json:"list"`
BlockHeight uint64 `json:"blockHeight"`
type getUTXOsResult struct {
List []utxoItem `json:"list"`
BlockHeight uint64 `json:"blockHeight"`
}
type getUTXOsByAddressResponse = HttpResponse[getUTXOsByAddressResult]
type getUTXOsResponse = HttpResponse[getUTXOsResult]
func (h *HttpHandler) GetUTXOsByAddress(ctx *fiber.Ctx) (err error) {
var req getUTXOsByAddressRequest
func (h *HttpHandler) GetUTXOs(ctx *fiber.Ctx) (err error) {
var req getUTXOsRequest
if err := ctx.ParamsParser(&req); err != nil {
return errors.WithStack(err)
}
@@ -65,6 +85,9 @@ func (h *HttpHandler) GetUTXOsByAddress(ctx *fiber.Ctx) (err error) {
if err := req.Validate(); err != nil {
return errors.WithStack(err)
}
if err := req.ParseDefault(); err != nil {
return errors.WithStack(err)
}
pkScript, ok := resolvePkScript(h.network, req.Wallet)
if !ok {
@@ -75,32 +98,52 @@ func (h *HttpHandler) GetUTXOsByAddress(ctx *fiber.Ctx) (err error) {
if blockHeight == 0 {
blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
if err != nil {
if errors.Is(err, errs.NotFound) {
return errs.NewPublicError("latest block not found")
}
return errors.Wrap(err, "error during GetLatestBlock")
}
blockHeight = uint64(blockHeader.Height)
}
outPointBalances, err := h.usecase.GetUnspentOutPointBalancesByPkScript(ctx.UserContext(), pkScript, blockHeight)
if err != nil {
return errors.Wrap(err, "error during GetBalancesByPkScript")
var utxos []*entity.RunesUTXOWithSats
if runeId, ok := h.resolveRuneId(ctx.UserContext(), req.Id); ok {
utxos, err = h.usecase.GetRunesUTXOsByRuneIdAndPkScript(ctx.UserContext(), runeId, pkScript, blockHeight, req.Limit, req.Offset)
if err != nil {
if errors.Is(err, errs.NotFound) {
return errs.NewPublicError("utxos not found")
}
return errors.Wrap(err, "error during GetBalancesByPkScript")
}
} else {
utxos, err = h.usecase.GetRunesUTXOsByPkScript(ctx.UserContext(), pkScript, blockHeight, req.Limit, req.Offset)
if err != nil {
if errors.Is(err, errs.NotFound) {
return errs.NewPublicError("utxos not found")
}
return errors.Wrap(err, "error during GetBalancesByPkScript")
}
}
outPointBalanceRuneIds := lo.Map(outPointBalances, func(outPointBalance *entity.OutPointBalance, _ int) runes.RuneId {
return outPointBalance.RuneId
})
runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx.UserContext(), outPointBalanceRuneIds)
runeIds := make(map[runes.RuneId]struct{}, 0)
for _, utxo := range utxos {
for _, balance := range utxo.RuneBalances {
runeIds[balance.RuneId] = struct{}{}
}
}
runeIdsList := lo.Keys(runeIds)
runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx.UserContext(), runeIdsList)
if err != nil {
if errors.Is(err, errs.NotFound) {
return errs.NewPublicError("rune entries not found")
}
return errors.Wrap(err, "error during GetRuneEntryByRuneIdBatch")
}
groupedBalances := lo.GroupBy(outPointBalances, func(outPointBalance *entity.OutPointBalance) wire.OutPoint {
return outPointBalance.OutPoint
})
utxoList := make([]utxo, 0, len(groupedBalances))
for outPoint, balances := range groupedBalances {
runeBalances := make([]runeBalance, 0, len(balances))
for _, balance := range balances {
utxoRespList := make([]utxoItem, 0, len(utxos))
for _, utxo := range utxos {
runeBalances := make([]runeBalance, 0, len(utxo.RuneBalances))
for _, balance := range utxo.RuneBalances {
runeEntry := runeEntries[balance.RuneId]
runeBalances = append(runeBalances, runeBalance{
RuneId: balance.RuneId,
@@ -111,34 +154,20 @@ func (h *HttpHandler) GetUTXOsByAddress(ctx *fiber.Ctx) (err error) {
})
}
utxoList = append(utxoList, utxo{
TxHash: outPoint.Hash,
OutputIndex: outPoint.Index,
utxoRespList = append(utxoRespList, utxoItem{
TxHash: utxo.OutPoint.Hash,
OutputIndex: utxo.OutPoint.Index,
Sats: utxo.Sats,
Extend: utxoExtend{
Runes: runeBalances,
},
})
}
// filter by req.Id if exists
{
runeId, ok := h.resolveRuneId(ctx.UserContext(), req.Id)
if ok {
utxoList = lo.Filter(utxoList, func(u utxo, _ int) bool {
for _, runeBalance := range u.Extend.Runes {
if runeBalance.RuneId == runeId {
return true
}
}
return false
})
}
}
resp := getUTXOsByAddressResponse{
Result: &getUTXOsByAddressResult{
resp := getUTXOsResponse{
Result: &getUTXOsResult{
BlockHeight: blockHeight,
List: utxoList,
List: utxoRespList,
},
}
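Two details of the reworked validator are easy to miss: the id is URL-unescaped before the rune-id/rune-name check (spaced rune names contain •, which arrives percent-encoded), and every problem is collected and joined into a single public validation error. A small standard-library sketch of that shape, with a placeholder where isRuneIdOrRuneName would go:

package main

import (
	"errors"
	"fmt"
	"net/url"
	"strings"
)

const maxLimit = 3000

func validate(id string, limit int) error {
	var errList []error
	if id != "" {
		// Query values may arrive percent-encoded (e.g. UNCOMMON%E2%80%A2GOODS),
		// so decode before validating.
		decoded, err := url.QueryUnescape(id)
		if err != nil {
			return err
		}
		id = decoded
		// Placeholder check; the handler uses isRuneIdOrRuneName here.
		if strings.ContainsAny(id, " \t") {
			errList = append(errList, fmt.Errorf("id %q is not a valid rune id or rune name", id))
		}
	}
	if limit < 0 {
		errList = append(errList, errors.New("'limit' must be non-negative"))
	}
	if limit > maxLimit {
		errList = append(errList, fmt.Errorf("'limit' cannot exceed %d", maxLimit))
	}
	return errors.Join(errList...) // nil when the list is empty
}

func main() {
	fmt.Println(validate("UNCOMMON%E2%80%A2GOODS", 100)) // <nil>
	fmt.Println(validate("not a rune", 5000))
}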

View File

@@ -0,0 +1,92 @@
package httphandler
import (
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/indexer-network/modules/runes/usecase"
"github.com/gofiber/fiber/v2"
"github.com/samber/lo"
)
type getUTXOsOutputByLocationRequest struct {
TxHash string `params:"txHash"`
OutputIndex int32 `query:"outputIndex"`
}
func (r getUTXOsOutputByLocationRequest) Validate() error {
var errList []error
if r.TxHash == "" {
errList = append(errList, errors.New("'txHash' is required"))
}
if r.OutputIndex < 0 {
errList = append(errList, errors.New("'outputIndex' must be non-negative"))
}
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}
type getUTXOsOutputByTxIdResponse = HttpResponse[utxoItem]
func (h *HttpHandler) GetUTXOsOutputByLocation(ctx *fiber.Ctx) (err error) {
var req getUTXOsOutputByLocationRequest
if err := ctx.ParamsParser(&req); err != nil {
return errors.WithStack(err)
}
if err := ctx.QueryParser(&req); err != nil {
return errors.WithStack(err)
}
if err := req.Validate(); err != nil {
return errors.WithStack(err)
}
txHash, err := chainhash.NewHashFromStr(req.TxHash)
if err != nil {
return errs.WithPublicMessage(err, "unable to resolve txHash")
}
utxo, err := h.usecase.GetUTXOsOutputByLocation(ctx.UserContext(), *txHash, uint32(req.OutputIndex))
if err != nil {
if errors.Is(err, usecase.ErrUTXONotFound) {
return errs.NewPublicError("utxo not found")
}
return errors.WithStack(err)
}
runeIds := make(map[runes.RuneId]struct{}, 0)
for _, balance := range utxo.RuneBalances {
runeIds[balance.RuneId] = struct{}{}
}
runeIdsList := lo.Keys(runeIds)
runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx.UserContext(), runeIdsList)
if err != nil {
if errors.Is(err, errs.NotFound) {
return errs.NewPublicError("rune entries not found")
}
return errors.Wrap(err, "error during GetRuneEntryByRuneIdBatch")
}
runeBalances := make([]runeBalance, 0, len(utxo.RuneBalances))
for _, balance := range utxo.RuneBalances {
runeEntry := runeEntries[balance.RuneId]
runeBalances = append(runeBalances, runeBalance{
RuneId: balance.RuneId,
Rune: runeEntry.SpacedRune,
Symbol: string(runeEntry.Symbol),
Amount: balance.Amount,
Divisibility: runeEntry.Divisibility,
})
}
resp := getUTXOsOutputByTxIdResponse{
Result: &utxoItem{
TxHash: utxo.OutPoint.Hash,
OutputIndex: utxo.OutPoint.Index,
Sats: utxo.Sats,
Extend: utxoExtend{
Runes: runeBalances,
},
},
}
return errors.WithStack(ctx.JSON(resp))
}
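For context on the single-output lookup above: chainhash.NewHashFromStr takes the transaction hash in the usual display (big-endian) hex form, and together with the validated non-negative outputIndex it identifies one outpoint. A tiny sketch of just that parsing step, assuming the same btcsuite packages used throughout this diff:

package main

import (
	"fmt"

	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/wire"
)

func main() {
	// Display-order hex, as an explorer or API client would send it.
	txHash, err := chainhash.NewHashFromStr("4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b")
	if err != nil {
		panic(err)
	}
	outPoint := wire.OutPoint{Hash: *txHash, Index: 0}
	fmt.Println(outPoint) // prints "<hash>:0"
}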

View File

@@ -0,0 +1,136 @@
package httphandler
import (
"context"
"fmt"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/indexer-network/modules/runes/usecase"
"github.com/gofiber/fiber/v2"
"github.com/samber/lo"
"golang.org/x/sync/errgroup"
)
type getUTXOsOutputByLocationQuery struct {
TxHash string `json:"txHash"`
OutputIndex int32 `json:"outputIndex"`
}
type getUTXOsOutputByLocationBatchRequest struct {
Queries []getUTXOsOutputByLocationQuery `json:"queries"`
}
const getUTXOsOutputByLocationBatchMaxQueries = 100
func (r getUTXOsOutputByLocationBatchRequest) Validate() error {
var errList []error
if len(r.Queries) == 0 {
errList = append(errList, errors.New("at least one query is required"))
}
if len(r.Queries) > getUTXOsOutputByLocationBatchMaxQueries {
errList = append(errList, errors.Errorf("cannot exceed %d queries", getUTXOsOutputByLocationBatchMaxQueries))
}
for i, query := range r.Queries {
if query.TxHash == "" {
errList = append(errList, errors.Errorf("queries[%d]: 'txHash' is required", i))
}
if query.OutputIndex < 0 {
errList = append(errList, errors.Errorf("queries[%d]: 'outputIndex' must be non-negative", i))
}
}
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}
type getUTXOsOutputByLocationBatchResult struct {
List []*utxoItem `json:"list"`
}
type getUTXOsOutputByLocationBatchResponse = HttpResponse[getUTXOsOutputByLocationBatchResult]
func (h *HttpHandler) GetUTXOsOutputByLocationBatch(ctx *fiber.Ctx) (err error) {
var req getUTXOsOutputByLocationBatchRequest
if err := ctx.BodyParser(&req); err != nil {
return errors.WithStack(err)
}
if err := req.Validate(); err != nil {
return errors.WithStack(err)
}
processQuery := func(ctx context.Context, query getUTXOsOutputByLocationQuery, queryIndex int) (*utxoItem, error) {
txHash, err := chainhash.NewHashFromStr(query.TxHash)
if err != nil {
return nil, errs.WithPublicMessage(err, fmt.Sprintf("unable to parse txHash from \"queries[%d].txHash\"", queryIndex))
}
utxo, err := h.usecase.GetUTXOsOutputByLocation(ctx, *txHash, uint32(query.OutputIndex))
if err != nil {
if errors.Is(err, usecase.ErrUTXONotFound) {
return nil, errs.NewPublicError(fmt.Sprintf("utxo not found for queries[%d]", queryIndex))
}
return nil, errors.WithStack(err)
}
runeIds := make(map[runes.RuneId]struct{}, 0)
for _, balance := range utxo.RuneBalances {
runeIds[balance.RuneId] = struct{}{}
}
runeIdsList := lo.Keys(runeIds)
runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx, runeIdsList)
if err != nil {
if errors.Is(err, errs.NotFound) {
return nil, errs.NewPublicError(fmt.Sprintf("rune entries not found for queries[%d]", queryIndex))
}
return nil, errors.Wrap(err, "error during GetRuneEntryByRuneIdBatch")
}
runeBalances := make([]runeBalance, 0, len(utxo.RuneBalances))
for _, balance := range utxo.RuneBalances {
runeEntry := runeEntries[balance.RuneId]
runeBalances = append(runeBalances, runeBalance{
RuneId: balance.RuneId,
Rune: runeEntry.SpacedRune,
Symbol: string(runeEntry.Symbol),
Amount: balance.Amount,
Divisibility: runeEntry.Divisibility,
})
}
return &utxoItem{
TxHash: utxo.OutPoint.Hash,
OutputIndex: utxo.OutPoint.Index,
Sats: utxo.Sats,
Extend: utxoExtend{
Runes: runeBalances,
},
}, nil
}
results := make([]*utxoItem, len(req.Queries))
eg, ectx := errgroup.WithContext(ctx.UserContext())
for i, query := range req.Queries {
i := i
query := query
eg.Go(func() error {
result, err := processQuery(ectx, query, i)
if err != nil {
return errors.Wrapf(err, "error during processQuery for query %d", i)
}
results[i] = result
return nil
})
}
if err := eg.Wait(); err != nil {
return errors.WithStack(err)
}
resp := getUTXOsOutputByLocationBatchResponse{
Result: &getUTXOsOutputByLocationBatchResult{
List: results,
},
}
return errors.WithStack(ctx.JSON(resp))
}
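The batch handler above fans the queries out on an errgroup and writes each result into a pre-sized slice, which stays race-free because every goroutine only touches its own index and eg.Wait() synchronizes before the slice is read. The pattern in isolation (the per-query work is a placeholder):

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	queries := []string{"a", "b", "c"}
	results := make([]string, len(queries)) // one slot per query, filled concurrently

	eg, ctx := errgroup.WithContext(context.Background())
	for i, q := range queries {
		i, q := i, q // capture loop variables (needed before Go 1.22, as in the diff)
		eg.Go(func() error {
			select {
			case <-ctx.Done(): // another query already failed
				return ctx.Err()
			default:
			}
			results[i] = "processed " + q // each goroutine writes only its own index
			return nil
		})
	}
	if err := eg.Wait(); err != nil { // first non-nil error, if any
		panic(err)
	}
	fmt.Println(results) // [processed a processed b processed c]
}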

View File

@@ -7,7 +7,9 @@ import (
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/txscript"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/indexer-network/modules/runes/usecase"
"github.com/gaze-network/indexer-network/pkg/logger"
@@ -31,6 +33,53 @@ type HttpResponse[T any] struct {
Result *T `json:"result,omitempty"`
}
type paginationRequest struct {
Offset int32 `query:"offset"`
Limit int32 `query:"limit"`
// OrderBy string `query:"orderBy"` // ASC or DESC
// SortBy string `query:"sortBy"` // column name
}
func (req paginationRequest) Validate() error {
var errList []error
// this is just a safeguard for limit;
// each path should have its own validation.
if req.Limit > 10000 {
errList = append(errList, errors.Errorf("too large limit"))
}
if req.Limit < 0 {
errList = append(errList, errors.Errorf("limit must be greater than or equal to 0"))
}
if req.Offset < 0 {
errList = append(errList, errors.Errorf("offset must be greater than or equal to 0"))
}
// TODO:
// if req.OrderBy != "" && req.OrderBy != "ASC" && req.OrderBy != "DESC" {
// errList = append(errList, errors.Errorf("invalid orderBy value, must be `ASC` or `DESC`"))
// }
return errs.WithPublicMessage(errors.Join(errList...), "pagination validation error")
}
func (req *paginationRequest) ParseDefault() error {
if req == nil {
return nil
}
if req.Limit == 0 {
req.Limit = 100
}
// TODO:
// if req.OrderBy == "" {
// req.OrderBy = "ASC"
// }
return nil
}
func resolvePkScript(network common.Network, wallet string) ([]byte, bool) {
if wallet == "" {
return nil, false
@@ -41,6 +90,10 @@ func resolvePkScript(network common.Network, wallet string) ([]byte, bool) {
return &chaincfg.MainNetParams
case common.NetworkTestnet:
return &chaincfg.TestNet3Params
case common.NetworkFractalMainnet:
return &chaincfg.MainNetParams
case common.NetworkFractalTestnet:
return &chaincfg.MainNetParams
}
panic("invalid network")
}()
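The new switch cases are what let the Fractal networks piggyback on Bitcoin-mainnet address encoding: both return chaincfg.MainNetParams. A hedged sketch of what address-to-pkScript resolution looks like with those params (the repo's resolvePkScript may differ in detail):

package main

import (
	"encoding/hex"
	"fmt"

	"github.com/btcsuite/btcd/btcutil"
	"github.com/btcsuite/btcd/chaincfg"
	"github.com/btcsuite/btcd/txscript"
)

// pkScriptFromAddress decodes a wallet address against the given chain params
// and converts it to the output script (pkScript) used for balance lookups.
func pkScriptFromAddress(wallet string, params *chaincfg.Params) ([]byte, error) {
	addr, err := btcutil.DecodeAddress(wallet, params)
	if err != nil {
		return nil, err
	}
	return txscript.PayToAddrScript(addr)
}

func main() {
	// A standard BIP-173 test address; Fractal mainnet/testnet reuse
	// MainNetParams, so mainnet-style bech32 addresses decode there too.
	script, err := pkScriptFromAddress("bc1qar0srrr7xfkvy5l643lydnw9re59gtzzwf5mdq", &chaincfg.MainNetParams)
	if err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(script))
}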

View File

@@ -7,12 +7,17 @@ import (
func (h *HttpHandler) Mount(router fiber.Router) error {
r := router.Group("/v2/runes")
r.Post("/balances/wallet/batch", h.GetBalancesByAddressBatch)
r.Get("/balances/wallet/:wallet", h.GetBalancesByAddress)
r.Post("/balances/wallet/batch", h.GetBalancesBatch)
r.Get("/balances/wallet/:wallet", h.GetBalances)
r.Get("/transactions", h.GetTransactions)
r.Get("/transactions/hash/:hash", h.GetTransactionByHash)
r.Get("/holders/:id", h.GetHolders)
r.Post("/info/batch", h.GetTokenInfoBatch)
r.Get("/info/:id", h.GetTokenInfo)
r.Get("/utxos/wallet/:wallet", h.GetUTXOsByAddress)
r.Get("/utxos/wallet/:wallet", h.GetUTXOs)
r.Post("/utxos/output/batch", h.GetUTXOsOutputByLocationBatch)
r.Get("/utxos/output/:txHash", h.GetUTXOsOutputByLocation)
r.Get("/block", h.GetCurrentBlock)
r.Get("/tokens", h.GetTokens)
return nil
}
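For reference, the renamed handlers are still plain fiber handlers: path parameters bind through params struct tags and query strings through query tags, matching the ParamsParser/QueryParser calls seen earlier in this diff. A stripped-down mount of one route (a sketch, not the module's actual wiring):

package main

import (
	"log"

	"github.com/gofiber/fiber/v2"
)

type utxosRequest struct {
	Wallet string `params:"wallet"`
	Id     string `query:"id"`
	Limit  int32  `query:"limit"`
}

func main() {
	app := fiber.New()
	app.Get("/v2/runes/utxos/wallet/:wallet", func(c *fiber.Ctx) error {
		var req utxosRequest
		if err := c.ParamsParser(&req); err != nil { // fills Wallet from the path
			return err
		}
		if err := c.QueryParser(&req); err != nil { // fills Id and Limit from the query string
			return err
		}
		return c.JSON(req) // a real handler would call the usecase layer here
	})
	log.Fatal(app.Listen(":8080"))
}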

View File

@@ -1,27 +0,0 @@
package runes
import (
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/core/types"
)
const (
Version = "v0.0.1"
DBVersion = 1
EventHashVersion = 1
)
var startingBlockHeader = map[common.Network]types.BlockHeader{
common.NetworkMainnet: {
Height: 839999,
Hash: *utils.Must(chainhash.NewHashFromStr("0000000000000000000172014ba58d66455762add0512355ad651207918494ab")),
PrevBlock: *utils.Must(chainhash.NewHashFromStr("00000000000000000001dcce6ce7c8a45872cafd1fb04732b447a14a91832591")),
},
common.NetworkTestnet: {
Height: 2583200,
Hash: *utils.Must(chainhash.NewHashFromStr("000000000006c5f0dfcd9e0e81f27f97a87aef82087ffe69cd3c390325bb6541")),
PrevBlock: *utils.Must(chainhash.NewHashFromStr("00000000000668f3bafac992f53424774515440cb47e1cb9e73af3f496139e28")),
},
}

View File

@@ -0,0 +1,122 @@
package constants
import (
"fmt"
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/uint128"
"github.com/samber/lo"
)
const (
Version = "v0.0.1"
DBVersion = 1
EventHashVersion = 1
)
// starting block heights and hashes should be 1 block before the activation block, as the indexer will start from the block after this value
var StartingBlockHeader = map[common.Network]types.BlockHeader{
common.NetworkMainnet: {
Height: 839999,
},
common.NetworkTestnet: {
Height: 2519999,
},
common.NetworkFractalMainnet: {
Height: 83999,
},
common.NetworkFractalTestnet: {
Height: 83999,
},
}
type GenesisRuneConfig struct {
RuneId runes.RuneId
Name string
Number uint64
Divisibility uint8
Premine uint128.Uint128
SpacedRune runes.SpacedRune
Symbol rune
Terms *runes.Terms
Turbo bool
EtchingTxHash chainhash.Hash
EtchedAt time.Time
}
var GenesisRuneConfigMap = map[common.Network]GenesisRuneConfig{
common.NetworkMainnet: {
RuneId: runes.RuneId{BlockHeight: 1, TxIndex: 0},
Number: 0,
Divisibility: 0,
Premine: uint128.Zero,
SpacedRune: runes.NewSpacedRune(runes.NewRune(2055900680524219742), 0b10000000),
Symbol: '\u29c9',
Terms: &runes.Terms{
Amount: lo.ToPtr(uint128.From64(1)),
Cap: &uint128.Max,
HeightStart: lo.ToPtr(uint64(840000)),
HeightEnd: lo.ToPtr(uint64(1050000)),
OffsetStart: nil,
OffsetEnd: nil,
},
Turbo: true,
EtchingTxHash: chainhash.Hash{},
EtchedAt: time.Unix(0, 0),
},
common.NetworkFractalMainnet: {
RuneId: runes.RuneId{BlockHeight: 1, TxIndex: 0},
Number: 0,
Divisibility: 0,
Premine: uint128.Zero,
SpacedRune: runes.NewSpacedRune(runes.NewRune(2055900680524219742), 0b10000000),
Symbol: '\u29c9',
Terms: &runes.Terms{
Amount: lo.ToPtr(uint128.From64(1)),
Cap: &uint128.Max,
HeightStart: lo.ToPtr(uint64(84000)),
HeightEnd: lo.ToPtr(uint64(2184000)),
OffsetStart: nil,
OffsetEnd: nil,
},
Turbo: true,
EtchingTxHash: chainhash.Hash{},
EtchedAt: time.Unix(0, 0),
},
common.NetworkFractalTestnet: {
RuneId: runes.RuneId{BlockHeight: 1, TxIndex: 0},
Number: 0,
Divisibility: 0,
Premine: uint128.Zero,
SpacedRune: runes.NewSpacedRune(runes.NewRune(2055900680524219742), 0b10000000),
Symbol: '\u29c9',
Terms: &runes.Terms{
Amount: lo.ToPtr(uint128.From64(1)),
Cap: &uint128.Max,
HeightStart: lo.ToPtr(uint64(84000)),
HeightEnd: lo.ToPtr(uint64(2184000)),
OffsetStart: nil,
OffsetEnd: nil,
},
Turbo: true,
EtchingTxHash: chainhash.Hash{},
EtchedAt: time.Unix(0, 0),
},
}
func NetworkHasGenesisRune(network common.Network) bool {
switch network {
case common.NetworkMainnet, common.NetworkFractalMainnet, common.NetworkFractalTestnet:
return true
case common.NetworkTestnet:
return false
default:
logger.Panic(fmt.Sprintf("unsupported network: %s", network))
return false
}
}
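Read together, these constants say two things per network: indexing resumes at StartingBlockHeader height + 1 (per the comment above), and only networks present in GenesisRuneConfigMap get the hard-coded genesis rune seeded by the processor. An illustrative consumer of both (import paths as they appear in this diff):

package main

import (
	"fmt"

	"github.com/gaze-network/indexer-network/common"
	"github.com/gaze-network/indexer-network/modules/runes/constants"
)

func main() {
	for _, network := range []common.Network{
		common.NetworkMainnet,
		common.NetworkTestnet,
		common.NetworkFractalMainnet,
		common.NetworkFractalTestnet,
	} {
		start := constants.StartingBlockHeader[network]
		fmt.Printf("%s: first indexed height %d, genesis rune seeded: %t\n",
			network, start.Height+1, constants.NetworkHasGenesisRune(network))
	}
}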

View File

@@ -1,5 +1,6 @@
BEGIN;
CREATE EXTENSION IF NOT EXISTS pg_trgm;
-- Indexer Client Information
CREATE TABLE IF NOT EXISTS "runes_indexer_stats" (
@@ -48,6 +49,7 @@ CREATE TABLE IF NOT EXISTS "runes_entries" (
"etched_at" TIMESTAMP NOT NULL
);
CREATE UNIQUE INDEX IF NOT EXISTS runes_entries_rune_idx ON "runes_entries" USING BTREE ("rune");
CREATE INDEX IF NOT EXISTS runes_entries_rune_gin_idx ON "runes_entries" USING GIN ("rune" gin_trgm_ops); -- to speed up queries with LIKE operator
CREATE UNIQUE INDEX IF NOT EXISTS runes_entries_number_idx ON "runes_entries" USING BTREE ("number");
CREATE TABLE IF NOT EXISTS "runes_entry_states" (
@@ -118,5 +120,7 @@ CREATE TABLE IF NOT EXISTS "runes_balances" (
"amount" DECIMAL NOT NULL,
PRIMARY KEY ("pkscript", "rune_id", "block_height")
);
CREATE INDEX IF NOT EXISTS runes_balances_rune_id_block_height_idx ON "runes_balances" USING BTREE ("rune_id", "block_height");
CREATE INDEX IF NOT EXISTS runes_balances_pkscript_block_height_idx ON "runes_balances" USING BTREE ("pkscript", "block_height");
COMMIT;

View File

@@ -0,0 +1,104 @@
-- name: BatchCreateRunesBalances :exec
INSERT INTO runes_balances ("pkscript", "block_height", "rune_id", "amount")
VALUES(
unnest(@pkscript_arr::TEXT[]),
unnest(@block_height_arr::INT[]),
unnest(@rune_id_arr::TEXT[]),
unnest(@amount_arr::DECIMAL[])
);
-- name: BatchCreateRuneEntries :exec
INSERT INTO runes_entries ("rune_id", "rune", "number", "spacers", "premine", "symbol", "divisibility", "terms", "terms_amount", "terms_cap", "terms_height_start", "terms_height_end", "terms_offset_start", "terms_offset_end", "turbo", "etching_block", "etching_tx_hash", "etched_at")
VALUES(
unnest(@rune_id_arr::TEXT[]),
unnest(@rune_arr::TEXT[]),
unnest(@number_arr::BIGINT[]),
unnest(@spacers_arr::INT[]),
unnest(@premine_arr::DECIMAL[]),
unnest(@symbol_arr::INT[]),
unnest(@divisibility_arr::SMALLINT[]),
unnest(@terms_arr::BOOLEAN[]),
unnest(@terms_amount_arr::DECIMAL[]),
unnest(@terms_cap_arr::DECIMAL[]),
unnest(@terms_height_start_arr::INT[]), -- nullable (need patch)
unnest(@terms_height_end_arr::INT[]), -- nullable (need patch)
unnest(@terms_offset_start_arr::INT[]), -- nullable (need patch)
unnest(@terms_offset_end_arr::INT[]), -- nullable (need patch)
unnest(@turbo_arr::BOOLEAN[]),
unnest(@etching_block_arr::INT[]),
unnest(@etching_tx_hash_arr::TEXT[]),
unnest(@etched_at_arr::TIMESTAMP[])
);
-- name: BatchCreateRuneEntryStates :exec
INSERT INTO runes_entry_states ("rune_id", "block_height", "mints", "burned_amount", "completed_at", "completed_at_height")
VALUES(
unnest(@rune_id_arr::TEXT[]),
unnest(@block_height_arr::INT[]),
unnest(@mints_arr::DECIMAL[]),
unnest(@burned_amount_arr::DECIMAL[]),
unnest(@completed_at_arr::TIMESTAMP[]),
unnest(@completed_at_height_arr::INT[]) -- nullable (need patch)
);
-- name: BatchCreateRunesOutpointBalances :exec
INSERT INTO runes_outpoint_balances ("rune_id", "pkscript", "tx_hash", "tx_idx", "amount", "block_height", "spent_height")
VALUES(
unnest(@rune_id_arr::TEXT[]),
unnest(@pkscript_arr::TEXT[]),
unnest(@tx_hash_arr::TEXT[]),
unnest(@tx_idx_arr::INT[]),
unnest(@amount_arr::DECIMAL[]),
unnest(@block_height_arr::INT[]),
unnest(@spent_height_arr::INT[]) -- nullable (need patch)
);
-- name: BatchSpendOutpointBalances :exec
UPDATE runes_outpoint_balances
SET "spent_height" = @spent_height::INT
FROM (
SELECT
unnest(@tx_hash_arr::TEXT[]) AS tx_hash,
unnest(@tx_idx_arr::INT[]) AS tx_idx
) AS input
WHERE "runes_outpoint_balances"."tx_hash" = "input"."tx_hash" AND "runes_outpoint_balances"."tx_idx" = "input"."tx_idx";
-- name: BatchCreateRunestones :exec
INSERT INTO runes_runestones ("tx_hash", "block_height", "etching", "etching_divisibility", "etching_premine", "etching_rune", "etching_spacers", "etching_symbol", "etching_terms", "etching_terms_amount", "etching_terms_cap", "etching_terms_height_start", "etching_terms_height_end", "etching_terms_offset_start", "etching_terms_offset_end", "etching_turbo", "edicts", "mint", "pointer", "cenotaph", "flaws")
VALUES(
unnest(@tx_hash_arr::TEXT[]),
unnest(@block_height_arr::INT[]),
unnest(@etching_arr::BOOLEAN[]),
unnest(@etching_divisibility_arr::SMALLINT[]), -- nullable (need patch)
unnest(@etching_premine_arr::DECIMAL[]),
unnest(@etching_rune_arr::TEXT[]), -- nullable (need patch)
unnest(@etching_spacers_arr::INT[]), -- nullable (need patch)
unnest(@etching_symbol_arr::INT[]), -- nullable (need patch)
unnest(@etching_terms_arr::BOOLEAN[]), -- nullable (need patch)
unnest(@etching_terms_amount_arr::DECIMAL[]),
unnest(@etching_terms_cap_arr::DECIMAL[]),
unnest(@etching_terms_height_start_arr::INT[]), -- nullable (need patch)
unnest(@etching_terms_height_end_arr::INT[]), -- nullable (need patch)
unnest(@etching_terms_offset_start_arr::INT[]), -- nullable (need patch)
unnest(@etching_terms_offset_end_arr::INT[]), -- nullable (need patch)
unnest(@etching_turbo_arr::BOOLEAN[]), -- nullable (need patch)
unnest(@edicts_arr::JSONB[]),
unnest(@mint_arr::TEXT[]), -- nullable (need patch)
unnest(@pointer_arr::INT[]), -- nullable (need patch)
unnest(@cenotaph_arr::BOOLEAN[]),
unnest(@flaws_arr::INT[])
);
-- name: BatchCreateRuneTransactions :exec
INSERT INTO runes_transactions ("hash", "block_height", "index", "timestamp", "inputs", "outputs", "mints", "burns", "rune_etched")
VALUES (
unnest(@hash_arr::TEXT[]),
unnest(@block_height_arr::INT[]),
unnest(@index_arr::INT[]),
unnest(@timestamp_arr::TIMESTAMP[]),
unnest(@inputs_arr::JSONB[]),
unnest(@outputs_arr::JSONB[]),
unnest(@mints_arr::JSONB[]),
unnest(@burns_arr::JSONB[]),
unnest(@rune_etched_arr::BOOLEAN[])
);
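These statements are the core of the multirow-insert change: each column is sent as one Postgres array and unnest() zips the arrays back into rows, so a whole block's worth of rows goes in a single round trip. Outside of sqlc, the same mechanics look roughly like this with pgx (DSN and values are placeholders; amounts are sent as text and cast server-side):

package main

import (
	"context"
	"log"

	"github.com/jackc/pgx/v5"
)

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, "postgres://localhost:5432/runes") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(ctx)

	// One Exec inserts all three rows; pgx encodes each Go slice as the
	// corresponding Postgres array.
	_, err = conn.Exec(ctx, `
		INSERT INTO runes_balances ("pkscript", "block_height", "rune_id", "amount")
		VALUES (
			unnest($1::TEXT[]),
			unnest($2::INT[]),
			unnest($3::TEXT[]),
			unnest($4::DECIMAL[])
		)`,
		[]string{"5120aa...", "5120bb...", "5120cc..."}, // placeholder pkscripts
		[]int32{840001, 840001, 840002},
		[]string{"840000:1", "840000:1", "840010:7"},
		[]string{"1000", "2500", "42"},
	)
	if err != nil {
		log.Fatal(err)
	}
}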

View File

@@ -2,22 +2,48 @@
WITH balances AS (
SELECT DISTINCT ON (rune_id) * FROM runes_balances WHERE pkscript = $1 AND block_height <= $2 ORDER BY rune_id, block_height DESC
)
SELECT * FROM balances WHERE amount > 0;
SELECT * FROM balances WHERE amount > 0 ORDER BY amount DESC, rune_id LIMIT $3 OFFSET $4;
-- name: GetBalancesByRuneId :many
WITH balances AS (
SELECT DISTINCT ON (pkscript) * FROM runes_balances WHERE rune_id = $1 AND block_height <= $2 ORDER BY pkscript, block_height DESC
)
SELECT * FROM balances WHERE amount > 0;
SELECT * FROM balances WHERE amount > 0 ORDER BY amount DESC, pkscript LIMIT $3 OFFSET $4;
-- name: GetBalanceByPkScriptAndRuneId :one
SELECT * FROM runes_balances WHERE pkscript = $1 AND rune_id = $2 AND block_height <= $3 ORDER BY block_height DESC LIMIT 1;
-- name: GetTotalHoldersByRuneIds :many
WITH balances AS (
SELECT DISTINCT ON (rune_id, pkscript) * FROM runes_balances WHERE rune_id = ANY(@rune_ids::TEXT[]) AND block_height <= @block_height ORDER BY rune_id, pkscript, block_height DESC
)
SELECT rune_id, COUNT(DISTINCT pkscript) FROM balances WHERE amount > 0 GROUP BY rune_id;
-- name: GetOutPointBalancesAtOutPoint :many
SELECT * FROM runes_outpoint_balances WHERE tx_hash = $1 AND tx_idx = $2;
-- name: GetUnspentOutPointBalancesByPkScript :many
SELECT * FROM runes_outpoint_balances WHERE pkscript = @pkScript AND block_height <= @block_height AND (spent_height IS NULL OR spent_height > @block_height);
-- name: GetRunesUTXOsByPkScript :many
SELECT tx_hash, tx_idx, max("pkscript") as pkscript, array_agg("rune_id") as rune_ids, array_agg("amount") as amounts
FROM runes_outpoint_balances
WHERE
pkscript = @pkScript AND
block_height <= @block_height AND
(spent_height IS NULL OR spent_height > @block_height)
GROUP BY tx_hash, tx_idx
ORDER BY tx_hash, tx_idx
LIMIT $1 OFFSET $2;
-- name: GetRunesUTXOsByRuneIdAndPkScript :many
SELECT tx_hash, tx_idx, max("pkscript") as pkscript, array_agg("rune_id") as rune_ids, array_agg("amount") as amounts
FROM runes_outpoint_balances
WHERE
pkscript = @pkScript AND
block_height <= @block_height AND
(spent_height IS NULL OR spent_height > @block_height)
GROUP BY tx_hash, tx_idx
HAVING array_agg("rune_id") @> @rune_ids::text[]
ORDER BY tx_hash, tx_idx
LIMIT $1 OFFSET $2;
-- name: GetRuneEntriesByRuneIds :many
WITH states AS (
@@ -37,6 +63,49 @@ SELECT * FROM runes_entries
LEFT JOIN states ON runes_entries.rune_id = states.rune_id
WHERE runes_entries.rune_id = ANY(@rune_ids::text[]) AND etching_block <= @height;
-- name: GetRuneEntries :many
WITH states AS (
-- select latest state
SELECT DISTINCT ON (rune_id) * FROM runes_entry_states WHERE block_height <= @height ORDER BY rune_id, block_height DESC
)
SELECT * FROM runes_entries
LEFT JOIN states ON runes_entries.rune_id = states.rune_id
WHERE (
@search = '' OR
runes_entries.rune ILIKE @search || '%'
)
ORDER BY runes_entries.number
LIMIT @_limit OFFSET @_offset;
-- name: GetOngoingRuneEntries :many
WITH states AS (
-- select latest state
SELECT DISTINCT ON (rune_id) * FROM runes_entry_states WHERE block_height <= @height::integer ORDER BY rune_id, block_height DESC
)
SELECT * FROM runes_entries
LEFT JOIN states ON runes_entries.rune_id = states.rune_id
WHERE (
runes_entries.terms = TRUE AND
COALESCE(runes_entries.terms_amount, 0) != 0 AND
COALESCE(runes_entries.terms_cap, 0) != 0 AND
states.mints < runes_entries.terms_cap AND
(
runes_entries.terms_height_start IS NULL OR runes_entries.terms_height_start <= @height::integer
) AND (
runes_entries.terms_height_end IS NULL OR @height::integer <= runes_entries.terms_height_end
) AND (
runes_entries.terms_offset_start IS NULL OR runes_entries.terms_offset_start + runes_entries.etching_block <= @height::integer
) AND (
runes_entries.terms_offset_end IS NULL OR @height::integer <= runes_entries.terms_offset_start + runes_entries.etching_block
)
) AND (
@search::text = '' OR
runes_entries.rune ILIKE '%' || @search::text || '%'
)
ORDER BY states.mints DESC
LIMIT @_limit OFFSET @_offset;
-- name: GetRuneIdFromRune :one
SELECT rune_id FROM runes_entries WHERE rune = $1;
@@ -55,8 +124,14 @@ SELECT * FROM runes_transactions
OR runes_transactions.burns ? @rune_id
OR (runes_transactions.rune_etched = TRUE AND runes_transactions.block_height = @rune_id_block_height AND runes_transactions.index = @rune_id_tx_index)
) AND (
@block_height::INT = 0 OR runes_transactions.block_height = @block_height::INT -- if @block_height > 0, apply block_height filter
);
@from_block <= runes_transactions.block_height AND runes_transactions.block_height <= @to_block
)
ORDER BY runes_transactions.block_height DESC, runes_transactions.index DESC LIMIT $1 OFFSET $2;
-- name: GetRuneTransaction :one
SELECT * FROM runes_transactions
LEFT JOIN runes_runestones ON runes_transactions.hash = runes_runestones.tx_hash
WHERE hash = $1 LIMIT 1;
-- name: CountRuneEntries :one
SELECT COUNT(*) FROM runes_entries;
@@ -75,13 +150,13 @@ INSERT INTO runes_transactions (hash, block_height, index, timestamp, inputs, ou
INSERT INTO runes_runestones (tx_hash, block_height, etching, etching_divisibility, etching_premine, etching_rune, etching_spacers, etching_symbol, etching_terms, etching_terms_amount, etching_terms_cap, etching_terms_height_start, etching_terms_height_end, etching_terms_offset_start, etching_terms_offset_end, etching_turbo, edicts, mint, pointer, cenotaph, flaws)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21);
-- name: CreateOutPointBalances :batchexec
-- name: CreateOutPointBalance :exec
INSERT INTO runes_outpoint_balances (rune_id, pkscript, tx_hash, tx_idx, amount, block_height, spent_height) VALUES ($1, $2, $3, $4, $5, $6, $7);
-- name: SpendOutPointBalances :exec
-- name: SpendOutPointBalance :exec
UPDATE runes_outpoint_balances SET spent_height = $1 WHERE tx_hash = $2 AND tx_idx = $3;
-- name: CreateRuneBalanceAtBlock :batchexec
-- name: CreateRuneBalance :exec
INSERT INTO runes_balances (pkscript, block_height, rune_id, amount) VALUES ($1, $2, $3, $4);
-- name: GetLatestIndexedBlock :one

View File

@@ -3,11 +3,11 @@ package datagateway
import (
"context"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/uint128"
)
type RunesDataGateway interface {
@@ -27,10 +27,12 @@ type RunesReaderDataGateway interface {
GetLatestBlock(ctx context.Context) (types.BlockHeader, error)
GetIndexedBlockByHeight(ctx context.Context, height int64) (*entity.IndexedBlock, error)
// GetRuneTransactions returns the runes transactions, filterable by pkScript, runeId and height. If pkScript, runeId or height is the zero value, that filter is ignored.
GetRuneTransactions(ctx context.Context, pkScript []byte, runeId runes.RuneId, height uint64) ([]*entity.RuneTransaction, error)
GetRuneTransactions(ctx context.Context, pkScript []byte, runeId runes.RuneId, fromBlock, toBlock uint64, limit int32, offset int32) ([]*entity.RuneTransaction, error)
GetRuneTransaction(ctx context.Context, txHash chainhash.Hash) (*entity.RuneTransaction, error)
GetRunesBalancesAtOutPoint(ctx context.Context, outPoint wire.OutPoint) (map[runes.RuneId]*entity.OutPointBalance, error)
GetUnspentOutPointBalancesByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64) ([]*entity.OutPointBalance, error)
GetRunesUTXOsByRuneIdAndPkScript(ctx context.Context, runeId runes.RuneId, pkScript []byte, blockHeight uint64, limit int32, offset int32) ([]*entity.RunesUTXO, error)
GetRunesUTXOsByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64, limit int32, offset int32) ([]*entity.RunesUTXO, error)
// GetRuneIdFromRune returns the RuneId for the given rune. Returns errs.NotFound if the rune entry is not found.
GetRuneIdFromRune(ctx context.Context, rune runes.Rune) (runes.RuneId, error)
// GetRuneEntryByRuneId returns the RuneEntry for the given runeId. Returns errs.NotFound if the rune entry is not found.
@@ -41,25 +43,33 @@ type RunesReaderDataGateway interface {
GetRuneEntryByRuneIdAndHeight(ctx context.Context, runeId runes.RuneId, blockHeight uint64) (*runes.RuneEntry, error)
// GetRuneEntryByRuneIdAndHeightBatch returns the RuneEntries for the given runeIds and block height.
GetRuneEntryByRuneIdAndHeightBatch(ctx context.Context, runeIds []runes.RuneId, blockHeight uint64) (map[runes.RuneId]*runes.RuneEntry, error)
// GetRuneEntries returns a list of rune entries, sorted by etching order. If search is not empty, it will filter the results by rune name (prefix).
GetRuneEntries(ctx context.Context, search string, blockHeight uint64, limit int32, offset int32) ([]*runes.RuneEntry, error)
// GetOngoingRuneEntries returns a list of ongoing rune entries (can still mint), sorted by mint progress percent. If search is not empty, it will filter the results by rune name (prefix).
GetOngoingRuneEntries(ctx context.Context, search string, blockHeight uint64, limit int32, offset int32) ([]*runes.RuneEntry, error)
// CountRuneEntries returns the number of existing rune entries.
CountRuneEntries(ctx context.Context) (uint64, error)
// GetBalancesByPkScript returns the balances for the given pkScript at the given blockHeight.
GetBalancesByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64) (map[runes.RuneId]*entity.Balance, error)
// Use limit = -1 as no limit.
GetBalancesByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64, limit int32, offset int32) ([]*entity.Balance, error)
// GetBalancesByRuneId returns the balances for the given runeId at the given blockHeight.
// Cannot use []byte as a map key, so we return a slice instead.
GetBalancesByRuneId(ctx context.Context, runeId runes.RuneId, blockHeight uint64) ([]*entity.Balance, error)
// Use limit = -1 as no limit.
GetBalancesByRuneId(ctx context.Context, runeId runes.RuneId, blockHeight uint64, limit int32, offset int32) ([]*entity.Balance, error)
// GetBalanceByPkScriptAndRuneId returns the balance for the given pkScript and runeId at the given blockHeight.
GetBalanceByPkScriptAndRuneId(ctx context.Context, pkScript []byte, runeId runes.RuneId, blockHeight uint64) (*entity.Balance, error)
// GetTotalHoldersByRuneIds returns the total holders of each of the given runeIds.
GetTotalHoldersByRuneIds(ctx context.Context, runeIds []runes.RuneId, blockHeight uint64) (map[runes.RuneId]int64, error)
}
type RunesWriterDataGateway interface {
CreateRuneEntry(ctx context.Context, entry *runes.RuneEntry, blockHeight uint64) error
CreateRuneEntryState(ctx context.Context, entry *runes.RuneEntry, blockHeight uint64) error
CreateRuneEntries(ctx context.Context, entries []*runes.RuneEntry) error
CreateRuneEntryStates(ctx context.Context, entries []*runes.RuneEntry, blockHeight uint64) error
CreateOutPointBalances(ctx context.Context, outPointBalances []*entity.OutPointBalance) error
SpendOutPointBalances(ctx context.Context, outPoint wire.OutPoint, blockHeight uint64) error
CreateRuneBalances(ctx context.Context, params []CreateRuneBalancesParams) error
CreateRuneTransaction(ctx context.Context, tx *entity.RuneTransaction) error
SpendOutPointBalancesBatch(ctx context.Context, outPoints []wire.OutPoint, blockHeight uint64) error
CreateRuneBalances(ctx context.Context, params []*entity.Balance) error
CreateRuneTransactions(ctx context.Context, txs []*entity.RuneTransaction) error
CreateIndexedBlock(ctx context.Context, block *entity.IndexedBlock) error
// TODO: collapse these into a single function (ResetStateToHeight)?
@@ -72,10 +82,3 @@ type RunesWriterDataGateway interface {
UnspendOutPointBalancesSinceHeight(ctx context.Context, height uint64) error
DeleteRuneBalancesSinceHeight(ctx context.Context, height uint64) error
}
type CreateRuneBalancesParams struct {
PkScript []byte
RuneId runes.RuneId
Balance uint128.Uint128
BlockHeight uint64
}
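Note the convention added in the comments above: passing limit = -1 to the balance readers means no limit. A hedged sketch of a caller that depends on it (the concrete gateway implementation is assumed to be wired in elsewhere, e.g. the postgres repository):

package main

import (
	"context"
	"fmt"

	"github.com/gaze-network/indexer-network/modules/runes/datagateway"
)

// countHeldRunes fetches every balance for a pkScript at a block height in one
// call by passing limit = -1 ("no limit") and offset = 0.
func countHeldRunes(ctx context.Context, dg datagateway.RunesReaderDataGateway, pkScript []byte, height uint64) (int, error) {
	balances, err := dg.GetBalancesByPkScript(ctx, pkScript, height, -1, 0)
	if err != nil {
		return 0, err
	}
	return len(balances), nil
}

func main() {
	fmt.Println("pass a concrete RunesReaderDataGateway into countHeldRunes to use it")
}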

View File

@@ -11,6 +11,7 @@ import (
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/runes/constants"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/uint128"
@@ -28,7 +29,7 @@ func (p *Processor) calculateEventHash(header types.BlockHeader) (chainhash.Hash
func (p *Processor) getHashPayload(header types.BlockHeader) ([]byte, error) {
var sb strings.Builder
sb.WriteString("payload:v" + strconv.Itoa(EventHashVersion) + ":")
sb.WriteString("payload:v" + strconv.Itoa(constants.EventHashVersion) + ":")
sb.WriteString("blockHash:")
sb.Write(header.Hash[:])

View File

@@ -0,0 +1,23 @@
package entity
import (
"github.com/btcsuite/btcd/wire"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/uint128"
)
type RunesUTXOBalance struct {
RuneId runes.RuneId
Amount uint128.Uint128
}
type RunesUTXO struct {
PkScript []byte
OutPoint wire.OutPoint
RuneBalances []RunesUTXOBalance
}
type RunesUTXOWithSats struct {
RunesUTXO
Sats int64
}

View File

@@ -4,13 +4,13 @@ import (
"context"
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/indexer"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/runes/constants"
"github.com/gaze-network/indexer-network/modules/runes/datagateway"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
"github.com/gaze-network/indexer-network/modules/runes/runes"
@@ -19,7 +19,6 @@ import (
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
"github.com/gaze-network/indexer-network/pkg/reportingclient"
"github.com/gaze-network/uint128"
"github.com/samber/lo"
)
// Make sure to implement the Bitcoin Processor interface
@@ -31,6 +30,7 @@ type Processor struct {
bitcoinClient btcclient.Contract
network common.Network
reportingClient *reportingclient.ReportingClient
cleanupFuncs []func(context.Context) error
newRuneEntries map[runes.RuneId]*runes.RuneEntry
newRuneEntryStates map[runes.RuneId]*runes.RuneEntry
@@ -40,13 +40,14 @@ type Processor struct {
newRuneTxs []*entity.RuneTransaction
}
func NewProcessor(runesDg datagateway.RunesDataGateway, indexerInfoDg datagateway.IndexerInfoDataGateway, bitcoinClient btcclient.Contract, network common.Network, reportingClient *reportingclient.ReportingClient) *Processor {
func NewProcessor(runesDg datagateway.RunesDataGateway, indexerInfoDg datagateway.IndexerInfoDataGateway, bitcoinClient btcclient.Contract, network common.Network, reportingClient *reportingclient.ReportingClient, cleanupFuncs []func(context.Context) error) *Processor {
return &Processor{
runesDg: runesDg,
indexerInfoDg: indexerInfoDg,
bitcoinClient: bitcoinClient,
network: network,
reportingClient: reportingClient,
cleanupFuncs: cleanupFuncs,
newRuneEntries: make(map[runes.RuneId]*runes.RuneEntry),
newRuneEntryStates: make(map[runes.RuneId]*runes.RuneEntry),
newOutPointBalances: make(map[wire.OutPoint][]*entity.OutPointBalance),
@@ -66,8 +67,8 @@ func (p *Processor) VerifyStates(ctx context.Context) error {
if err := p.ensureValidState(ctx); err != nil {
return errors.Wrap(err, "error during ensureValidState")
}
if p.network == common.NetworkMainnet {
if err := p.ensureGenesisRune(ctx); err != nil {
if constants.NetworkHasGenesisRune(p.network) {
if err := p.ensureGenesisRune(ctx, p.network); err != nil {
return errors.Wrap(err, "error during ensureGenesisRune")
}
}
@@ -87,17 +88,17 @@ func (p *Processor) ensureValidState(ctx context.Context) error {
// if not found, set indexer state
if errors.Is(err, errs.NotFound) {
if err := p.indexerInfoDg.SetIndexerState(ctx, entity.IndexerState{
DBVersion: DBVersion,
EventHashVersion: EventHashVersion,
DBVersion: constants.DBVersion,
EventHashVersion: constants.EventHashVersion,
}); err != nil {
return errors.Wrap(err, "failed to set indexer state")
}
} else {
if indexerState.DBVersion != DBVersion {
return errors.Wrapf(errs.ConflictSetting, "db version mismatch: current version is %d. Please upgrade to version %d", indexerState.DBVersion, DBVersion)
if indexerState.DBVersion != constants.DBVersion {
return errors.Wrapf(errs.ConflictSetting, "db version mismatch: current version is %d. Please upgrade to version %d", indexerState.DBVersion, constants.DBVersion)
}
if indexerState.EventHashVersion != EventHashVersion {
return errors.Wrapf(errs.ConflictSetting, "event version mismatch: current version is %d. Please reset rune's db first.", indexerState.EventHashVersion, EventHashVersion)
if indexerState.EventHashVersion != constants.EventHashVersion {
return errors.Wrapf(errs.ConflictSetting, "event version mismatch: current version is %d. Please reset rune's db first.", indexerState.EventHashVersion, constants.EventHashVersion)
}
}
@@ -117,39 +118,37 @@ func (p *Processor) ensureValidState(ctx context.Context) error {
return nil
}
var genesisRuneId = runes.RuneId{BlockHeight: 1, TxIndex: 0}
func (p *Processor) ensureGenesisRune(ctx context.Context) error {
_, err := p.runesDg.GetRuneEntryByRuneId(ctx, genesisRuneId)
func (p *Processor) ensureGenesisRune(ctx context.Context, network common.Network) error {
genesisRuneConfig, ok := constants.GenesisRuneConfigMap[network]
if !ok {
logger.Panic("genesis rune config not found", slogx.Stringer("network", network))
}
_, err := p.runesDg.GetRuneEntryByRuneId(ctx, genesisRuneConfig.RuneId)
if err != nil && !errors.Is(err, errs.NotFound) {
return errors.Wrap(err, "failed to get genesis rune entry")
}
if errors.Is(err, errs.NotFound) {
runeEntry := &runes.RuneEntry{
RuneId: genesisRuneId,
Number: 0,
Divisibility: 0,
Premine: uint128.Zero,
SpacedRune: runes.NewSpacedRune(runes.NewRune(2055900680524219742), 0b10000000),
Symbol: '\u29c9',
Terms: &runes.Terms{
Amount: lo.ToPtr(uint128.From64(1)),
Cap: &uint128.Max,
HeightStart: lo.ToPtr(uint64(common.HalvingInterval * 4)),
HeightEnd: lo.ToPtr(uint64(common.HalvingInterval * 5)),
OffsetStart: nil,
OffsetEnd: nil,
},
Turbo: true,
RuneId: genesisRuneConfig.RuneId,
Number: genesisRuneConfig.Number,
Divisibility: genesisRuneConfig.Divisibility,
Premine: genesisRuneConfig.Premine,
SpacedRune: genesisRuneConfig.SpacedRune,
Symbol: genesisRuneConfig.Symbol,
Terms: genesisRuneConfig.Terms,
Turbo: genesisRuneConfig.Turbo,
Mints: uint128.Zero,
BurnedAmount: uint128.Zero,
CompletedAt: time.Time{},
CompletedAtHeight: nil,
EtchingBlock: 1,
EtchingTxHash: chainhash.Hash{},
EtchedAt: time.Time{},
EtchingBlock: genesisRuneConfig.RuneId.BlockHeight,
EtchingTxHash: genesisRuneConfig.EtchingTxHash,
EtchedAt: genesisRuneConfig.EtchedAt,
}
if err := p.runesDg.CreateRuneEntry(ctx, runeEntry, genesisRuneId.BlockHeight); err != nil {
if err := p.runesDg.CreateRuneEntries(ctx, []*runes.RuneEntry{runeEntry}); err != nil {
return errors.Wrap(err, "failed to create genesis rune entry")
}
if err := p.runesDg.CreateRuneEntryStates(ctx, []*runes.RuneEntry{runeEntry}, genesisRuneConfig.RuneId.BlockHeight); err != nil {
return errors.Wrap(err, "failed to create genesis rune entry")
}
}
@@ -164,7 +163,7 @@ func (p *Processor) CurrentBlock(ctx context.Context) (types.BlockHeader, error)
blockHeader, err := p.runesDg.GetLatestBlock(ctx)
if err != nil {
if errors.Is(err, errs.NotFound) {
return startingBlockHeader[p.network], nil
return constants.StartingBlockHeader[p.network], nil
}
return types.BlockHeader{}, errors.Wrap(err, "failed to get latest block")
}
@@ -228,3 +227,13 @@ func (p *Processor) RevertData(ctx context.Context, from int64) error {
}
return nil
}
func (p *Processor) Shutdown(ctx context.Context) error {
var errs []error
for _, cleanup := range p.cleanupFuncs {
if err := cleanup(ctx); err != nil {
errs = append(errs, err)
}
}
return errors.WithStack(errors.Join(errs...))
}
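The new Shutdown hook keeps going even when a cleanup fails and folds every failure into one error with errors.Join, so the caller sees all of them at once. The same pattern reduced to the standard library (the real Processor receives its cleanup funcs through NewProcessor above):

package main

import (
	"context"
	"errors"
	"fmt"
)

type processor struct {
	cleanupFuncs []func(context.Context) error
}

func (p *processor) Shutdown(ctx context.Context) error {
	var errs []error
	for _, cleanup := range p.cleanupFuncs {
		// Every cleanup runs even if an earlier one already failed.
		if err := cleanup(ctx); err != nil {
			errs = append(errs, err)
		}
	}
	return errors.Join(errs...) // nil when nothing failed
}

func main() {
	p := &processor{cleanupFuncs: []func(context.Context) error{
		func(context.Context) error { return nil },
		func(context.Context) error { return errors.New("close db pool: already closed") },
	}}
	fmt.Println(p.Shutdown(context.Background()))
}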

View File

@@ -13,7 +13,7 @@ import (
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/runes/datagateway"
"github.com/gaze-network/indexer-network/modules/runes/constants"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/indexer-network/pkg/logger"
@@ -26,19 +26,26 @@ import (
func (p *Processor) Process(ctx context.Context, blocks []*types.Block) error {
for _, block := range blocks {
ctx := logger.WithContext(ctx, slog.Int64("height", block.Header.Height))
logger.DebugContext(ctx, "Processing new block", slog.Int("txs", len(block.Transactions)))
logger.InfoContext(ctx, "Processing new block",
slogx.String("event", "runes_processor_processing_block"),
slog.Int("txs", len(block.Transactions)),
)
start := time.Now()
for _, tx := range block.Transactions {
if err := p.processTx(ctx, tx, block.Header); err != nil {
return errors.Wrap(err, "failed to process tx")
}
}
timeTakenToProcess := time.Since(start)
logger.InfoContext(ctx, "Processed block",
slogx.String("event", "runes_processor_processed_block"),
slog.Duration("time_taken", timeTakenToProcess),
)
if err := p.flushBlock(ctx, block.Header); err != nil {
return errors.Wrap(err, "failed to flush block")
}
logger.DebugContext(ctx, "Inserted new block")
}
return nil
}
@@ -146,7 +153,7 @@ func (p *Processor) processTx(ctx context.Context, tx *types.Transaction, blockH
// find all non-OP_RETURN outputs
var destinations []int
for i, txOut := range tx.TxOut {
if txOut.IsOpReturn() {
if !txOut.IsOpReturn() {
destinations = append(destinations, i)
}
}
@@ -466,7 +473,7 @@ func (p *Processor) txCommitsToRune(ctx context.Context, tx *types.Transaction,
// It is impossible to verify that input utxo is a P2TR output with just the input.
// Need to verify with utxo's pk script.
prevTx, err := p.bitcoinClient.GetTransactionByHash(ctx, txIn.PreviousOutTxHash)
prevTx, blockHeight, err := p.bitcoinClient.GetRawTransactionAndHeightByTxHash(ctx, txIn.PreviousOutTxHash)
if err != nil && errors.Is(err, errs.NotFound) {
continue
}
@@ -479,7 +486,7 @@ func (p *Processor) txCommitsToRune(ctx context.Context, tx *types.Transaction,
break
}
// input must be mature enough
confirmations := tx.BlockHeight - prevTx.BlockHeight + 1
confirmations := tx.BlockHeight - blockHeight + 1
if confirmations < runes.RUNE_COMMIT_BLOCKS {
continue
}
@@ -668,6 +675,7 @@ func (p *Processor) getRunesBalancesAtOutPoint(ctx context.Context, outPoint wir
}
func (p *Processor) flushBlock(ctx context.Context, blockHeader types.BlockHeader) error {
start := time.Now()
runesDgTx, err := p.runesDg.BeginRunesTx(ctx)
if err != nil {
return errors.Wrap(err, "failed to begin runes tx")
@@ -687,10 +695,10 @@ func (p *Processor) flushBlock(ctx context.Context, blockHeader types.BlockHeade
return errors.Wrap(err, "failed to calculate event hash")
}
prevIndexedBlock, err := runesDgTx.GetIndexedBlockByHeight(ctx, blockHeader.Height-1)
if err != nil && errors.Is(err, errs.NotFound) && blockHeader.Height-1 == startingBlockHeader[p.network].Height {
if err != nil && errors.Is(err, errs.NotFound) && blockHeader.Height-1 == constants.StartingBlockHeader[p.network].Height {
prevIndexedBlock = &entity.IndexedBlock{
Height: startingBlockHeader[p.network].Height,
Hash: startingBlockHeader[p.network].Hash,
Height: constants.StartingBlockHeader[p.network].Height,
Hash: chainhash.Hash{},
EventHash: chainhash.Hash{},
CumulativeEventHash: chainhash.Hash{},
}
@@ -714,86 +722,90 @@ func (p *Processor) flushBlock(ctx context.Context, blockHeader types.BlockHeade
return errors.Wrap(err, "failed to create indexed block")
}
// flush new rune entries
{
for _, runeEntry := range p.newRuneEntries {
if err := runesDgTx.CreateRuneEntry(ctx, runeEntry, uint64(blockHeader.Height)); err != nil {
return errors.Wrap(err, "failed to create rune entry")
}
}
p.newRuneEntries = make(map[runes.RuneId]*runes.RuneEntry)
newRuneEntries := lo.Values(p.newRuneEntries)
if err := runesDgTx.CreateRuneEntries(ctx, newRuneEntries); err != nil {
return errors.Wrap(err, "failed to create rune entry")
}
p.newRuneEntries = make(map[runes.RuneId]*runes.RuneEntry)
// flush new rune entry states
{
for _, runeEntry := range p.newRuneEntryStates {
if err := runesDgTx.CreateRuneEntryState(ctx, runeEntry, uint64(blockHeader.Height)); err != nil {
return errors.Wrap(err, "failed to create rune entry state")
}
}
p.newRuneEntryStates = make(map[runes.RuneId]*runes.RuneEntry)
newRuneEntryStates := lo.Values(p.newRuneEntryStates)
if err := runesDgTx.CreateRuneEntryStates(ctx, newRuneEntryStates, uint64(blockHeader.Height)); err != nil {
return errors.Wrap(err, "failed to create rune entry state")
}
p.newRuneEntryStates = make(map[runes.RuneId]*runes.RuneEntry)
// flush new outpoint balances
{
newBalances := make([]*entity.OutPointBalance, 0)
for _, balances := range p.newOutPointBalances {
newBalances = append(newBalances, balances...)
}
if err := runesDgTx.CreateOutPointBalances(ctx, newBalances); err != nil {
return errors.Wrap(err, "failed to create outpoint balances")
}
p.newOutPointBalances = make(map[wire.OutPoint][]*entity.OutPointBalance)
newOutpointBalances := make([]*entity.OutPointBalance, 0)
for _, balances := range p.newOutPointBalances {
newOutpointBalances = append(newOutpointBalances, balances...)
}
if err := runesDgTx.CreateOutPointBalances(ctx, newOutpointBalances); err != nil {
return errors.Wrap(err, "failed to create outpoint balances")
}
p.newOutPointBalances = make(map[wire.OutPoint][]*entity.OutPointBalance)
// flush new spend outpoints
{
for _, outPoint := range p.newSpendOutPoints {
if err := runesDgTx.SpendOutPointBalances(ctx, outPoint, uint64(blockHeader.Height)); err != nil {
return errors.Wrap(err, "failed to create spend outpoint")
}
}
p.newSpendOutPoints = make([]wire.OutPoint, 0)
newSpendOutPoints := p.newSpendOutPoints
if err := runesDgTx.SpendOutPointBalancesBatch(ctx, newSpendOutPoints, uint64(blockHeader.Height)); err != nil {
return errors.Wrap(err, "failed to create spend outpoint")
}
// flush new balances
{
params := make([]datagateway.CreateRuneBalancesParams, 0)
for pkScriptStr, balances := range p.newBalances {
pkScript, err := hex.DecodeString(pkScriptStr)
if err != nil {
return errors.Wrap(err, "failed to decode pk script")
}
for runeId, balance := range balances {
params = append(params, datagateway.CreateRuneBalancesParams{
PkScript: pkScript,
RuneId: runeId,
Balance: balance,
BlockHeight: uint64(blockHeader.Height),
})
}
p.newSpendOutPoints = make([]wire.OutPoint, 0)
// flush new balances
newBalances := make([]*entity.Balance, 0)
for pkScriptStr, balances := range p.newBalances {
pkScript, err := hex.DecodeString(pkScriptStr)
if err != nil {
return errors.Wrap(err, "failed to decode pk script")
}
if err := runesDgTx.CreateRuneBalances(ctx, params); err != nil {
return errors.Wrap(err, "failed to create balances at block")
for runeId, balance := range balances {
newBalances = append(newBalances, &entity.Balance{
PkScript: pkScript,
RuneId: runeId,
Amount: balance,
BlockHeight: uint64(blockHeader.Height),
})
}
p.newBalances = make(map[string]map[runes.RuneId]uint128.Uint128)
}
if err := runesDgTx.CreateRuneBalances(ctx, newBalances); err != nil {
return errors.Wrap(err, "failed to create balances at block")
}
p.newBalances = make(map[string]map[runes.RuneId]uint128.Uint128)
// flush new rune transactions
{
for _, runeTx := range p.newRuneTxs {
if err := runesDgTx.CreateRuneTransaction(ctx, runeTx); err != nil {
return errors.Wrap(err, "failed to create rune transaction")
}
}
p.newRuneTxs = make([]*entity.RuneTransaction, 0)
newRuneTxs := p.newRuneTxs
if err := runesDgTx.CreateRuneTransactions(ctx, newRuneTxs); err != nil {
return errors.Wrap(err, "failed to create rune transaction")
}
p.newRuneTxs = make([]*entity.RuneTransaction, 0)
if err := runesDgTx.Commit(ctx); err != nil {
return errors.Wrap(err, "failed to commit runes tx")
}
timeTaken := time.Since(start)
logger.InfoContext(ctx, "Flushed block",
slogx.String("event", "runes_processor_flushed_block"),
slog.Int64("height", blockHeader.Height),
slog.String("hash", blockHeader.Hash.String()),
slog.String("event_hash", hex.EncodeToString(eventHash[:])),
slog.String("cumulative_event_hash", hex.EncodeToString(cumulativeEventHash[:])),
slog.Int("new_rune_entries", len(newRuneEntries)),
slog.Int("new_rune_entry_states", len(newRuneEntryStates)),
slog.Int("new_outpoint_balances", len(newOutpointBalances)),
slog.Int("new_spend_outpoints", len(newSpendOutPoints)),
slog.Int("new_balances", len(newBalances)),
slog.Int("new_rune_txs", len(newRuneTxs)),
slogx.Duration("time_taken", timeTaken),
)
// submit event to reporting system
if p.reportingClient != nil {
if err := p.reportingClient.SubmitBlockReport(ctx, reportingclient.SubmitBlockReportPayload{
Type: "runes",
ClientVersion: Version,
DBVersion: DBVersion,
EventHashVersion: EventHashVersion,
ClientVersion: constants.Version,
DBVersion: constants.DBVersion,
EventHashVersion: constants.EventHashVersion,
Network: p.network,
BlockHeight: uint64(blockHeader.Height),
BlockHash: blockHeader.Hash,

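The extra logging in Process and flushBlock is plain structured logging: time the stage, then emit one record carrying the counts and the duration. A compact stand-alone equivalent using log/slog (the repo's logger and slogx packages appear to wrap the same API; field values here are made up):

package main

import (
	"context"
	"log/slog"
	"os"
	"time"
)

func main() {
	logger := slog.New(slog.NewJSONHandler(os.Stdout, nil))
	ctx := context.Background()

	start := time.Now()
	time.Sleep(25 * time.Millisecond) // stand-in for the actual flush work

	logger.InfoContext(ctx, "Flushed block",
		slog.String("event", "runes_processor_flushed_block"),
		slog.Int64("height", 840001),
		slog.Int("new_rune_entries", 2),
		slog.Int("new_rune_txs", 153),
		slog.Duration("time_taken", time.Since(start)),
	)
}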
View File

@@ -1,130 +0,0 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
// source: batch.go
package gen
import (
"context"
"errors"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgtype"
)
var (
ErrBatchAlreadyClosed = errors.New("batch already closed")
)
const createOutPointBalances = `-- name: CreateOutPointBalances :batchexec
INSERT INTO runes_outpoint_balances (rune_id, pkscript, tx_hash, tx_idx, amount, block_height, spent_height) VALUES ($1, $2, $3, $4, $5, $6, $7)
`
type CreateOutPointBalancesBatchResults struct {
br pgx.BatchResults
tot int
closed bool
}
type CreateOutPointBalancesParams struct {
RuneID string
Pkscript string
TxHash string
TxIdx int32
Amount pgtype.Numeric
BlockHeight int32
SpentHeight pgtype.Int4
}
func (q *Queries) CreateOutPointBalances(ctx context.Context, arg []CreateOutPointBalancesParams) *CreateOutPointBalancesBatchResults {
batch := &pgx.Batch{}
for _, a := range arg {
vals := []interface{}{
a.RuneID,
a.Pkscript,
a.TxHash,
a.TxIdx,
a.Amount,
a.BlockHeight,
a.SpentHeight,
}
batch.Queue(createOutPointBalances, vals...)
}
br := q.db.SendBatch(ctx, batch)
return &CreateOutPointBalancesBatchResults{br, len(arg), false}
}
func (b *CreateOutPointBalancesBatchResults) Exec(f func(int, error)) {
defer b.br.Close()
for t := 0; t < b.tot; t++ {
if b.closed {
if f != nil {
f(t, ErrBatchAlreadyClosed)
}
continue
}
_, err := b.br.Exec()
if f != nil {
f(t, err)
}
}
}
func (b *CreateOutPointBalancesBatchResults) Close() error {
b.closed = true
return b.br.Close()
}
const createRuneBalanceAtBlock = `-- name: CreateRuneBalanceAtBlock :batchexec
INSERT INTO runes_balances (pkscript, block_height, rune_id, amount) VALUES ($1, $2, $3, $4)
`
type CreateRuneBalanceAtBlockBatchResults struct {
br pgx.BatchResults
tot int
closed bool
}
type CreateRuneBalanceAtBlockParams struct {
Pkscript string
BlockHeight int32
RuneID string
Amount pgtype.Numeric
}
func (q *Queries) CreateRuneBalanceAtBlock(ctx context.Context, arg []CreateRuneBalanceAtBlockParams) *CreateRuneBalanceAtBlockBatchResults {
batch := &pgx.Batch{}
for _, a := range arg {
vals := []interface{}{
a.Pkscript,
a.BlockHeight,
a.RuneID,
a.Amount,
}
batch.Queue(createRuneBalanceAtBlock, vals...)
}
br := q.db.SendBatch(ctx, batch)
return &CreateRuneBalanceAtBlockBatchResults{br, len(arg), false}
}
func (b *CreateRuneBalanceAtBlockBatchResults) Exec(f func(int, error)) {
defer b.br.Close()
for t := 0; t < b.tot; t++ {
if b.closed {
if f != nil {
f(t, ErrBatchAlreadyClosed)
}
continue
}
_, err := b.br.Exec()
if f != nil {
f(t, err)
}
}
}
func (b *CreateRuneBalanceAtBlockBatchResults) Close() error {
b.closed = true
return b.br.Close()
}

View File

@@ -0,0 +1,319 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.27.0
// source: batch.sql
package gen
import (
"context"
"github.com/jackc/pgx/v5/pgtype"
)
const batchCreateRuneEntries = `-- name: BatchCreateRuneEntries :exec
INSERT INTO runes_entries ("rune_id", "rune", "number", "spacers", "premine", "symbol", "divisibility", "terms", "terms_amount", "terms_cap", "terms_height_start", "terms_height_end", "terms_offset_start", "terms_offset_end", "turbo", "etching_block", "etching_tx_hash", "etched_at")
VALUES(
unnest($1::TEXT[]),
unnest($2::TEXT[]),
unnest($3::BIGINT[]),
unnest($4::INT[]),
unnest($5::DECIMAL[]),
unnest($6::INT[]),
unnest($7::SMALLINT[]),
unnest($8::BOOLEAN[]),
unnest($9::DECIMAL[]),
unnest($10::DECIMAL[]),
unnest($11::INT[]), -- nullable (need patch)
unnest($12::INT[]), -- nullable (need patch)
unnest($13::INT[]), -- nullable (need patch)
unnest($14::INT[]), -- nullable (need patch)
unnest($15::BOOLEAN[]),
unnest($16::INT[]),
unnest($17::TEXT[]),
unnest($18::TIMESTAMP[])
)
`
type BatchCreateRuneEntriesParams struct {
RuneIDArr []string
RuneArr []string
NumberArr []int64
SpacersArr []int32
PremineArr []pgtype.Numeric
SymbolArr []int32
DivisibilityArr []int16
TermsArr []bool
TermsAmountArr []pgtype.Numeric
TermsCapArr []pgtype.Numeric
TermsHeightStartArr []int32
TermsHeightEndArr []int32
TermsOffsetStartArr []int32
TermsOffsetEndArr []int32
TurboArr []bool
EtchingBlockArr []int32
EtchingTxHashArr []string
EtchedAtArr []pgtype.Timestamp
}
func (q *Queries) BatchCreateRuneEntries(ctx context.Context, arg BatchCreateRuneEntriesParams) error {
_, err := q.db.Exec(ctx, batchCreateRuneEntries,
arg.RuneIDArr,
arg.RuneArr,
arg.NumberArr,
arg.SpacersArr,
arg.PremineArr,
arg.SymbolArr,
arg.DivisibilityArr,
arg.TermsArr,
arg.TermsAmountArr,
arg.TermsCapArr,
arg.TermsHeightStartArr,
arg.TermsHeightEndArr,
arg.TermsOffsetStartArr,
arg.TermsOffsetEndArr,
arg.TurboArr,
arg.EtchingBlockArr,
arg.EtchingTxHashArr,
arg.EtchedAtArr,
)
return err
}
const batchCreateRuneEntryStates = `-- name: BatchCreateRuneEntryStates :exec
INSERT INTO runes_entry_states ("rune_id", "block_height", "mints", "burned_amount", "completed_at", "completed_at_height")
VALUES(
unnest($1::TEXT[]),
unnest($2::INT[]),
unnest($3::DECIMAL[]),
unnest($4::DECIMAL[]),
unnest($5::TIMESTAMP[]),
unnest($6::INT[]) -- nullable (need patch)
)
`
type BatchCreateRuneEntryStatesParams struct {
RuneIDArr []string
BlockHeightArr []int32
MintsArr []pgtype.Numeric
BurnedAmountArr []pgtype.Numeric
CompletedAtArr []pgtype.Timestamp
CompletedAtHeightArr []int32
}
func (q *Queries) BatchCreateRuneEntryStates(ctx context.Context, arg BatchCreateRuneEntryStatesParams) error {
_, err := q.db.Exec(ctx, batchCreateRuneEntryStates,
arg.RuneIDArr,
arg.BlockHeightArr,
arg.MintsArr,
arg.BurnedAmountArr,
arg.CompletedAtArr,
arg.CompletedAtHeightArr,
)
return err
}
const batchCreateRuneTransactions = `-- name: BatchCreateRuneTransactions :exec
INSERT INTO runes_transactions ("hash", "block_height", "index", "timestamp", "inputs", "outputs", "mints", "burns", "rune_etched")
VALUES (
unnest($1::TEXT[]),
unnest($2::INT[]),
unnest($3::INT[]),
unnest($4::TIMESTAMP[]),
unnest($5::JSONB[]),
unnest($6::JSONB[]),
unnest($7::JSONB[]),
unnest($8::JSONB[]),
unnest($9::BOOLEAN[])
)
`
type BatchCreateRuneTransactionsParams struct {
HashArr []string
BlockHeightArr []int32
IndexArr []int32
TimestampArr []pgtype.Timestamp
InputsArr [][]byte
OutputsArr [][]byte
MintsArr [][]byte
BurnsArr [][]byte
RuneEtchedArr []bool
}
func (q *Queries) BatchCreateRuneTransactions(ctx context.Context, arg BatchCreateRuneTransactionsParams) error {
_, err := q.db.Exec(ctx, batchCreateRuneTransactions,
arg.HashArr,
arg.BlockHeightArr,
arg.IndexArr,
arg.TimestampArr,
arg.InputsArr,
arg.OutputsArr,
arg.MintsArr,
arg.BurnsArr,
arg.RuneEtchedArr,
)
return err
}
const batchCreateRunesBalances = `-- name: BatchCreateRunesBalances :exec
INSERT INTO runes_balances ("pkscript", "block_height", "rune_id", "amount")
VALUES(
unnest($1::TEXT[]),
unnest($2::INT[]),
unnest($3::TEXT[]),
unnest($4::DECIMAL[])
)
`
type BatchCreateRunesBalancesParams struct {
PkscriptArr []string
BlockHeightArr []int32
RuneIDArr []string
AmountArr []pgtype.Numeric
}
func (q *Queries) BatchCreateRunesBalances(ctx context.Context, arg BatchCreateRunesBalancesParams) error {
_, err := q.db.Exec(ctx, batchCreateRunesBalances,
arg.PkscriptArr,
arg.BlockHeightArr,
arg.RuneIDArr,
arg.AmountArr,
)
return err
}
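The unnest()-based queries above take one array per column rather than one struct per row. A hypothetical helper (not in the diff) that pivots row-oriented data into those parallel arrays might look like this, assuming it sits in the same gen package:

package gen

import (
    "context"

    "github.com/jackc/pgx/v5/pgtype"
)

// runeBalanceRow is an illustrative row-oriented shape; the real indexer may use
// a different entity type.
type runeBalanceRow struct {
    Pkscript    string
    BlockHeight int32
    RuneID      string
    Amount      pgtype.Numeric
}

func insertBalancesColumnar(ctx context.Context, q *Queries, rows []runeBalanceRow) error {
    params := BatchCreateRunesBalancesParams{
        PkscriptArr:    make([]string, 0, len(rows)),
        BlockHeightArr: make([]int32, 0, len(rows)),
        RuneIDArr:      make([]string, 0, len(rows)),
        AmountArr:      make([]pgtype.Numeric, 0, len(rows)),
    }
    for _, r := range rows {
        params.PkscriptArr = append(params.PkscriptArr, r.Pkscript)
        params.BlockHeightArr = append(params.BlockHeightArr, r.BlockHeight)
        params.RuneIDArr = append(params.RuneIDArr, r.RuneID)
        params.AmountArr = append(params.AmountArr, r.Amount)
    }
    // Each array becomes one unnest($n::TYPE[]) column, so a single Exec inserts every row.
    return q.BatchCreateRunesBalances(ctx, params)
}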
const batchCreateRunesOutpointBalances = `-- name: BatchCreateRunesOutpointBalances :exec
INSERT INTO runes_outpoint_balances ("rune_id", "pkscript", "tx_hash", "tx_idx", "amount", "block_height", "spent_height")
VALUES(
unnest($1::TEXT[]),
unnest($2::TEXT[]),
unnest($3::TEXT[]),
unnest($4::INT[]),
unnest($5::DECIMAL[]),
unnest($6::INT[]),
unnest($7::INT[]) -- nullable (need patch)
)
`
type BatchCreateRunesOutpointBalancesParams struct {
RuneIDArr []string
PkscriptArr []string
TxHashArr []string
TxIdxArr []int32
AmountArr []pgtype.Numeric
BlockHeightArr []int32
SpentHeightArr []int32
}
func (q *Queries) BatchCreateRunesOutpointBalances(ctx context.Context, arg BatchCreateRunesOutpointBalancesParams) error {
_, err := q.db.Exec(ctx, batchCreateRunesOutpointBalances,
arg.RuneIDArr,
arg.PkscriptArr,
arg.TxHashArr,
arg.TxIdxArr,
arg.AmountArr,
arg.BlockHeightArr,
arg.SpentHeightArr,
)
return err
}
const batchCreateRunestones = `-- name: BatchCreateRunestones :exec
INSERT INTO runes_runestones ("tx_hash", "block_height", "etching", "etching_divisibility", "etching_premine", "etching_rune", "etching_spacers", "etching_symbol", "etching_terms", "etching_terms_amount", "etching_terms_cap", "etching_terms_height_start", "etching_terms_height_end", "etching_terms_offset_start", "etching_terms_offset_end", "etching_turbo", "edicts", "mint", "pointer", "cenotaph", "flaws")
VALUES(
unnest($1::TEXT[]),
unnest($2::INT[]),
unnest($3::BOOLEAN[]),
unnest($4::SMALLINT[]), -- nullable (need patch)
unnest($5::DECIMAL[]),
unnest($6::TEXT[]), -- nullable (need patch)
unnest($7::INT[]), -- nullable (need patch)
unnest($8::INT[]), -- nullable (need patch)
unnest($9::BOOLEAN[]), -- nullable (need patch)
unnest($10::DECIMAL[]),
unnest($11::DECIMAL[]),
unnest($12::INT[]), -- nullable (need patch)
unnest($13::INT[]), -- nullable (need patch)
unnest($14::INT[]), -- nullable (need patch)
unnest($15::INT[]), -- nullable (need patch)
unnest($16::BOOLEAN[]), -- nullable (need patch)
unnest($17::JSONB[]),
unnest($18::TEXT[]), -- nullable (need patch)
unnest($19::INT[]), -- nullable (need patch)
unnest($20::BOOLEAN[]),
unnest($21::INT[])
)
`
type BatchCreateRunestonesParams struct {
TxHashArr []string
BlockHeightArr []int32
EtchingArr []bool
EtchingDivisibilityArr []int16
EtchingPremineArr []pgtype.Numeric
EtchingRuneArr []string
EtchingSpacersArr []int32
EtchingSymbolArr []int32
EtchingTermsArr []bool
EtchingTermsAmountArr []pgtype.Numeric
EtchingTermsCapArr []pgtype.Numeric
EtchingTermsHeightStartArr []int32
EtchingTermsHeightEndArr []int32
EtchingTermsOffsetStartArr []int32
EtchingTermsOffsetEndArr []int32
EtchingTurboArr []bool
EdictsArr [][]byte
MintArr []string
PointerArr []int32
CenotaphArr []bool
FlawsArr []int32
}
func (q *Queries) BatchCreateRunestones(ctx context.Context, arg BatchCreateRunestonesParams) error {
_, err := q.db.Exec(ctx, batchCreateRunestones,
arg.TxHashArr,
arg.BlockHeightArr,
arg.EtchingArr,
arg.EtchingDivisibilityArr,
arg.EtchingPremineArr,
arg.EtchingRuneArr,
arg.EtchingSpacersArr,
arg.EtchingSymbolArr,
arg.EtchingTermsArr,
arg.EtchingTermsAmountArr,
arg.EtchingTermsCapArr,
arg.EtchingTermsHeightStartArr,
arg.EtchingTermsHeightEndArr,
arg.EtchingTermsOffsetStartArr,
arg.EtchingTermsOffsetEndArr,
arg.EtchingTurboArr,
arg.EdictsArr,
arg.MintArr,
arg.PointerArr,
arg.CenotaphArr,
arg.FlawsArr,
)
return err
}
const batchSpendOutpointBalances = `-- name: BatchSpendOutpointBalances :exec
UPDATE runes_outpoint_balances
SET "spent_height" = $1::INT
FROM (
SELECT
unnest($2::TEXT[]) AS tx_hash,
unnest($3::INT[]) AS tx_idx
) AS input
WHERE "runes_outpoint_balances"."tx_hash" = "input"."tx_hash" AND "runes_outpoint_balances"."tx_idx" = "input"."tx_idx"
`
type BatchSpendOutpointBalancesParams struct {
SpentHeight int32
TxHashArr []string
TxIdxArr []int32
}
func (q *Queries) BatchSpendOutpointBalances(ctx context.Context, arg BatchSpendOutpointBalancesParams) error {
_, err := q.db.Exec(ctx, batchSpendOutpointBalances, arg.SpentHeight, arg.TxHashArr, arg.TxIdxArr)
return err
}
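BatchSpendOutpointBalances updates many outpoints in one statement by unnesting the tx_hash and tx_idx arrays into a derived table and joining on them. A hypothetical wrapper (not in the diff) illustrating how the parallel arrays line up with outpoints:

package gen

import "context"

// outPoint is an illustrative (tx_hash, tx_idx) pair.
type outPoint struct {
    TxHash string
    TxIdx  int32
}

func markSpent(ctx context.Context, q *Queries, height int32, outs []outPoint) error {
    params := BatchSpendOutpointBalancesParams{SpentHeight: height}
    for _, o := range outs {
        params.TxHashArr = append(params.TxHashArr, o.TxHash)
        params.TxIdxArr = append(params.TxIdxArr, o.TxIdx)
    }
    // The query unnests both arrays into rows and joins them against
    // runes_outpoint_balances on (tx_hash, tx_idx).
    return q.BatchSpendOutpointBalances(ctx, params)
}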


@@ -0,0 +1,118 @@
package gen
import (
"context"
"github.com/cockroachdb/errors"
"github.com/jackc/pgx/v5/pgtype"
)
type BatchCreateRuneEntriesPatchedParams struct {
BatchCreateRuneEntriesParams
TermsHeightStartArr []pgtype.Int4
TermsHeightEndArr []pgtype.Int4
TermsOffsetStartArr []pgtype.Int4
TermsOffsetEndArr []pgtype.Int4
}
func (q *Queries) BatchCreateRuneEntriesPatched(ctx context.Context, arg BatchCreateRuneEntriesPatchedParams) error {
_, err := q.db.Exec(ctx, batchCreateRuneEntries,
arg.RuneIDArr,
arg.RuneArr,
arg.NumberArr,
arg.SpacersArr,
arg.PremineArr,
arg.SymbolArr,
arg.DivisibilityArr,
arg.TermsArr,
arg.TermsAmountArr,
arg.TermsCapArr,
arg.TermsHeightStartArr,
arg.TermsHeightEndArr,
arg.TermsOffsetStartArr,
arg.TermsOffsetEndArr,
arg.TurboArr,
arg.EtchingBlockArr,
arg.EtchingTxHashArr,
arg.EtchedAtArr,
)
return errors.WithStack(err)
}
type BatchCreateRuneEntryStatesPatchedParams struct {
BatchCreateRuneEntryStatesParams
CompletedAtHeightArr []pgtype.Int4
}
func (q *Queries) BatchCreateRuneEntryStatesPatched(ctx context.Context, arg BatchCreateRuneEntryStatesPatchedParams) error {
_, err := q.db.Exec(ctx, batchCreateRuneEntryStates,
arg.RuneIDArr,
arg.BlockHeightArr,
arg.MintsArr,
arg.BurnedAmountArr,
arg.CompletedAtArr,
arg.CompletedAtHeightArr,
)
return errors.WithStack(err)
}
type BatchCreateRunesOutpointBalancesPatchedParams struct {
BatchCreateRunesOutpointBalancesParams
SpentHeightArr []pgtype.Int4
}
func (q *Queries) BatchCreateRunesOutpointBalancesPatched(ctx context.Context, arg BatchCreateRunesOutpointBalancesPatchedParams) error {
_, err := q.db.Exec(ctx, batchCreateRunesOutpointBalances,
arg.RuneIDArr,
arg.PkscriptArr,
arg.TxHashArr,
arg.TxIdxArr,
arg.AmountArr,
arg.BlockHeightArr,
arg.SpentHeightArr,
)
return errors.WithStack(err)
}
type BatchCreateRunestonesPatchedParams struct {
BatchCreateRunestonesParams
EtchingDivisibilityArr []pgtype.Int2
EtchingRuneArr []pgtype.Text
EtchingSpacersArr []pgtype.Int4
EtchingSymbolArr []pgtype.Int4
EtchingTermsArr []pgtype.Bool
EtchingTermsHeightStartArr []pgtype.Int4
EtchingTermsHeightEndArr []pgtype.Int4
EtchingTermsOffsetStartArr []pgtype.Int4
EtchingTermsOffsetEndArr []pgtype.Int4
EtchingTurboArr []pgtype.Bool
MintArr []pgtype.Text
PointerArr []pgtype.Int4
}
func (q *Queries) BatchCreateRunestonesPatched(ctx context.Context, arg BatchCreateRunestonesPatchedParams) error {
_, err := q.db.Exec(ctx, batchCreateRunestones,
arg.TxHashArr,
arg.BlockHeightArr,
arg.EtchingArr,
arg.EtchingDivisibilityArr,
arg.EtchingPremineArr,
arg.EtchingRuneArr,
arg.EtchingSpacersArr,
arg.EtchingSymbolArr,
arg.EtchingTermsArr,
arg.EtchingTermsAmountArr,
arg.EtchingTermsCapArr,
arg.EtchingTermsHeightStartArr,
arg.EtchingTermsHeightEndArr,
arg.EtchingTermsOffsetStartArr,
arg.EtchingTermsOffsetEndArr,
arg.EtchingTurboArr,
arg.EdictsArr,
arg.MintArr,
arg.PointerArr,
arg.CenotaphArr,
arg.FlawsArr,
)
return errors.WithStack(err)
}
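The *Patched params shadow the columns marked "nullable (need patch)" with pgtype values, so NULLs survive inside the typed arrays. As an illustration only (not in the diff), a nullable int32 would be mapped like this:

package gen

import "github.com/jackc/pgx/v5/pgtype"

// nullableInt4 is a hypothetical helper: a nil pointer becomes pgtype.Int4 with
// Valid=false, which pgx encodes as SQL NULL inside the INT[] array.
func nullableInt4(v *int32) pgtype.Int4 {
    if v == nil {
        return pgtype.Int4{}
    }
    return pgtype.Int4{Int32: *v, Valid: true}
}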


@@ -1,6 +1,6 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
// sqlc v1.27.0
// source: data.sql
package gen
@@ -45,6 +45,54 @@ func (q *Queries) CreateIndexedBlock(ctx context.Context, arg CreateIndexedBlock
return err
}
const createOutPointBalance = `-- name: CreateOutPointBalance :exec
INSERT INTO runes_outpoint_balances (rune_id, pkscript, tx_hash, tx_idx, amount, block_height, spent_height) VALUES ($1, $2, $3, $4, $5, $6, $7)
`
type CreateOutPointBalanceParams struct {
RuneID string
Pkscript string
TxHash string
TxIdx int32
Amount pgtype.Numeric
BlockHeight int32
SpentHeight pgtype.Int4
}
func (q *Queries) CreateOutPointBalance(ctx context.Context, arg CreateOutPointBalanceParams) error {
_, err := q.db.Exec(ctx, createOutPointBalance,
arg.RuneID,
arg.Pkscript,
arg.TxHash,
arg.TxIdx,
arg.Amount,
arg.BlockHeight,
arg.SpentHeight,
)
return err
}
const createRuneBalance = `-- name: CreateRuneBalance :exec
INSERT INTO runes_balances (pkscript, block_height, rune_id, amount) VALUES ($1, $2, $3, $4)
`
type CreateRuneBalanceParams struct {
Pkscript string
BlockHeight int32
RuneID string
Amount pgtype.Numeric
}
func (q *Queries) CreateRuneBalance(ctx context.Context, arg CreateRuneBalanceParams) error {
_, err := q.db.Exec(ctx, createRuneBalance,
arg.Pkscript,
arg.BlockHeight,
arg.RuneID,
arg.Amount,
)
return err
}
const createRuneEntry = `-- name: CreateRuneEntry :exec
INSERT INTO runes_entries (rune_id, rune, number, spacers, premine, symbol, divisibility, terms, terms_amount, terms_cap, terms_height_start, terms_height_end, terms_offset_start, terms_offset_end, turbo, etching_block, etching_tx_hash, etched_at)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18)
@@ -296,12 +344,14 @@ const getBalancesByPkScript = `-- name: GetBalancesByPkScript :many
WITH balances AS (
SELECT DISTINCT ON (rune_id) pkscript, block_height, rune_id, amount FROM runes_balances WHERE pkscript = $1 AND block_height <= $2 ORDER BY rune_id, block_height DESC
)
SELECT pkscript, block_height, rune_id, amount FROM balances WHERE amount > 0
SELECT pkscript, block_height, rune_id, amount FROM balances WHERE amount > 0 ORDER BY amount DESC, rune_id LIMIT $3 OFFSET $4
`
type GetBalancesByPkScriptParams struct {
Pkscript string
BlockHeight int32
Limit int32
Offset int32
}
type GetBalancesByPkScriptRow struct {
@@ -312,7 +362,12 @@ type GetBalancesByPkScriptRow struct {
}
func (q *Queries) GetBalancesByPkScript(ctx context.Context, arg GetBalancesByPkScriptParams) ([]GetBalancesByPkScriptRow, error) {
rows, err := q.db.Query(ctx, getBalancesByPkScript, arg.Pkscript, arg.BlockHeight)
rows, err := q.db.Query(ctx, getBalancesByPkScript,
arg.Pkscript,
arg.BlockHeight,
arg.Limit,
arg.Offset,
)
if err != nil {
return nil, err
}
@@ -340,12 +395,14 @@ const getBalancesByRuneId = `-- name: GetBalancesByRuneId :many
WITH balances AS (
SELECT DISTINCT ON (pkscript) pkscript, block_height, rune_id, amount FROM runes_balances WHERE rune_id = $1 AND block_height <= $2 ORDER BY pkscript, block_height DESC
)
SELECT pkscript, block_height, rune_id, amount FROM balances WHERE amount > 0
SELECT pkscript, block_height, rune_id, amount FROM balances WHERE amount > 0 ORDER BY amount DESC, pkscript LIMIT $3 OFFSET $4
`
type GetBalancesByRuneIdParams struct {
RuneID string
BlockHeight int32
Limit int32
Offset int32
}
type GetBalancesByRuneIdRow struct {
@@ -356,7 +413,12 @@ type GetBalancesByRuneIdRow struct {
}
func (q *Queries) GetBalancesByRuneId(ctx context.Context, arg GetBalancesByRuneIdParams) ([]GetBalancesByRuneIdRow, error) {
rows, err := q.db.Query(ctx, getBalancesByRuneId, arg.RuneID, arg.BlockHeight)
rows, err := q.db.Query(ctx, getBalancesByRuneId,
arg.RuneID,
arg.BlockHeight,
arg.Limit,
arg.Offset,
)
if err != nil {
return nil, err
}
@@ -414,6 +476,120 @@ func (q *Queries) GetLatestIndexedBlock(ctx context.Context) (RunesIndexedBlock,
return i, err
}
const getOngoingRuneEntries = `-- name: GetOngoingRuneEntries :many
WITH states AS (
-- select latest state
SELECT DISTINCT ON (rune_id) rune_id, block_height, mints, burned_amount, completed_at, completed_at_height FROM runes_entry_states WHERE block_height <= $1::integer ORDER BY rune_id, block_height DESC
)
SELECT runes_entries.rune_id, number, rune, spacers, premine, symbol, divisibility, terms, terms_amount, terms_cap, terms_height_start, terms_height_end, terms_offset_start, terms_offset_end, turbo, etching_block, etching_tx_hash, etched_at, states.rune_id, block_height, mints, burned_amount, completed_at, completed_at_height FROM runes_entries
LEFT JOIN states ON runes_entries.rune_id = states.rune_id
WHERE (
runes_entries.terms = TRUE AND
COALESCE(runes_entries.terms_amount, 0) != 0 AND
COALESCE(runes_entries.terms_cap, 0) != 0 AND
states.mints < runes_entries.terms_cap AND
(
runes_entries.terms_height_start IS NULL OR runes_entries.terms_height_start <= $1::integer
) AND (
runes_entries.terms_height_end IS NULL OR $1::integer <= runes_entries.terms_height_end
) AND (
runes_entries.terms_offset_start IS NULL OR runes_entries.terms_offset_start + runes_entries.etching_block <= $1::integer
) AND (
runes_entries.terms_offset_end IS NULL OR $1::integer <= runes_entries.terms_offset_end + runes_entries.etching_block
)
) AND (
$2::text = '' OR
runes_entries.rune ILIKE '%' || $2::text || '%'
)
ORDER BY states.mints DESC
LIMIT $4 OFFSET $3
`
type GetOngoingRuneEntriesParams struct {
Height int32
Search string
Offset int32
Limit int32
}
type GetOngoingRuneEntriesRow struct {
RuneID string
Number int64
Rune string
Spacers int32
Premine pgtype.Numeric
Symbol int32
Divisibility int16
Terms bool
TermsAmount pgtype.Numeric
TermsCap pgtype.Numeric
TermsHeightStart pgtype.Int4
TermsHeightEnd pgtype.Int4
TermsOffsetStart pgtype.Int4
TermsOffsetEnd pgtype.Int4
Turbo bool
EtchingBlock int32
EtchingTxHash string
EtchedAt pgtype.Timestamp
RuneID_2 pgtype.Text
BlockHeight pgtype.Int4
Mints pgtype.Numeric
BurnedAmount pgtype.Numeric
CompletedAt pgtype.Timestamp
CompletedAtHeight pgtype.Int4
}
func (q *Queries) GetOngoingRuneEntries(ctx context.Context, arg GetOngoingRuneEntriesParams) ([]GetOngoingRuneEntriesRow, error) {
rows, err := q.db.Query(ctx, getOngoingRuneEntries,
arg.Height,
arg.Search,
arg.Offset,
arg.Limit,
)
if err != nil {
return nil, err
}
defer rows.Close()
var items []GetOngoingRuneEntriesRow
for rows.Next() {
var i GetOngoingRuneEntriesRow
if err := rows.Scan(
&i.RuneID,
&i.Number,
&i.Rune,
&i.Spacers,
&i.Premine,
&i.Symbol,
&i.Divisibility,
&i.Terms,
&i.TermsAmount,
&i.TermsCap,
&i.TermsHeightStart,
&i.TermsHeightEnd,
&i.TermsOffsetStart,
&i.TermsOffsetEnd,
&i.Turbo,
&i.EtchingBlock,
&i.EtchingTxHash,
&i.EtchedAt,
&i.RuneID_2,
&i.BlockHeight,
&i.Mints,
&i.BurnedAmount,
&i.CompletedAt,
&i.CompletedAtHeight,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const getOutPointBalancesAtOutPoint = `-- name: GetOutPointBalancesAtOutPoint :many
SELECT rune_id, pkscript, tx_hash, tx_idx, amount, block_height, spent_height FROM runes_outpoint_balances WHERE tx_hash = $1 AND tx_idx = $2
`
@@ -451,6 +627,105 @@ func (q *Queries) GetOutPointBalancesAtOutPoint(ctx context.Context, arg GetOutP
return items, nil
}
const getRuneEntries = `-- name: GetRuneEntries :many
WITH states AS (
-- select latest state
SELECT DISTINCT ON (rune_id) rune_id, block_height, mints, burned_amount, completed_at, completed_at_height FROM runes_entry_states WHERE block_height <= $4 ORDER BY rune_id, block_height DESC
)
SELECT runes_entries.rune_id, number, rune, spacers, premine, symbol, divisibility, terms, terms_amount, terms_cap, terms_height_start, terms_height_end, terms_offset_start, terms_offset_end, turbo, etching_block, etching_tx_hash, etched_at, states.rune_id, block_height, mints, burned_amount, completed_at, completed_at_height FROM runes_entries
LEFT JOIN states ON runes_entries.rune_id = states.rune_id
WHERE (
$1 = '' OR
runes_entries.rune ILIKE $1 || '%'
)
ORDER BY runes_entries.number
LIMIT $3 OFFSET $2
`
type GetRuneEntriesParams struct {
Search interface{}
Offset int32
Limit int32
Height int32
}
type GetRuneEntriesRow struct {
RuneID string
Number int64
Rune string
Spacers int32
Premine pgtype.Numeric
Symbol int32
Divisibility int16
Terms bool
TermsAmount pgtype.Numeric
TermsCap pgtype.Numeric
TermsHeightStart pgtype.Int4
TermsHeightEnd pgtype.Int4
TermsOffsetStart pgtype.Int4
TermsOffsetEnd pgtype.Int4
Turbo bool
EtchingBlock int32
EtchingTxHash string
EtchedAt pgtype.Timestamp
RuneID_2 pgtype.Text
BlockHeight pgtype.Int4
Mints pgtype.Numeric
BurnedAmount pgtype.Numeric
CompletedAt pgtype.Timestamp
CompletedAtHeight pgtype.Int4
}
func (q *Queries) GetRuneEntries(ctx context.Context, arg GetRuneEntriesParams) ([]GetRuneEntriesRow, error) {
rows, err := q.db.Query(ctx, getRuneEntries,
arg.Search,
arg.Offset,
arg.Limit,
arg.Height,
)
if err != nil {
return nil, err
}
defer rows.Close()
var items []GetRuneEntriesRow
for rows.Next() {
var i GetRuneEntriesRow
if err := rows.Scan(
&i.RuneID,
&i.Number,
&i.Rune,
&i.Spacers,
&i.Premine,
&i.Symbol,
&i.Divisibility,
&i.Terms,
&i.TermsAmount,
&i.TermsCap,
&i.TermsHeightStart,
&i.TermsHeightEnd,
&i.TermsOffsetStart,
&i.TermsOffsetEnd,
&i.Turbo,
&i.EtchingBlock,
&i.EtchingTxHash,
&i.EtchedAt,
&i.RuneID_2,
&i.BlockHeight,
&i.Mints,
&i.BurnedAmount,
&i.CompletedAt,
&i.CompletedAtHeight,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
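A hypothetical pagination loop (not in the diff) that walks GetRuneEntries using the Limit/Offset parameters introduced here; the page size and empty search string are illustrative:

package gen

import "context"

func allRuneEntries(ctx context.Context, q *Queries, height int32, pageSize int32) ([]GetRuneEntriesRow, error) {
    var all []GetRuneEntriesRow
    for offset := int32(0); ; offset += pageSize {
        page, err := q.GetRuneEntries(ctx, GetRuneEntriesParams{
            Search: "",
            Offset: offset,
            Limit:  pageSize,
            Height: height,
        })
        if err != nil {
            return nil, err
        }
        all = append(all, page...)
        // A short page means there is nothing left to fetch.
        if int32(len(page)) < pageSize {
            return all, nil
        }
    }
}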
const getRuneEntriesByRuneIds = `-- name: GetRuneEntriesByRuneIds :many
WITH states AS (
-- select latest state
@@ -631,26 +906,106 @@ func (q *Queries) GetRuneIdFromRune(ctx context.Context, rune string) (string, e
return rune_id, err
}
const getRuneTransaction = `-- name: GetRuneTransaction :one
SELECT hash, runes_transactions.block_height, index, timestamp, inputs, outputs, mints, burns, rune_etched, tx_hash, runes_runestones.block_height, etching, etching_divisibility, etching_premine, etching_rune, etching_spacers, etching_symbol, etching_terms, etching_terms_amount, etching_terms_cap, etching_terms_height_start, etching_terms_height_end, etching_terms_offset_start, etching_terms_offset_end, etching_turbo, edicts, mint, pointer, cenotaph, flaws FROM runes_transactions
LEFT JOIN runes_runestones ON runes_transactions.hash = runes_runestones.tx_hash
WHERE hash = $1 LIMIT 1
`
type GetRuneTransactionRow struct {
Hash string
BlockHeight int32
Index int32
Timestamp pgtype.Timestamp
Inputs []byte
Outputs []byte
Mints []byte
Burns []byte
RuneEtched bool
TxHash pgtype.Text
BlockHeight_2 pgtype.Int4
Etching pgtype.Bool
EtchingDivisibility pgtype.Int2
EtchingPremine pgtype.Numeric
EtchingRune pgtype.Text
EtchingSpacers pgtype.Int4
EtchingSymbol pgtype.Int4
EtchingTerms pgtype.Bool
EtchingTermsAmount pgtype.Numeric
EtchingTermsCap pgtype.Numeric
EtchingTermsHeightStart pgtype.Int4
EtchingTermsHeightEnd pgtype.Int4
EtchingTermsOffsetStart pgtype.Int4
EtchingTermsOffsetEnd pgtype.Int4
EtchingTurbo pgtype.Bool
Edicts []byte
Mint pgtype.Text
Pointer pgtype.Int4
Cenotaph pgtype.Bool
Flaws pgtype.Int4
}
func (q *Queries) GetRuneTransaction(ctx context.Context, hash string) (GetRuneTransactionRow, error) {
row := q.db.QueryRow(ctx, getRuneTransaction, hash)
var i GetRuneTransactionRow
err := row.Scan(
&i.Hash,
&i.BlockHeight,
&i.Index,
&i.Timestamp,
&i.Inputs,
&i.Outputs,
&i.Mints,
&i.Burns,
&i.RuneEtched,
&i.TxHash,
&i.BlockHeight_2,
&i.Etching,
&i.EtchingDivisibility,
&i.EtchingPremine,
&i.EtchingRune,
&i.EtchingSpacers,
&i.EtchingSymbol,
&i.EtchingTerms,
&i.EtchingTermsAmount,
&i.EtchingTermsCap,
&i.EtchingTermsHeightStart,
&i.EtchingTermsHeightEnd,
&i.EtchingTermsOffsetStart,
&i.EtchingTermsOffsetEnd,
&i.EtchingTurbo,
&i.Edicts,
&i.Mint,
&i.Pointer,
&i.Cenotaph,
&i.Flaws,
)
return i, err
}
const getRuneTransactions = `-- name: GetRuneTransactions :many
SELECT hash, runes_transactions.block_height, index, timestamp, inputs, outputs, mints, burns, rune_etched, tx_hash, runes_runestones.block_height, etching, etching_divisibility, etching_premine, etching_rune, etching_spacers, etching_symbol, etching_terms, etching_terms_amount, etching_terms_cap, etching_terms_height_start, etching_terms_height_end, etching_terms_offset_start, etching_terms_offset_end, etching_turbo, edicts, mint, pointer, cenotaph, flaws FROM runes_transactions
LEFT JOIN runes_runestones ON runes_transactions.hash = runes_runestones.tx_hash
WHERE (
$3::BOOLEAN = FALSE -- if @filter_pk_script is TRUE, apply pk_script filter
OR runes_transactions.outputs @> $4::JSONB
OR runes_transactions.inputs @> $4::JSONB
) AND (
$5::BOOLEAN = FALSE -- if @filter_rune_id is TRUE, apply rune_id filter
OR runes_transactions.outputs @> $6::JSONB
OR runes_transactions.inputs @> $6::JSONB
OR runes_transactions.mints ? $7
OR runes_transactions.burns ? $7
OR (runes_transactions.rune_etched = TRUE AND runes_transactions.block_height = $8 AND runes_transactions.index = $9)
) AND (
$10 <= runes_transactions.block_height AND runes_transactions.block_height <= $11
)
ORDER BY runes_transactions.block_height DESC, runes_transactions.index DESC LIMIT $1 OFFSET $2
`
type GetRuneTransactionsParams struct {
Limit int32
Offset int32
FilterPkScript bool
PkScriptParam []byte
FilterRuneID bool
@@ -658,7 +1013,8 @@ type GetRuneTransactionsParams struct {
RuneID []byte
RuneIDBlockHeight int32
RuneIDTxIndex int32
BlockHeight int32
FromBlock int32
ToBlock int32
}
type GetRuneTransactionsRow struct {
@@ -696,6 +1052,8 @@ type GetRuneTransactionsRow struct {
func (q *Queries) GetRuneTransactions(ctx context.Context, arg GetRuneTransactionsParams) ([]GetRuneTransactionsRow, error) {
rows, err := q.db.Query(ctx, getRuneTransactions,
arg.Limit,
arg.Offset,
arg.FilterPkScript,
arg.PkScriptParam,
arg.FilterRuneID,
@@ -703,7 +1061,8 @@ func (q *Queries) GetRuneTransactions(ctx context.Context, arg GetRuneTransactio
arg.RuneID,
arg.RuneIDBlockHeight,
arg.RuneIDTxIndex,
arg.BlockHeight,
arg.FromBlock,
arg.ToBlock,
)
if err != nil {
return nil, err
@@ -754,32 +1113,53 @@ func (q *Queries) GetRuneTransactions(ctx context.Context, arg GetRuneTransactio
return items, nil
}
const getUnspentOutPointBalancesByPkScript = `-- name: GetUnspentOutPointBalancesByPkScript :many
SELECT rune_id, pkscript, tx_hash, tx_idx, amount, block_height, spent_height FROM runes_outpoint_balances WHERE pkscript = $1 AND block_height <= $2 AND (spent_height IS NULL OR spent_height > $2)
const getRunesUTXOsByPkScript = `-- name: GetRunesUTXOsByPkScript :many
SELECT tx_hash, tx_idx, max("pkscript") as pkscript, array_agg("rune_id") as rune_ids, array_agg("amount") as amounts
FROM runes_outpoint_balances
WHERE
pkscript = $3 AND
block_height <= $4 AND
(spent_height IS NULL OR spent_height > $4)
GROUP BY tx_hash, tx_idx
ORDER BY tx_hash, tx_idx
LIMIT $1 OFFSET $2
`
type GetUnspentOutPointBalancesByPkScriptParams struct {
type GetRunesUTXOsByPkScriptParams struct {
Limit int32
Offset int32
Pkscript string
BlockHeight int32
}
func (q *Queries) GetUnspentOutPointBalancesByPkScript(ctx context.Context, arg GetUnspentOutPointBalancesByPkScriptParams) ([]RunesOutpointBalance, error) {
rows, err := q.db.Query(ctx, getUnspentOutPointBalancesByPkScript, arg.Pkscript, arg.BlockHeight)
type GetRunesUTXOsByPkScriptRow struct {
TxHash string
TxIdx int32
Pkscript interface{}
RuneIds interface{}
Amounts interface{}
}
func (q *Queries) GetRunesUTXOsByPkScript(ctx context.Context, arg GetRunesUTXOsByPkScriptParams) ([]GetRunesUTXOsByPkScriptRow, error) {
rows, err := q.db.Query(ctx, getRunesUTXOsByPkScript,
arg.Limit,
arg.Offset,
arg.Pkscript,
arg.BlockHeight,
)
if err != nil {
return nil, err
}
defer rows.Close()
var items []RunesOutpointBalance
var items []GetRunesUTXOsByPkScriptRow
for rows.Next() {
var i RunesOutpointBalance
var i GetRunesUTXOsByPkScriptRow
if err := rows.Scan(
&i.RuneID,
&i.Pkscript,
&i.TxHash,
&i.TxIdx,
&i.Amount,
&i.BlockHeight,
&i.SpentHeight,
&i.Pkscript,
&i.RuneIds,
&i.Amounts,
); err != nil {
return nil, err
}
@@ -791,18 +1171,116 @@ func (q *Queries) GetUnspentOutPointBalancesByPkScript(ctx context.Context, arg
return items, nil
}
const spendOutPointBalances = `-- name: SpendOutPointBalances :exec
const getRunesUTXOsByRuneIdAndPkScript = `-- name: GetRunesUTXOsByRuneIdAndPkScript :many
SELECT tx_hash, tx_idx, max("pkscript") as pkscript, array_agg("rune_id") as rune_ids, array_agg("amount") as amounts
FROM runes_outpoint_balances
WHERE
pkscript = $3 AND
block_height <= $4 AND
(spent_height IS NULL OR spent_height > $4)
GROUP BY tx_hash, tx_idx
HAVING array_agg("rune_id") @> $5::text[]
ORDER BY tx_hash, tx_idx
LIMIT $1 OFFSET $2
`
type GetRunesUTXOsByRuneIdAndPkScriptParams struct {
Limit int32
Offset int32
Pkscript string
BlockHeight int32
RuneIds []string
}
type GetRunesUTXOsByRuneIdAndPkScriptRow struct {
TxHash string
TxIdx int32
Pkscript interface{}
RuneIds interface{}
Amounts interface{}
}
func (q *Queries) GetRunesUTXOsByRuneIdAndPkScript(ctx context.Context, arg GetRunesUTXOsByRuneIdAndPkScriptParams) ([]GetRunesUTXOsByRuneIdAndPkScriptRow, error) {
rows, err := q.db.Query(ctx, getRunesUTXOsByRuneIdAndPkScript,
arg.Limit,
arg.Offset,
arg.Pkscript,
arg.BlockHeight,
arg.RuneIds,
)
if err != nil {
return nil, err
}
defer rows.Close()
var items []GetRunesUTXOsByRuneIdAndPkScriptRow
for rows.Next() {
var i GetRunesUTXOsByRuneIdAndPkScriptRow
if err := rows.Scan(
&i.TxHash,
&i.TxIdx,
&i.Pkscript,
&i.RuneIds,
&i.Amounts,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const getTotalHoldersByRuneIds = `-- name: GetTotalHoldersByRuneIds :many
WITH balances AS (
SELECT DISTINCT ON (rune_id, pkscript) pkscript, block_height, rune_id, amount FROM runes_balances WHERE rune_id = ANY($1::TEXT[]) AND block_height <= $2 ORDER BY rune_id, pkscript, block_height DESC
)
SELECT rune_id, COUNT(DISTINCT pkscript) FROM balances WHERE amount > 0 GROUP BY rune_id
`
type GetTotalHoldersByRuneIdsParams struct {
RuneIds []string
BlockHeight int32
}
type GetTotalHoldersByRuneIdsRow struct {
RuneID string
Count int64
}
func (q *Queries) GetTotalHoldersByRuneIds(ctx context.Context, arg GetTotalHoldersByRuneIdsParams) ([]GetTotalHoldersByRuneIdsRow, error) {
rows, err := q.db.Query(ctx, getTotalHoldersByRuneIds, arg.RuneIds, arg.BlockHeight)
if err != nil {
return nil, err
}
defer rows.Close()
var items []GetTotalHoldersByRuneIdsRow
for rows.Next() {
var i GetTotalHoldersByRuneIdsRow
if err := rows.Scan(&i.RuneID, &i.Count); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
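A hypothetical convenience wrapper (not in the diff) that collects the per-rune holder counts returned by GetTotalHoldersByRuneIds into a map keyed by rune ID:

package gen

import "context"

func holderCountsByRuneID(ctx context.Context, q *Queries, runeIDs []string, height int32) (map[string]int64, error) {
    rows, err := q.GetTotalHoldersByRuneIds(ctx, GetTotalHoldersByRuneIdsParams{
        RuneIds:     runeIDs,
        BlockHeight: height,
    })
    if err != nil {
        return nil, err
    }
    counts := make(map[string]int64, len(rows))
    for _, r := range rows {
        counts[r.RuneID] = r.Count
    }
    return counts, nil
}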
const spendOutPointBalance = `-- name: SpendOutPointBalance :exec
UPDATE runes_outpoint_balances SET spent_height = $1 WHERE tx_hash = $2 AND tx_idx = $3
`
type SpendOutPointBalancesParams struct {
type SpendOutPointBalanceParams struct {
SpentHeight pgtype.Int4
TxHash string
TxIdx int32
}
func (q *Queries) SpendOutPointBalances(ctx context.Context, arg SpendOutPointBalancesParams) error {
_, err := q.db.Exec(ctx, spendOutPointBalances, arg.SpentHeight, arg.TxHash, arg.TxIdx)
func (q *Queries) SpendOutPointBalance(ctx context.Context, arg SpendOutPointBalanceParams) error {
_, err := q.db.Exec(ctx, spendOutPointBalance, arg.SpentHeight, arg.TxHash, arg.TxIdx)
return err
}


@@ -1,6 +1,6 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
// sqlc v1.27.0
package gen
@@ -15,7 +15,6 @@ type DBTX interface {
Exec(context.Context, string, ...interface{}) (pgconn.CommandTag, error)
Query(context.Context, string, ...interface{}) (pgx.Rows, error)
QueryRow(context.Context, string, ...interface{}) pgx.Row
SendBatch(context.Context, *pgx.Batch) pgx.BatchResults
}
func New(db DBTX) *Queries {


@@ -1,6 +1,6 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
// sqlc v1.27.0
// source: info.sql
package gen


@@ -1,6 +1,6 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
// sqlc v1.27.0
package gen

Some files were not shown because too many files have changed in this diff.