Compare commits

...

29 Commits

Author SHA1 Message Date
gazenw
7a863987ec Merge pull request #55 from gaze-network/develop
Release v0.4.3
2024-10-04 13:23:30 +07:00
gazenw
f9c6ef8dfd fix: add different genesis runes config for each network (#54)
* fix: add different genesis runes config for each network

* fix: use slogx.Stringer

* refactor: remove unused value
2024-10-04 13:22:53 +07:00
gazenw
22a32468ef Merge pull request #53 from gaze-network/develop
Release v0.4.2
2024-10-03 18:26:32 +07:00
gazenw
b1d9f4f574 feat: add fractal support for runes (#52)
* feat: add fractal support for runes

* chore: remove common.HalvingInterval

* fix: update starting block height

* refactor: move network-genesis-rune definition to constants

* fix: use logger.Panic() instead of panic()

* fix: golangci-lint

* fix: missing return
2024-10-03 18:25:13 +07:00
Gaze
6a5ba528a8 Merge branch 'main' into develop 2024-10-02 20:28:42 +07:00
Nut Pinyo
6484887710 feat: add dust limit util (#51)
* feat: add dust limit util

* fix: use int64 instead
2024-10-02 15:08:29 +07:00
Gaze
9a1382fb9f feat: add fee estimation util 2024-09-06 22:56:57 +07:00
Gaze
3d5f3b414c feat: add sign tx util functions 2024-09-06 21:58:02 +07:00
gazenw
6e8a846c27 Merge pull request #50 from gaze-network/develop
Release v0.4.1
2024-08-30 16:18:40 +07:00
gazenw
8b690c4f7f fix: add decimals field in runes get holders (#49) 2024-08-30 16:17:57 +07:00
Ň𝑒𝕣ⒻẸ𝔻
cc37807ff9 Turkish translation (#46)
* Move Turkish translation of README.md to docs/ directory

* Add Turkish translation of README.md with last updated date and community translation notice

* Update README.md

* Update README.md

* Update README.md

* Update README.md

* Update README.md

* Update README.md

---------

Co-authored-by: gazenw <163862510+gazenw@users.noreply.github.com>
2024-08-30 16:15:14 +07:00
gazenw
9ab16d21e1 Merge pull request #48 from gaze-network/develop
Release v0.4.1
2024-08-28 23:49:15 +07:00
gazenw
32fec89914 Merge pull request #47 from gaze-network/feat/fractal-network-support
feat: add fractal network constant
2024-08-28 23:48:17 +07:00
Gaze
0131de6717 feat: add network support for fractal 2024-08-28 23:34:54 +07:00
gazenw
206eb65ee7 Merge pull request #44 from gaze-network/develop
Release v0.4.0
2024-08-13 17:31:16 +07:00
gazenw
fa810b0aed feat: add get utxo by tx hash and output idx for Runes (#42)
* feat: add handler

* feat: add get transaction

* feat: add get utxos output

* refactor: function parameter

* feat: add check utxo not found

* feat: add sats to get utxo output api

* feat: add utxo sats entity

* feat: add get utxos output batch

* feat: handle error

* fix: context

* fix: sqlc queries

* fix: remove unused code

* fix: comment

* fix: check utxo not found error

* refactor: add some space

* fix: comment

* fix: use public field
2024-08-13 17:20:46 +07:00
waiemwor
dca63a49fe Modify Nodesale API to allow query all nodes from a deployment. (#43)
* feat: allow query all nodes from a deployment.

* fix : function GetNodesByDeployment name.
2024-08-13 16:35:56 +07:00
Gaze
05ade4b9d5 Merge branch 'main' into develop 2024-08-06 13:25:48 +07:00
Gaze
074458584b fix: adjust content type check 2024-08-06 13:25:36 +07:00
waiemwor
db5dc75c41 Feature/nodesale (#40)
* feat: recover nodesale module.

* fix: refactored.

* fix: fix table type.

* fix: add entity

* fix: bug UTC time.

* ci: try to tidy before testing

* ci: touch result file

* ci: use echo to create new file

* fix: try to skip test in ci

* fix: remove os.Exit

* fix: handle error

* feat: add todo note

* fix: Cannot run nodesale test because qtx is not initiated.

* fix: 50% chance public key compare incorrectly.

* fix: more consistent SQL

* fix: sanity refactor.

* fix: remove unused code.

* fix: move last_block_default to config file.

* fix: minor mistakes.

* fix:

* fix: refactor

* fix: refactor

* fix: delegate tx hash not record into db.

* refactor: prepare for moving integration tests.

* refactor: convert to unit tests.

* fix: change to using input values since output values deducted fee.

* feat: add extra unit test.

* fix: wrong timestamp format.

* fix: handle block timeout = 0

---------

Co-authored-by: Gaze <gazenw@users.noreply.github.com>
2024-08-05 11:33:20 +07:00
Gaze
0474627336 Merge branch 'main' into develop 2024-08-05 11:31:42 +07:00
Gaze
359436e6eb fix(httpclient): preserve trailing slash if exists 2024-08-01 14:43:36 +07:00
gazenw
1967895d6d Merge pull request #41 from gaze-network/develop
Release v0.3.0
2024-07-25 15:07:51 +07:00
gazenw
7dcbd082ee feat: add Runes API pagination (#36)
* feat: implement pagination on get balance, get holders

* feat: paginate get transactions

* fix: remove debug

* feat: implement pagination in get utxos

* feat: sort response in get holders

* feat: cap batch query

* feat: add default limits to all endpoints

* chore: rename endpoint funcs

* fix: parse rune name spacers

* chore: use compare.Cmp

* feat: handle not found errors on all usecase
2024-07-23 15:46:45 +07:00
gazenw
880f4b2e6a fix: handle case where input rune id is not found (#37) 2024-07-15 18:32:28 +07:00
Gaze
3f727dc11b Merge remote-tracking branch 'origin/main' into develop 2024-07-15 16:33:56 +07:00
Planxnx
60717ecc65 feat(requestlogger): add response headers 2024-07-12 00:18:15 +07:00
Planxnx
6998adedb0 fix(requestlogger): logging all request headers 2024-07-11 23:53:27 +07:00
Thanee Charattrakool
add0a541b5 feat: Request Logger fields (#35)
* feat: add with request headers config

* feat: add with fields config

* feat: format request queries
2024-07-11 23:41:18 +07:00
100 changed files with 7823 additions and 310 deletions

View File

@@ -58,6 +58,9 @@ jobs:
cache: true # caching and restoring go modules and build outputs.
- run: echo "GOVERSION=$(go version)" >> $GITHUB_ENV
- name: Touch test result file
run: echo "" > test_output.json
- name: Build
run: go build -v ./...

View File

@@ -39,7 +39,7 @@
"ui.completion.usePlaceholders": false,
"ui.diagnostic.analyses": {
// https://github.com/golang/tools/blob/master/gopls/doc/analyzers.md
// "fieldalignment": false,
"fieldalignment": false,
"nilness": true,
"shadow": false,
"unusedparams": true,

View File

@@ -1,5 +1,7 @@
<!-- omit from toc -->
- [Türkçe](https://github.com/Rumeyst/gaze-indexer/blob/turkish-translation/docs/README_tr.md)
# Gaze Indexer
Gaze Indexer is an open-source and modular indexing client for Bitcoin meta-protocols with **Unified Consistent APIs** across fungible token protocols.

View File

@@ -17,6 +17,7 @@ import (
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/indexer"
"github.com/gaze-network/indexer-network/internal/config"
"github.com/gaze-network/indexer-network/modules/nodesale"
"github.com/gaze-network/indexer-network/modules/runes"
"github.com/gaze-network/indexer-network/pkg/automaxprocs"
"github.com/gaze-network/indexer-network/pkg/logger"
@@ -39,6 +40,7 @@ import (
// Register Modules
var Modules = do.Package(
do.LazyNamed("runes", runes.New),
do.LazyNamed("nodesale", nodesale.New),
)
func NewRunCommand() *cobra.Command {

View File

@@ -6,13 +6,15 @@ import (
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/constants"
"github.com/gaze-network/indexer-network/modules/runes"
"github.com/gaze-network/indexer-network/modules/nodesale"
runesconstants "github.com/gaze-network/indexer-network/modules/runes/constants"
"github.com/spf13/cobra"
)
var versions = map[string]string{
"": constants.Version,
"runes": runes.Version,
"": constants.Version,
"runes": runesconstants.Version,
"nodesale": nodesale.Version,
}
type versionCmdOptions struct {

View File

@@ -1,4 +0,0 @@
package common
// HalvingInterval is the number of blocks between each halving event.
const HalvingInterval = 210_000

View File

@@ -1,22 +1,31 @@
package common
import "github.com/btcsuite/btcd/chaincfg"
import (
"github.com/btcsuite/btcd/chaincfg"
"github.com/gaze-network/indexer-network/pkg/logger"
)
type Network string
const (
NetworkMainnet Network = "mainnet"
NetworkTestnet Network = "testnet"
NetworkMainnet Network = "mainnet"
NetworkTestnet Network = "testnet"
NetworkFractalMainnet Network = "fractal-mainnet"
NetworkFractalTestnet Network = "fractal-testnet"
)
var supportedNetworks = map[Network]struct{}{
NetworkMainnet: {},
NetworkTestnet: {},
NetworkMainnet: {},
NetworkTestnet: {},
NetworkFractalMainnet: {},
NetworkFractalTestnet: {},
}
var chainParams = map[Network]*chaincfg.Params{
NetworkMainnet: &chaincfg.MainNetParams,
NetworkTestnet: &chaincfg.TestNet3Params,
NetworkMainnet: &chaincfg.MainNetParams,
NetworkTestnet: &chaincfg.TestNet3Params,
NetworkFractalMainnet: &chaincfg.MainNetParams,
NetworkFractalTestnet: &chaincfg.MainNetParams,
}
func (n Network) IsSupported() bool {
@@ -31,3 +40,15 @@ func (n Network) ChainParams() *chaincfg.Params {
func (n Network) String() string {
return string(n)
}
func (n Network) HalvingInterval() uint64 {
switch n {
case NetworkMainnet, NetworkTestnet:
return 210_000
case NetworkFractalMainnet, NetworkFractalTestnet:
return 2_100_000
default:
logger.Panic("invalid network")
return 0
}
}
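For context, the new `Network.HalvingInterval()` method replaces the removed `common.HalvingInterval` constant so that Bitcoin networks (210,000 blocks) and Fractal networks (2,100,000 blocks) can coexist. A minimal usage sketch, not part of this diff; the `subsidyEra` helper and `main` wrapper are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/gaze-network/indexer-network/common"
)

// subsidyEra returns how many halvings have occurred by blockHeight
// on the given network, using the network-specific halving interval.
func subsidyEra(network common.Network, blockHeight uint64) uint64 {
	return blockHeight / network.HalvingInterval()
}

func main() {
	fmt.Println(subsidyEra(common.NetworkMainnet, 840_000))        // 4 (210,000-block interval)
	fmt.Println(subsidyEra(common.NetworkFractalMainnet, 840_000)) // 0 (2,100,000-block interval)
}
```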

View File

@@ -47,3 +47,11 @@ modules:
password: "password"
db_name: "postgres"
# url: "postgres://postgres:password@localhost:5432/postgres?sslmode=prefer" # [Optional] This will override other database credentials above.
nodesale:
postgres:
host: "localhost"
port: 5432
user: "postgres"
password: "P@ssw0rd"
db_name: "postgres"
last_block_default: 400

View File

@@ -285,3 +285,12 @@ func (d *BitcoinNodeDatasource) GetBlockHeader(ctx context.Context, height int64
return types.ParseMsgBlockHeader(*block, height), nil
}
func (d *BitcoinNodeDatasource) GetRawTransactionByTxHash(ctx context.Context, txHash chainhash.Hash) (*wire.MsgTx, error) {
transaction, err := d.btcclient.GetRawTransaction(&txHash)
if err != nil {
return nil, errors.Wrap(err, "failed to get raw transaction")
}
return transaction.MsgTx(), nil
}

165
docs/README_tr.md Normal file
View File

@@ -0,0 +1,165 @@
## Çeviriler
- [English (İngilizce)](../README.md)
**Son Güncelleme:** 21 Ağustos 2024
> **Not:** Bu belge, topluluk tarafından yapılmış bir çeviridir. Ana README.md dosyasındaki güncellemeler buraya otomatik olarak yansıtılmayabilir. En güncel bilgiler için [İngilizce sürümü](../README.md) inceleyin.
# Gaze Indexer
Gaze Indexer, değiştirilebilir token protokolleri arasında **Birleştirilmiş Tutarlı API'lere** sahip Bitcoin meta-protokolleri için açık kaynaklı ve modüler bir indeksleme istemcisidir.
Gaze Indexer, kullanıcıların tüm modülleri tek bir komutla tek bir monolitik örnekte veya dağıtılmış bir mikro hizmet kümesi olarak çalıştırmasına olanak tanıyan **modülerlik** göz önünde bulundurularak oluşturulmuştur.
Gaze Indexer, verimli veri getirme, yeniden düzenleme algılama ve veritabanı taşıma aracı ile HERHANGİ bir meta-protokol indeksleyici oluşturmak için bir temel görevi görür.
Bu, geliştiricilerin **gerçekten** önemli olana odaklanmasını sağlar: Meta-protokol indeksleme mantığı. Yeni meta-protokoller, yeni modüller uygulanarak kolayca eklenebilir.
- [Modüller](#modules)
- [1. Runes](#1-runes)
- [Kurulum](#installation)
- [Önkoşullar](#prerequisites)
- [1. Donanım Gereksinimleri](#1-hardware-requirements)
- [2. Bitcoin Core RPC sunucusunu hazırlayın.](#2-prepare-bitcoin-core-rpc-server)
- [3. Veritabanı hazırlayın.](#3-prepare-database)
- [4. `config.yaml` dosyasını hazırlayın.](#4-prepare-configyaml-file)
- [Docker ile yükle (önerilir)](#install-with-docker-recommended)
- [Kaynaktan yükle](#install-from-source)
## Modüller
### 1. Runes
Runes Dizinleyici ilk meta-protokol dizinleyicimizdir. Bitcoin işlemlerini kullanarak Runes durumlarını, işlemlerini, rün taşlarını ve bakiyelerini indeksler.
Geçmiş Runes verilerini sorgulamak için bir dizi API ile birlikte gelir. Tüm ayrıntılar için [API Referansı](https://api-docs.gaze.network) adresimize bakın.
## Kurulum
### Önkoşullar
#### 1. Donanım Gereksinimleri
Her modül farklı donanım gereksinimleri gerektirir.
| Modül | CPU | RAM |
| ------ | --------- | ---- |
| Runes | 0,5 çekirdek | 1 GB |
#### 2. Bitcoin Core RPC sunucusunu hazırlayın.
Gaze Indexer'ın işlem verilerini kendi barındırdığı ya da QuickNode gibi yönetilen sağlayıcıları kullanan bir Bitcoin Core RPC'den alması gerekir.
Bir Bitcoin Core'u kendiniz barındırmak için bkz. https://bitcoin.org/en/full-node.
#### 3. Veritabanını hazırlayın.
Gaze Indexer PostgreSQL için birinci sınıf desteğe sahiptir. Diğer veritabanlarını kullanmak isterseniz, her modülün Veri Ağ Geçidi arayüzünü karşılayan kendi veritabanı havuzunuzu uygulayabilirsiniz.
İşte her modül için minimum veritabanı disk alanı gereksinimimiz.
| Modül | Veritabanı Depolama Alanı (mevcut) | Veritabanı Depolama Alanı (1 yıl içinde) |
| ------ | -------------------------- | ---------------------------- |
| Runes | 10 GB | 150 GB |
#### 4. `config.yaml` dosyasını hazırlayın.
```yaml
# config.yaml
logger:
output: TEXT # Output format for logs. current supported formats: "TEXT" | "JSON" | "GCP"
debug: false
# Network to run the indexer on. Current supported networks: "mainnet" | "testnet"
network: mainnet
# Bitcoin Core RPC configuration options.
bitcoin_node:
host: "" # [Required] Host of Bitcoin Core RPC (without https://)
user: "" # Username to authenticate with Bitcoin Core RPC
pass: "" # Password to authenticate with Bitcoin Core RPC
disable_tls: false # Set to true to disable tls
# Block reporting configuration options. See Block Reporting section for more details.
reporting:
disabled: false # Set to true to disable block reporting to Gaze Network. Default is false.
base_url: "https://indexer.api.gaze.network" # Defaults to "https://indexer.api.gaze.network" if left empty
name: "" # [Required if not disabled] Name of this indexer to show on the Gaze Network dashboard
website_url: "" # Public website URL to show on the dashboard. Can be left empty.
indexer_api_url: "" # Public url to access this indexer's API. Can be left empty if you want to keep your indexer private.
# HTTP server configuration options.
http_server:
port: 8080 # Port to run the HTTP server on for modules with HTTP API handlers.
# Meta-protocol modules configuration options.
modules:
# Configuration options for Runes module. Can be removed if not used.
runes:
database: "postgres" # Database to store Runes data. current supported databases: "postgres"
datasource: "bitcoin-node" # Data source to be used for Bitcoin data. current supported data sources: "bitcoin-node".
api_handlers: # API handlers to enable. current supported handlers: "http"
- http
postgres:
host: "localhost"
port: 5432
user: "postgres"
password: "password"
db_name: "postgres"
# url: "postgres://postgres:password@localhost:5432/postgres?sslmode=prefer" # [Optional] This will override other database credentials above.
```
### Docker ile yükleyin (önerilir)
Kurulum kılavuzumuz için `docker-compose` kullanacağız. `docker-compose.yaml` dosyasının `config.yaml` dosyası ile aynı dizinde olduğundan emin olun.
```yaml
# docker-compose.yaml
services:
gaze-indexer:
image: ghcr.io/gaze-network/gaze-indexer:v0.2.1
container_name: gaze-indexer
restart: unless-stopped
ports:
- 8080:8080 # Expose HTTP server port to host
volumes:
- "./config.yaml:/app/config.yaml" # mount config.yaml file to the container as "/app/config.yaml"
command: ["/app/main", "run", "--modules", "runes"] # Put module flags after "run" commands to select which modules to run.
```
### Kaynaktan yükleyin
1. `Go` sürüm 1.22 veya daha üstünü yükleyin. Go kurulum kılavuzuna bakın [burada](https://go.dev/doc/install).
2. Bu depoyu klonlayın.
```bash
git clone https://github.com/gaze-network/gaze-indexer.git
cd gaze-indexer
```
3. Ana ikili dosyayı oluşturun.
```bash
# Bağımlılıkları al
go mod download
# Ana ikili dosyayı oluşturun
go build -o gaze main.go
```
4. Veritabanı geçişlerini `migrate` komutu ve modül bayrakları ile çalıştırın.
```bash
./gaze migrate up --runes --database postgres://postgres:password@localhost:5432/postgres
```
5. Dizinleyiciyi `run` komutu ve modül bayrakları ile başlatın.
```bash
./gaze run --modules runes
```
Eğer `config.yaml` dosyası `./app/config.yaml` adresinde bulunmuyorsa, `config.yaml` dosyasının yolunu belirtmek için `--config` bayrağını kullanın.
```bash
./gaze run --modules runes --config /path/to/config.yaml
```
## Çeviriler
- [English (İngilizce)](../README.md)

7
go.mod
View File

@@ -25,12 +25,15 @@ require (
github.com/valyala/fasthttp v1.51.0
go.uber.org/automaxprocs v1.5.3
golang.org/x/sync v0.7.0
google.golang.org/protobuf v1.33.0
)
require github.com/stretchr/objx v0.5.2 // indirect
require (
github.com/andybalholm/brotli v1.0.5 // indirect
github.com/bitonicnl/verify-signed-message v0.7.1
github.com/btcsuite/btcd/btcec/v2 v2.3.3 // indirect
github.com/btcsuite/btcd/btcec/v2 v2.3.3
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd // indirect
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 // indirect
@@ -38,7 +41,7 @@ require (
github.com/cockroachdb/redact v1.1.5 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/decred/dcrd/crypto/blake256 v1.0.1 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/getsentry/sentry-go v0.18.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect

4
go.sum
View File

@@ -221,6 +221,8 @@ github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMV
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
@@ -310,6 +312,8 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=

View File

@@ -8,6 +8,7 @@ import (
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common"
nodesaleconfig "github.com/gaze-network/indexer-network/modules/nodesale/config"
runesconfig "github.com/gaze-network/indexer-network/modules/runes/config"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
@@ -61,7 +62,8 @@ type BitcoinNodeClient struct {
}
type Modules struct {
Runes runesconfig.Config `mapstructure:"runes"`
Runes runesconfig.Config `mapstructure:"runes"`
NodeSale nodesaleconfig.Config `mapstructure:"nodesale"`
}
type HTTPServerConfig struct {

View File

@@ -0,0 +1,99 @@
package httphandler
import (
"fmt"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
"github.com/gaze-network/indexer-network/modules/nodesale/protobuf"
"github.com/gofiber/fiber/v2"
"google.golang.org/protobuf/encoding/protojson"
)
type deployRequest struct {
DeployID string `params:"deployId"`
}
type tierResponse struct {
PriceSat uint32 `json:"priceSat"`
Limit uint32 `json:"limit"`
MaxPerAddress uint32 `json:"maxPerAddress"`
Sold int64 `json:"sold"`
}
type deployResponse struct {
Id string `json:"id"`
Name string `json:"name"`
StartsAt int64 `json:"startsAt"`
EndsAt int64 `json:"endsAt"`
Tiers []tierResponse `json:"tiers"`
SellerPublicKey string `json:"sellerPublicKey"`
MaxPerAddress uint32 `json:"maxPerAddress"`
DeployTxHash string `json:"deployTxHash"`
}
func (h *handler) deployHandler(ctx *fiber.Ctx) error {
var request deployRequest
err := ctx.ParamsParser(&request)
if err != nil {
return errors.Wrap(err, "cannot parse param")
}
var blockHeight uint64
var txIndex uint32
count, err := fmt.Sscanf(request.DeployID, "%d-%d", &blockHeight, &txIndex)
if count != 2 || err != nil {
return errs.NewPublicError("Invalid deploy ID")
}
deploys, err := h.nodeSaleDg.GetNodeSale(ctx.UserContext(), datagateway.GetNodeSaleParams{
BlockHeight: blockHeight,
TxIndex: txIndex,
})
if err != nil {
return errors.Wrap(err, "Cannot get NodeSale from db")
}
if len(deploys) < 1 {
return errs.NewPublicError("NodeSale not found")
}
deploy := deploys[0]
nodeCount, err := h.nodeSaleDg.GetNodeCountByTierIndex(ctx.UserContext(), datagateway.GetNodeCountByTierIndexParams{
SaleBlock: deploy.BlockHeight,
SaleTxIndex: deploy.TxIndex,
FromTier: 0,
ToTier: uint32(len(deploy.Tiers) - 1),
})
if err != nil {
return errors.Wrap(err, "Cannot get node count from db")
}
tiers := make([]protobuf.Tier, len(deploy.Tiers))
tierResponses := make([]tierResponse, len(deploy.Tiers))
for i, tierJson := range deploy.Tiers {
tier := &tiers[i]
err := protojson.Unmarshal(tierJson, tier)
if err != nil {
return errors.Wrap(err, "Failed to decode tiers json")
}
tierResponses[i].Limit = tiers[i].Limit
tierResponses[i].MaxPerAddress = tiers[i].MaxPerAddress
tierResponses[i].PriceSat = tiers[i].PriceSat
tierResponses[i].Sold = nodeCount[i].Count
}
err = ctx.JSON(&deployResponse{
Id: request.DeployID,
Name: deploy.Name,
StartsAt: deploy.StartsAt.UTC().Unix(),
EndsAt: deploy.EndsAt.UTC().Unix(),
Tiers: tierResponses,
SellerPublicKey: deploy.SellerPublicKey,
MaxPerAddress: deploy.MaxPerAddress,
DeployTxHash: deploy.DeployTxHash,
})
if err != nil {
return errors.Wrap(err, "Go fiber cannot parse JSON")
}
return nil
}

View File

@@ -0,0 +1,56 @@
package httphandler
import (
"encoding/json"
"time"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/modules/nodesale/protobuf"
"github.com/gofiber/fiber/v2"
)
type eventRequest struct {
WalletAddress string `query:"walletAddress"`
}
type eventResponse struct {
TxHash string `json:"txHash"`
BlockHeight int64 `json:"blockHeight"`
TxIndex int32 `json:"txIndex"`
WalletAddress string `json:"walletAddress"`
Action string `json:"action"`
ParsedMessage json.RawMessage `json:"parsedMessage"`
BlockTimestamp time.Time `json:"blockTimestamp"`
BlockHash string `json:"blockHash"`
}
func (h *handler) eventsHandler(ctx *fiber.Ctx) error {
var request eventRequest
err := ctx.QueryParser(&request)
if err != nil {
return errors.Wrap(err, "cannot parse query")
}
events, err := h.nodeSaleDg.GetEventsByWallet(ctx.UserContext(), request.WalletAddress)
if err != nil {
return errors.Wrap(err, "Can't get events from db")
}
responses := make([]eventResponse, len(events))
for i, event := range events {
responses[i].TxHash = event.TxHash
responses[i].BlockHeight = event.BlockHeight
responses[i].TxIndex = event.TxIndex
responses[i].WalletAddress = event.WalletAddress
responses[i].Action = protobuf.Action_name[event.Action]
responses[i].ParsedMessage = event.ParsedMessage
responses[i].BlockTimestamp = event.BlockTimestamp
responses[i].BlockHash = event.BlockHash
}
err = ctx.JSON(responses)
if err != nil {
return errors.Wrap(err, "Go fiber cannot parse JSON")
}
return nil
}

View File

@@ -0,0 +1,15 @@
package httphandler
import (
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
)
type handler struct {
nodeSaleDg datagateway.NodeSaleDataGateway
}
func New(datagateway datagateway.NodeSaleDataGateway) *handler {
h := handler{}
h.nodeSaleDg = datagateway
return &h
}

View File

@@ -0,0 +1,26 @@
package httphandler
import (
"github.com/cockroachdb/errors"
"github.com/gofiber/fiber/v2"
)
type infoResponse struct {
IndexedBlockHeight int64 `json:"indexedBlockHeight"`
IndexedBlockHash string `json:"indexedBlockHash"`
}
func (h *handler) infoHandler(ctx *fiber.Ctx) error {
block, err := h.nodeSaleDg.GetLastProcessedBlock(ctx.UserContext())
if err != nil {
return errors.Wrap(err, "Cannot get last processed block")
}
err = ctx.JSON(infoResponse{
IndexedBlockHeight: block.BlockHeight,
IndexedBlockHash: block.BlockHash,
})
if err != nil {
return errors.Wrap(err, "Go fiber cannot parse JSON")
}
return nil
}

View File

@@ -0,0 +1,82 @@
package httphandler
import (
"fmt"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
"github.com/gofiber/fiber/v2"
)
type nodeRequest struct {
DeployId string `query:"deployId"`
OwnerPublicKey string `query:"ownerPublicKey"`
DelegateePublicKey string `query:"delegateePublicKey"`
}
type nodeResponse struct {
DeployId string `json:"deployId"`
NodeId uint32 `json:"nodeId"`
TierIndex int32 `json:"tierIndex"`
DelegatedTo string `json:"delegatedTo"`
OwnerPublicKey string `json:"ownerPublicKey"`
PurchaseTxHash string `json:"purchaseTxHash"`
DelegateTxHash string `json:"delegateTxHash"`
PurchaseBlockHeight int32 `json:"purchaseBlockHeight"`
}
func (h *handler) nodesHandler(ctx *fiber.Ctx) error {
var request nodeRequest
err := ctx.QueryParser(&request)
if err != nil {
return errors.Wrap(err, "cannot parse query")
}
ownerPublicKey := request.OwnerPublicKey
delegateePublicKey := request.DelegateePublicKey
var blockHeight int64
var txIndex int32
count, err := fmt.Sscanf(request.DeployId, "%d-%d", &blockHeight, &txIndex)
if count != 2 || err != nil {
return errs.NewPublicError("Invalid deploy ID")
}
var nodes []entity.Node
if ownerPublicKey == "" {
nodes, err = h.nodeSaleDg.GetNodesByDeployment(ctx.UserContext(), blockHeight, txIndex)
if err != nil {
return errors.Wrap(err, "Can't get nodes from db")
}
} else {
nodes, err = h.nodeSaleDg.GetNodesByPubkey(ctx.UserContext(), datagateway.GetNodesByPubkeyParams{
SaleBlock: blockHeight,
SaleTxIndex: txIndex,
OwnerPublicKey: ownerPublicKey,
DelegatedTo: delegateePublicKey,
})
if err != nil {
return errors.Wrap(err, "Can't get nodes from db")
}
}
responses := make([]nodeResponse, len(nodes))
for i, node := range nodes {
responses[i].DeployId = request.DeployId
responses[i].NodeId = node.NodeID
responses[i].TierIndex = node.TierIndex
responses[i].DelegatedTo = node.DelegatedTo
responses[i].OwnerPublicKey = node.OwnerPublicKey
responses[i].PurchaseTxHash = node.PurchaseTxHash
responses[i].DelegateTxHash = node.DelegateTxHash
responses[i].PurchaseBlockHeight = int32(blockHeight)
}
err = ctx.JSON(responses)
if err != nil {
return errors.Wrap(err, "Go fiber cannot parse JSON")
}
return nil
}

View File

@@ -0,0 +1,16 @@
package httphandler
import (
"github.com/gofiber/fiber/v2"
)
func (h *handler) Mount(router fiber.Router) error {
r := router.Group("/nodesale/v1")
r.Get("/info", h.infoHandler)
r.Get("/deploy/:deployId", h.deployHandler)
r.Get("/nodes", h.nodesHandler)
r.Get("/events", h.eventsHandler)
return nil
}

View File

@@ -0,0 +1,8 @@
package config
import "github.com/gaze-network/indexer-network/internal/postgres"
type Config struct {
Postgres postgres.Config `mapstructure:"postgres"`
LastBlockDefault int64 `mapstructure:"last_block_default"`
}

View File

@@ -0,0 +1,9 @@
BEGIN;
DROP TABLE IF EXISTS nodes;
DROP TABLE IF EXISTS node_sales;
DROP TABLE IF EXISTS events;
DROP TABLE IF EXISTS blocks;
COMMIT;

View File

@@ -0,0 +1,64 @@
BEGIN;
CREATE TABLE IF NOT EXISTS blocks (
"block_height" BIGINT NOT NULL,
"block_hash" TEXT NOT NULL,
"module" TEXT NOT NULL,
PRIMARY KEY("block_height", "block_hash")
);
CREATE TABLE IF NOT EXISTS events (
"tx_hash" TEXT NOT NULL PRIMARY KEY,
"block_height" BIGINT NOT NULL,
"tx_index" INTEGER NOT NULL,
"wallet_address" TEXT NOT NULL,
"valid" BOOLEAN NOT NULL,
"action" INTEGER NOT NULL,
"raw_message" BYTEA NOT NULL,
"parsed_message" JSONB NOT NULL DEFAULT '{}',
"block_timestamp" TIMESTAMP NOT NULL,
"block_hash" TEXT NOT NULL,
"metadata" JSONB NOT NULL DEFAULT '{}',
"reason" TEXT NOT NULL DEFAULT ''
);
INSERT INTO events("tx_hash", "block_height", "tx_index",
"wallet_address", "valid", "action",
"raw_message", "parsed_message", "block_timestamp",
"block_hash", "metadata")
VALUES ('', -1, -1,
'', false, -1,
'', '{}', NOW(),
'', '{}');
CREATE TABLE IF NOT EXISTS node_sales (
"block_height" BIGINT NOT NULL,
"tx_index" INTEGER NOT NULL,
"name" TEXT NOT NULL,
"starts_at" TIMESTAMP NOT NULL,
"ends_at" TIMESTAMP NOT NULL,
"tiers" JSONB[] NOT NULL,
"seller_public_key" TEXT NOT NULL,
"max_per_address" INTEGER NOT NULL,
"deploy_tx_hash" TEXT NOT NULL REFERENCES events(tx_hash) ON DELETE CASCADE,
"max_discount_percentage" INTEGER NOT NULL,
"seller_wallet" TEXT NOT NULL,
PRIMARY KEY ("block_height", "tx_index")
);
CREATE TABLE IF NOT EXISTS nodes (
"sale_block" BIGINT NOT NULL,
"sale_tx_index" INTEGER NOT NULL,
"node_id" INTEGER NOT NULL,
"tier_index" INTEGER NOT NULL,
"delegated_to" TEXT NOT NULL DEFAULT '',
"owner_public_key" TEXT NOT NULL,
"purchase_tx_hash" TEXT NOT NULL REFERENCES events(tx_hash) ON DELETE CASCADE,
"delegate_tx_hash" TEXT NOT NULL DEFAULT '' REFERENCES events(tx_hash) ON DELETE SET DEFAULT,
PRIMARY KEY("sale_block", "sale_tx_index", "node_id"),
FOREIGN KEY("sale_block", "sale_tx_index") REFERENCES node_sales("block_height", "tx_index")
);
COMMIT;

View File

@@ -0,0 +1,15 @@
-- name: GetLastProcessedBlock :one
SELECT * FROM blocks ORDER BY block_height DESC LIMIT 1;
-- name: GetBlock :one
SELECT * FROM blocks
WHERE "block_height" = $1;
-- name: RemoveBlockFrom :execrows
DELETE FROM blocks
WHERE "block_height" >= @from_block;
-- name: CreateBlock :exec
INSERT INTO blocks ("block_height", "block_hash", "module")
VALUES ($1, $2, $3);

View File

@@ -0,0 +1,14 @@
-- name: RemoveEventsFromBlock :execrows
DELETE FROM events
WHERE "block_height" >= @from_block;
-- name: CreateEvent :exec
INSERT INTO events ("tx_hash", "block_height", "tx_index", "wallet_address", "valid", "action",
"raw_message", "parsed_message", "block_timestamp", "block_hash", "metadata",
"reason")
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12);
-- name: GetEventsByWallet :many
SELECT *
FROM events
WHERE wallet_address = $1;

View File

@@ -0,0 +1,57 @@
-- name: ClearDelegate :execrows
UPDATE nodes
SET "delegated_to" = ''
WHERE "delegate_tx_hash" = '';
-- name: SetDelegates :execrows
UPDATE nodes
SET delegated_to = @delegatee, delegate_tx_hash = $3
WHERE sale_block = $1 AND
sale_tx_index = $2 AND
node_id = ANY (@node_ids::int[]);
-- name: GetNodesByIds :many
SELECT *
FROM nodes
WHERE sale_block = $1 AND
sale_tx_index = $2 AND
node_id = ANY (@node_ids::int[]);
-- name: GetNodesByOwner :many
SELECT *
FROM nodes
WHERE sale_block = $1 AND
sale_tx_index = $2 AND
owner_public_key = $3
ORDER BY tier_index;
-- name: GetNodesByPubkey :many
SELECT nodes.*
FROM nodes JOIN events ON nodes.purchase_tx_hash = events.tx_hash
WHERE sale_block = $1 AND
sale_tx_index = $2 AND
owner_public_key = $3 AND
delegated_to = $4;
-- name: CreateNode :exec
INSERT INTO nodes (sale_block, sale_tx_index, node_id, tier_index, delegated_to, owner_public_key, purchase_tx_hash, delegate_tx_hash)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8);
-- name: GetNodeCountByTierIndex :many
SELECT (tiers.tier_index)::int AS tier_index, count(nodes.tier_index)
FROM generate_series(@from_tier::int,@to_tier::int) AS tiers(tier_index)
LEFT JOIN
(SELECT *
FROM nodes
WHERE sale_block = $1 AND
sale_tx_index= $2)
AS nodes ON tiers.tier_index = nodes.tier_index
GROUP BY tiers.tier_index
ORDER BY tiers.tier_index;
-- name: GetNodesByDeployment :many
SELECT *
FROM nodes
WHERE sale_block = $1 AND
sale_tx_index = $2;

View File

@@ -0,0 +1,9 @@
-- name: CreateNodeSale :exec
INSERT INTO node_sales ("block_height", "tx_index", "name", "starts_at", "ends_at", "tiers", "seller_public_key", "max_per_address", "deploy_tx_hash", "max_discount_percentage", "seller_wallet")
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11);
-- name: GetNodeSale :many
SELECT *
FROM node_sales
WHERE block_height = $1 AND
tx_index = $2;

View File

@@ -0,0 +1,3 @@
-- name: ClearEvents :exec
DELETE FROM events
WHERE tx_hash <> '';

File diff suppressed because it is too large.

View File

@@ -0,0 +1,77 @@
package datagateway
import (
"context"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
)
type NodeSaleDataGateway interface {
BeginNodeSaleTx(ctx context.Context) (NodeSaleDataGatewayWithTx, error)
CreateBlock(ctx context.Context, arg entity.Block) error
GetBlock(ctx context.Context, blockHeight int64) (*entity.Block, error)
GetLastProcessedBlock(ctx context.Context) (*entity.Block, error)
RemoveBlockFrom(ctx context.Context, fromBlock int64) (int64, error)
RemoveEventsFromBlock(ctx context.Context, fromBlock int64) (int64, error)
ClearDelegate(ctx context.Context) (int64, error)
GetNodesByIds(ctx context.Context, arg GetNodesByIdsParams) ([]entity.Node, error)
CreateEvent(ctx context.Context, arg entity.NodeSaleEvent) error
SetDelegates(ctx context.Context, arg SetDelegatesParams) (int64, error)
CreateNodeSale(ctx context.Context, arg entity.NodeSale) error
GetNodeSale(ctx context.Context, arg GetNodeSaleParams) ([]entity.NodeSale, error)
GetNodesByOwner(ctx context.Context, arg GetNodesByOwnerParams) ([]entity.Node, error)
CreateNode(ctx context.Context, arg entity.Node) error
GetNodeCountByTierIndex(ctx context.Context, arg GetNodeCountByTierIndexParams) ([]GetNodeCountByTierIndexRow, error)
GetNodesByPubkey(ctx context.Context, arg GetNodesByPubkeyParams) ([]entity.Node, error)
GetNodesByDeployment(ctx context.Context, saleBlock int64, saleTxIndex int32) ([]entity.Node, error)
GetEventsByWallet(ctx context.Context, walletAddress string) ([]entity.NodeSaleEvent, error)
}
type NodeSaleDataGatewayWithTx interface {
NodeSaleDataGateway
Tx
}
type GetNodesByIdsParams struct {
SaleBlock uint64
SaleTxIndex uint32
NodeIds []uint32
}
type SetDelegatesParams struct {
SaleBlock uint64
SaleTxIndex int32
Delegatee string
DelegateTxHash string
NodeIds []uint32
}
type GetNodeSaleParams struct {
BlockHeight uint64
TxIndex uint32
}
type GetNodesByOwnerParams struct {
SaleBlock uint64
SaleTxIndex uint32
OwnerPublicKey string
}
type GetNodeCountByTierIndexParams struct {
SaleBlock uint64
SaleTxIndex uint32
FromTier uint32
ToTier uint32
}
type GetNodeCountByTierIndexRow struct {
TierIndex int32
Count int64
}
type GetNodesByPubkeyParams struct {
SaleBlock int64
SaleTxIndex int32
OwnerPublicKey string
DelegatedTo string
}
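The README translation earlier in this compare notes that databases other than PostgreSQL can be used by implementing each module's Data Gateway interface. A hypothetical sketch against the `NodeSaleDataGateway` interface above, assuming a file inside the nodesale module; `customRepository` is an invented name, and it embeds an existing implementation so only the overridden method changes behavior:

```go
package nodesale

import (
	"context"

	"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
	"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
)

// customRepository satisfies datagateway.NodeSaleDataGateway by embedding an
// existing implementation and overriding only block persistence.
type customRepository struct {
	datagateway.NodeSaleDataGateway
}

// CreateBlock writes the indexed block to a custom backend instead of Postgres.
func (r *customRepository) CreateBlock(ctx context.Context, arg entity.Block) error {
	// arg.BlockHeight, arg.BlockHash and arg.Module would be written to the custom store here.
	return nil
}
```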

View File

@@ -0,0 +1,12 @@
package datagateway
import "context"
type Tx interface {
// Commit commits the DB transaction. All changes made after Begin() will be persisted. Calling Commit() will close the current transaction.
// If Commit() is called without a prior Begin(), it must be a no-op.
Commit(ctx context.Context) error
// Rollback rolls back the DB transaction. All changes made after Begin() will be discarded.
// Rollback() must be safe to call even if no transaction is active. Hence, a defer Rollback() is safe, even if Commit() was called prior with non-error conditions.
Rollback(ctx context.Context) error
}
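A short, hypothetical sketch of how a caller might use the `Tx` contract above together with `BeginNodeSaleTx`; the `persistBlock` function is invented for illustration and is not part of this diff:

```go
package nodesale

import (
	"context"

	"github.com/cockroachdb/errors"
	"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
	"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
)

// persistBlock stores one block inside a DB transaction.
func persistBlock(ctx context.Context, dg datagateway.NodeSaleDataGateway, block entity.Block) error {
	qtx, err := dg.BeginNodeSaleTx(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to begin transaction")
	}
	// Deferring Rollback is safe: per the contract above, it is a no-op after a successful Commit.
	defer func() { _ = qtx.Rollback(ctx) }()

	if err := qtx.CreateBlock(ctx, block); err != nil {
		return errors.Wrap(err, "failed to create block")
	}
	if err := qtx.Commit(ctx); err != nil {
		return errors.Wrap(err, "failed to commit transaction")
	}
	return nil
}
```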

View File

@@ -0,0 +1,61 @@
package nodesale
import (
"context"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
delegatevalidator "github.com/gaze-network/indexer-network/modules/nodesale/internal/validator/delegate"
)
func (p *Processor) ProcessDelegate(ctx context.Context, qtx datagateway.NodeSaleDataGatewayWithTx, block *types.Block, event NodeSaleEvent) error {
validator := delegatevalidator.New()
delegate := event.EventMessage.Delegate
_, nodes, err := validator.NodesExist(ctx, qtx, delegate.DeployID, delegate.NodeIDs)
if err != nil {
return errors.Wrap(err, "Cannot query")
}
for _, node := range nodes {
valid := validator.EqualXonlyPublicKey(node.OwnerPublicKey, event.TxPubkey)
if !valid {
break
}
}
err = qtx.CreateEvent(ctx, entity.NodeSaleEvent{
TxHash: event.Transaction.TxHash.String(),
TxIndex: int32(event.Transaction.Index),
Action: int32(event.EventMessage.Action),
RawMessage: event.RawData,
ParsedMessage: event.EventJson,
BlockTimestamp: block.Header.Timestamp,
BlockHash: event.Transaction.BlockHash.String(),
BlockHeight: event.Transaction.BlockHeight,
Valid: validator.Valid,
WalletAddress: p.PubkeyToPkHashAddress(event.TxPubkey).EncodeAddress(),
Metadata: nil,
Reason: validator.Reason,
})
if err != nil {
return errors.Wrap(err, "Failed to insert event")
}
if validator.Valid {
_, err = qtx.SetDelegates(ctx, datagateway.SetDelegatesParams{
SaleBlock: delegate.DeployID.Block,
SaleTxIndex: int32(delegate.DeployID.TxIndex),
Delegatee: delegate.DelegateePublicKey,
DelegateTxHash: event.Transaction.TxHash.String(),
NodeIds: delegate.NodeIDs,
})
if err != nil {
return errors.Wrap(err, "Failed to set delegate")
}
}
return nil
}

View File

@@ -0,0 +1,84 @@
package nodesale
import (
"context"
"encoding/hex"
"testing"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway/mocks"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
"github.com/gaze-network/indexer-network/modules/nodesale/protobuf"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func TestDelegate(t *testing.T) {
ctx := context.Background()
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
buyerPrivateKey, _ := btcec.NewPrivateKey()
buyerPubkeyHex := hex.EncodeToString(buyerPrivateKey.PubKey().SerializeCompressed())
delegateePrivateKey, _ := btcec.NewPrivateKey()
delegateePubkeyHex := hex.EncodeToString(delegateePrivateKey.PubKey().SerializeCompressed())
delegateMessage := &protobuf.NodeSaleEvent{
Action: protobuf.Action_ACTION_DELEGATE,
Delegate: &protobuf.ActionDelegate{
DelegateePublicKey: delegateePubkeyHex,
NodeIDs: []uint32{9, 10},
DeployID: &protobuf.ActionID{
Block: uint64(testBlockHeight) - 2,
TxIndex: uint32(testTxIndex) - 2,
},
},
}
event, block := assembleTestEvent(buyerPrivateKey, "131313131313", "131313131313", 0, 0, delegateMessage)
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
return event.Valid == true
})).Return(nil)
mockDgTx.EXPECT().GetNodesByIds(mock.Anything, datagateway.GetNodesByIdsParams{
SaleBlock: delegateMessage.Delegate.DeployID.Block,
SaleTxIndex: delegateMessage.Delegate.DeployID.TxIndex,
NodeIds: []uint32{9, 10},
}).Return([]entity.Node{
{
SaleBlock: delegateMessage.Delegate.DeployID.Block,
SaleTxIndex: delegateMessage.Delegate.DeployID.TxIndex,
NodeID: 9,
TierIndex: 1,
DelegatedTo: "",
OwnerPublicKey: buyerPubkeyHex,
PurchaseTxHash: mock.Anything,
DelegateTxHash: "",
},
{
SaleBlock: delegateMessage.Delegate.DeployID.Block,
SaleTxIndex: delegateMessage.Delegate.DeployID.TxIndex,
NodeID: 10,
TierIndex: 2,
DelegatedTo: "",
OwnerPublicKey: buyerPubkeyHex,
PurchaseTxHash: mock.Anything,
DelegateTxHash: "",
},
}, nil)
mockDgTx.EXPECT().SetDelegates(mock.Anything, datagateway.SetDelegatesParams{
SaleBlock: delegateMessage.Delegate.DeployID.Block,
SaleTxIndex: int32(delegateMessage.Delegate.DeployID.TxIndex),
Delegatee: delegateMessage.Delegate.DelegateePublicKey,
DelegateTxHash: event.Transaction.TxHash.String(),
NodeIds: delegateMessage.Delegate.NodeIDs,
}).Return(2, nil)
err := p.ProcessDelegate(ctx, mockDgTx, block, event)
require.NoError(t, err)
}

View File

@@ -0,0 +1,67 @@
package nodesale
import (
"context"
"time"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/validator"
"google.golang.org/protobuf/encoding/protojson"
)
func (p *Processor) ProcessDeploy(ctx context.Context, qtx datagateway.NodeSaleDataGatewayWithTx, block *types.Block, event NodeSaleEvent) error {
deploy := event.EventMessage.Deploy
validator := validator.New()
validator.EqualXonlyPublicKey(deploy.SellerPublicKey, event.TxPubkey)
err := qtx.CreateEvent(ctx, entity.NodeSaleEvent{
TxHash: event.Transaction.TxHash.String(),
TxIndex: int32(event.Transaction.Index),
Action: int32(event.EventMessage.Action),
RawMessage: event.RawData,
ParsedMessage: event.EventJson,
BlockTimestamp: block.Header.Timestamp,
BlockHash: event.Transaction.BlockHash.String(),
BlockHeight: event.Transaction.BlockHeight,
Valid: validator.Valid,
WalletAddress: p.PubkeyToPkHashAddress(event.TxPubkey).EncodeAddress(),
Metadata: nil,
Reason: validator.Reason,
})
if err != nil {
return errors.Wrap(err, "Failed to insert event")
}
if validator.Valid {
tiers := make([][]byte, len(deploy.Tiers))
for i, tier := range deploy.Tiers {
tierJson, err := protojson.Marshal(tier)
if err != nil {
return errors.Wrap(err, "Failed to parse tiers to json")
}
tiers[i] = tierJson
}
err = qtx.CreateNodeSale(ctx, entity.NodeSale{
BlockHeight: uint64(event.Transaction.BlockHeight),
TxIndex: event.Transaction.Index,
Name: deploy.Name,
StartsAt: time.Unix(int64(deploy.StartsAt), 0),
EndsAt: time.Unix(int64(deploy.EndsAt), 0),
Tiers: tiers,
SellerPublicKey: deploy.SellerPublicKey,
MaxPerAddress: deploy.MaxPerAddress,
DeployTxHash: event.Transaction.TxHash.String(),
MaxDiscountPercentage: int32(deploy.MaxDiscountPercentage),
SellerWallet: deploy.SellerWallet,
})
if err != nil {
return errors.Wrap(err, "Failed to insert NodeSale")
}
}
return nil
}

View File

@@ -0,0 +1,139 @@
package nodesale
import (
"context"
"encoding/hex"
"testing"
"time"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway/mocks"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
"github.com/gaze-network/indexer-network/modules/nodesale/protobuf"
"github.com/samber/lo"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/encoding/protojson"
)
func TestDeployInvalid(t *testing.T) {
ctx := context.Background()
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
prvKey, err := btcec.NewPrivateKey()
require.NoError(t, err)
strangerKey, err := btcec.NewPrivateKey()
require.NoError(t, err)
strangerPubkeyHex := hex.EncodeToString(strangerKey.PubKey().SerializeCompressed())
sellerWallet := p.PubkeyToPkHashAddress(prvKey.PubKey())
message := &protobuf.NodeSaleEvent{
Action: protobuf.Action_ACTION_DEPLOY,
Deploy: &protobuf.ActionDeploy{
Name: t.Name(),
StartsAt: 100,
EndsAt: 200,
Tiers: []*protobuf.Tier{
{
PriceSat: 100,
Limit: 5,
MaxPerAddress: 100,
},
{
PriceSat: 200,
Limit: 5,
MaxPerAddress: 100,
},
},
SellerPublicKey: strangerPubkeyHex,
MaxPerAddress: 100,
MaxDiscountPercentage: 50,
SellerWallet: sellerWallet.EncodeAddress(),
},
}
event, block := assembleTestEvent(prvKey, "0101010101", "0101010101", 0, 0, message)
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
return event.Valid == false
})).Return(nil)
err = p.ProcessDeploy(ctx, mockDgTx, block, event)
require.NoError(t, err)
mockDgTx.AssertNotCalled(t, "CreateNodeSale")
}
func TestDeployValid(t *testing.T) {
ctx := context.Background()
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
privateKey, err := btcec.NewPrivateKey()
require.NoError(t, err)
pubkeyHex := hex.EncodeToString(privateKey.PubKey().SerializeCompressed())
sellerWallet := p.PubkeyToPkHashAddress(privateKey.PubKey())
startAt := time.Now().Add(time.Hour * -1)
endAt := time.Now().Add(time.Hour * 1)
message := &protobuf.NodeSaleEvent{
Action: protobuf.Action_ACTION_DEPLOY,
Deploy: &protobuf.ActionDeploy{
Name: t.Name(),
StartsAt: uint32(startAt.UTC().Unix()),
EndsAt: uint32(endAt.UTC().Unix()),
Tiers: []*protobuf.Tier{
{
PriceSat: 100,
Limit: 5,
MaxPerAddress: 100,
},
{
PriceSat: 200,
Limit: 5,
MaxPerAddress: 100,
},
},
SellerPublicKey: pubkeyHex,
MaxPerAddress: 100,
MaxDiscountPercentage: 50,
SellerWallet: sellerWallet.EncodeAddress(),
},
}
event, block := assembleTestEvent(privateKey, "0202020202", "0202020202", 0, 0, message)
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
return event.Valid == true
})).Return(nil)
tiers := lo.Map(message.Deploy.Tiers, func(tier *protobuf.Tier, _ int) []byte {
tierJson, err := protojson.Marshal(tier)
require.NoError(t, err)
return tierJson
})
mockDgTx.EXPECT().CreateNodeSale(mock.Anything, entity.NodeSale{
BlockHeight: uint64(event.Transaction.BlockHeight),
TxIndex: uint32(event.Transaction.Index),
Name: message.Deploy.Name,
StartsAt: time.Unix(int64(message.Deploy.StartsAt), 0),
EndsAt: time.Unix(int64(message.Deploy.EndsAt), 0),
Tiers: tiers,
SellerPublicKey: message.Deploy.SellerPublicKey,
MaxPerAddress: message.Deploy.MaxPerAddress,
DeployTxHash: event.Transaction.TxHash.String(),
MaxDiscountPercentage: int32(message.Deploy.MaxDiscountPercentage),
SellerWallet: message.Deploy.SellerWallet,
}).Return(nil)
err = p.ProcessDeploy(ctx, mockDgTx, block, event)
require.NoError(t, err)
}

View File

@@ -0,0 +1,55 @@
package entity
import "time"
type Block struct {
BlockHeight int64
BlockHash string
Module string
}
type Node struct {
SaleBlock uint64
SaleTxIndex uint32
NodeID uint32
TierIndex int32
DelegatedTo string
OwnerPublicKey string
PurchaseTxHash string
DelegateTxHash string
}
type NodeSale struct {
BlockHeight uint64
TxIndex uint32
Name string
StartsAt time.Time
EndsAt time.Time
Tiers [][]byte
SellerPublicKey string
MaxPerAddress uint32
DeployTxHash string
MaxDiscountPercentage int32
SellerWallet string
}
type NodeSaleEvent struct {
TxHash string
BlockHeight int64
TxIndex int32
WalletAddress string
Valid bool
Action int32
RawMessage []byte
ParsedMessage []byte
BlockTimestamp time.Time
BlockHash string
Metadata *MetadataEventPurchase
Reason string
}
type MetadataEventPurchase struct {
ExpectedTotalAmountDiscounted uint64
ReportedTotalAmount uint64
PaidTotalAmount uint64
}

View File

@@ -0,0 +1,51 @@
package delegate
import (
"context"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/validator"
"github.com/gaze-network/indexer-network/modules/nodesale/protobuf"
)
type DelegateValidator struct {
validator.Validator
}
func New() *DelegateValidator {
v := validator.New()
return &DelegateValidator{
Validator: *v,
}
}
func (v *DelegateValidator) NodesExist(
ctx context.Context,
qtx datagateway.NodeSaleDataGatewayWithTx,
deployId *protobuf.ActionID,
nodeIds []uint32,
) (bool, []entity.Node, error) {
if !v.Valid {
return false, nil, nil
}
nodes, err := qtx.GetNodesByIds(ctx, datagateway.GetNodesByIdsParams{
SaleBlock: deployId.Block,
SaleTxIndex: deployId.TxIndex,
NodeIds: nodeIds,
})
if err != nil {
v.Valid = false
return v.Valid, nil, errors.Wrap(err, "Failed to get nodes")
}
if len(nodeIds) != len(nodes) {
v.Valid = false
return v.Valid, nil, nil
}
v.Valid = true
return v.Valid, nodes, nil
}

View File

@@ -0,0 +1,6 @@
package validator
const (
INVALID_PUBKEY_FORMAT = "Cannot parse public key"
INVALID_PUBKEY = "Invalid public key"
)

View File

@@ -0,0 +1,17 @@
package purchase
const (
DEPLOYID_NOT_FOUND = "Deploy ID not found."
PURCHASE_TIMEOUT = "Purchase timeout."
BLOCK_HEIGHT_TIMEOUT = "Block height over timeout block"
INVALID_SIGNATURE_FORMAT = "Cannot parse signature."
INVALID_SIGNATURE = "Invalid Signature."
INVALID_TIER_JSON = "Invalid Tier format"
INVALID_NODE_ID = "Invalid NodeId."
NODE_ALREADY_PURCHASED = "Some node has been purchased."
INVALID_SELLER_ADDR_FORMAT = "Invalid seller address."
INVALID_PAYMENT = "Total amount paid less than reported price"
INSUFFICIENT_FUND = "Insufficient fund"
OVER_LIMIT_PER_ADDR = "Purchase over limit per address."
OVER_LIMIT_PER_TIER = "Purchase over limit per tier."
)

View File

@@ -0,0 +1,283 @@
package purchase
import (
"context"
"encoding/hex"
"slices"
"time"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcec/v2/ecdsa"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/validator"
"github.com/gaze-network/indexer-network/modules/nodesale/protobuf"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/proto"
)
type PurchaseValidator struct {
validator.Validator
}
func New() *PurchaseValidator {
v := validator.New()
return &PurchaseValidator{
Validator: *v,
}
}
func (v *PurchaseValidator) NodeSaleExists(ctx context.Context, qtx datagateway.NodeSaleDataGatewayWithTx, payload *protobuf.PurchasePayload) (bool, *entity.NodeSale, error) {
if !v.Valid {
return false, nil, nil
}
// check node existed
deploys, err := qtx.GetNodeSale(ctx, datagateway.GetNodeSaleParams{
BlockHeight: payload.DeployID.Block,
TxIndex: payload.DeployID.TxIndex,
})
if err != nil {
v.Valid = false
return v.Valid, nil, errors.Wrap(err, "Failed to Get NodeSale")
}
if len(deploys) < 1 {
v.Valid = false
v.Reason = DEPLOYID_NOT_FOUND
return v.Valid, nil, nil
}
v.Valid = true
return v.Valid, &deploys[0], nil
}
func (v *PurchaseValidator) ValidTimestamp(deploy *entity.NodeSale, timestamp time.Time) bool {
if !v.Valid {
return false
}
if timestamp.Before(deploy.StartsAt) ||
timestamp.After(deploy.EndsAt) {
v.Valid = false
v.Reason = PURCHASE_TIMEOUT
return v.Valid
}
v.Valid = true
return v.Valid
}
func (v *PurchaseValidator) WithinTimeoutBlock(timeOutBlock uint64, blockHeight uint64) bool {
if !v.Valid {
return false
}
if timeOutBlock == 0 {
// No timeout
v.Valid = true
return v.Valid
}
if timeOutBlock < blockHeight {
v.Valid = false
v.Reason = BLOCK_HEIGHT_TIMEOUT
return v.Valid
}
v.Valid = true
return v.Valid
}
func (v *PurchaseValidator) VerifySignature(purchase *protobuf.ActionPurchase, deploy *entity.NodeSale) bool {
if !v.Valid {
return false
}
payload := purchase.Payload
payloadBytes, _ := proto.Marshal(payload)
signatureBytes, _ := hex.DecodeString(purchase.SellerSignature)
signature, err := ecdsa.ParseSignature(signatureBytes)
if err != nil {
v.Valid = false
v.Reason = INVALID_SIGNATURE_FORMAT
return v.Valid
}
hash := chainhash.DoubleHashB(payloadBytes)
pubkeyBytes, _ := hex.DecodeString(deploy.SellerPublicKey)
pubKey, _ := btcec.ParsePubKey(pubkeyBytes)
verified := signature.Verify(hash[:], pubKey)
if !verified {
v.Valid = false
v.Reason = INVALID_SIGNATURE
return v.Valid
}
v.Valid = true
return v.Valid
}
type TierMap struct {
Tiers []protobuf.Tier
BuyingTiersCount []uint32
NodeIdToTier map[uint32]int32
}
func (v *PurchaseValidator) ValidTiers(
payload *protobuf.PurchasePayload,
deploy *entity.NodeSale,
) (bool, TierMap) {
if !v.Valid {
return false, TierMap{}
}
tiers := make([]protobuf.Tier, len(deploy.Tiers))
buyingTiersCount := make([]uint32, len(tiers))
nodeIdToTier := make(map[uint32]int32)
for i, tierJson := range deploy.Tiers {
tier := &tiers[i]
err := protojson.Unmarshal(tierJson, tier)
if err != nil {
v.Valid = false
v.Reason = INVALID_TIER_JSON
return v.Valid, TierMap{}
}
}
slices.Sort(payload.NodeIDs)
var currentTier int32 = -1
var tierSum uint32 = 0
for _, nodeId := range payload.NodeIDs {
for nodeId >= tierSum && currentTier < int32(len(tiers)-1) {
currentTier++
tierSum += tiers[currentTier].Limit
}
if nodeId < tierSum {
buyingTiersCount[currentTier]++
nodeIdToTier[nodeId] = currentTier
} else {
v.Valid = false
v.Reason = INVALID_NODE_ID
return false, TierMap{}
}
}
v.Valid = true
return v.Valid, TierMap{
Tiers: tiers,
BuyingTiersCount: buyingTiersCount,
NodeIdToTier: nodeIdToTier,
}
}
func (v *PurchaseValidator) ValidUnpurchasedNodes(
ctx context.Context,
qtx datagateway.NodeSaleDataGatewayWithTx,
payload *protobuf.PurchasePayload,
) (bool, error) {
if !v.Valid {
return false, nil
}
// valid unpurchased node ID
nodes, err := qtx.GetNodesByIds(ctx, datagateway.GetNodesByIdsParams{
SaleBlock: payload.DeployID.Block,
SaleTxIndex: payload.DeployID.TxIndex,
NodeIds: payload.NodeIDs,
})
if err != nil {
v.Valid = false
return v.Valid, errors.Wrap(err, "Failed to Get nodes")
}
if len(nodes) > 0 {
v.Valid = false
v.Reason = NODE_ALREADY_PURCHASED
return false, nil
}
v.Valid = true
return true, nil
}
func (v *PurchaseValidator) ValidPaidAmount(
payload *protobuf.PurchasePayload,
deploy *entity.NodeSale,
txPaid uint64,
tiers []protobuf.Tier,
buyingTiersCount []uint32,
network *chaincfg.Params,
) (bool, *entity.MetadataEventPurchase) {
if !v.Valid {
return false, nil
}
meta := entity.MetadataEventPurchase{}
meta.PaidTotalAmount = txPaid
meta.ReportedTotalAmount = uint64(payload.TotalAmountSat)
// total amount paid is greater than report paid
if txPaid < uint64(payload.TotalAmountSat) {
v.Valid = false
v.Reason = INVALID_PAYMENT
return v.Valid, nil
}
// calculate total price
var totalPrice uint64 = 0
for i := 0; i < len(tiers); i++ {
totalPrice += uint64(buyingTiersCount[i]) * uint64(tiers[i].PriceSat)
}
// report paid is greater than max discounted total price
maxDiscounted := totalPrice * (100 - uint64(deploy.MaxDiscountPercentage))
decimal := maxDiscounted % 100
maxDiscounted /= 100
if decimal%100 >= 50 {
maxDiscounted++
}
meta.ExpectedTotalAmountDiscounted = maxDiscounted
if uint64(payload.TotalAmountSat) < maxDiscounted {
v.Valid = false
v.Reason = INSUFFICIENT_FUND
return v.Valid, nil
}
v.Valid = true
return v.Valid, &meta
}
func (v *PurchaseValidator) WithinLimit(
ctx context.Context,
qtx datagateway.NodeSaleDataGatewayWithTx,
payload *protobuf.PurchasePayload,
deploy *entity.NodeSale,
tiers []protobuf.Tier,
buyingTiersCount []uint32,
) (bool, error) {
if !v.Valid {
return false, nil
}
// check node limit
// get all nodes sold by this seller and owned by the buyer
buyerOwnedNodes, err := qtx.GetNodesByOwner(ctx, datagateway.GetNodesByOwnerParams{
SaleBlock: deploy.BlockHeight,
SaleTxIndex: deploy.TxIndex,
OwnerPublicKey: payload.BuyerPublicKey,
})
if err != nil {
v.Valid = false
return v.Valid, errors.Wrap(err, "Failed to GetNodesByOwner")
}
if len(buyerOwnedNodes)+len(payload.NodeIDs) > int(deploy.MaxPerAddress) {
v.Valid = false
v.Reason = "Purchase over limit per address."
return v.Valid, nil
}
// check limit
// count each tiers
// check limited for each tier
ownedTiersCount := make([]uint32, len(tiers))
for _, node := range buyerOwnedNodes {
ownedTiersCount[node.TierIndex]++
}
for i := 0; i < len(tiers); i++ {
if ownedTiersCount[i]+buyingTiersCount[i] > tiers[i].MaxPerAddress {
v.Valid = false
v.Reason = "Purchase over limit per tier."
return v.Valid, nil
}
}
v.Valid = true
return v.Valid, nil
}
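For intuition on the two payment checks in `ValidPaidAmount` above (a worked example under assumed numbers, not taken from the diff): buying two nodes from a 100-sat tier and one from a 200-sat tier gives totalPrice = 2 × 100 + 1 × 200 = 400 sat; with `MaxDiscountPercentage` = 50, the rounded minimum is maxDiscounted = round(400 × (100 − 50) / 100) = 200 sat, so the reported `TotalAmountSat` must be at least 200 (otherwise `INSUFFICIENT_FUND`), and the amount actually paid in the transaction must be at least the reported amount (otherwise `INVALID_PAYMENT`).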

View File

@@ -0,0 +1,44 @@
package validator
import (
"bytes"
"encoding/hex"
"github.com/btcsuite/btcd/btcec/v2"
)
type Validator struct {
Valid bool
Reason string
}
func New() *Validator {
return &Validator{
Valid: true,
}
}
func (v *Validator) EqualXonlyPublicKey(target string, expected *btcec.PublicKey) bool {
if !v.Valid {
return false
}
targetBytes, err := hex.DecodeString(target)
if err != nil {
v.Valid = false
v.Reason = INVALID_PUBKEY_FORMAT
return v.Valid
}
targetPubKey, err := btcec.ParsePubKey(targetBytes)
if err != nil {
v.Valid = false
v.Reason = INVALID_PUBKEY_FORMAT
return v.Valid
}
xOnlyTargetPubKey := btcec.ToSerialized(targetPubKey).SchnorrSerialized()
xOnlyExpectedPubKey := btcec.ToSerialized(expected).SchnorrSerialized()
v.Valid = bytes.Equal(xOnlyTargetPubKey[:], xOnlyExpectedPubKey[:])
if !v.Valid {
v.Reason = INVALID_PUBKEY
}
return v.Valid
}
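
Each check on this Validator (and on the PurchaseValidator above) first bails out when Valid is already false, so a caller can chain checks unconditionally and read Valid and Reason once at the end. A minimal sketch of that calling pattern, assuming a hypothetical buyerKeyHex string and an expected *btcec.PublicKey recovered from the transaction witness:

v := New()
v.EqualXonlyPublicKey(buyerKeyHex, expected) // on mismatch: Valid=false, Reason=INVALID_PUBKEY
// ...any further checks become no-ops once Valid is false...
if !v.Valid {
	// record the event as invalid with v.Reason, as ProcessPurchase does further below
}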


@@ -0,0 +1,61 @@
package nodesale
import (
"context"
"fmt"
"github.com/btcsuite/btcd/rpcclient"
"github.com/gaze-network/indexer-network/core/datasources"
"github.com/gaze-network/indexer-network/core/indexer"
"github.com/gaze-network/indexer-network/internal/config"
"github.com/gaze-network/indexer-network/internal/postgres"
"github.com/gaze-network/indexer-network/modules/nodesale/api/httphandler"
repository "github.com/gaze-network/indexer-network/modules/nodesale/repository/postgres"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gofiber/fiber/v2"
"github.com/samber/do/v2"
)
var NODESALE_MAGIC = []byte{0x6e, 0x73, 0x6f, 0x70} // "nsop"
const (
Version = "v0.0.1-alpha"
)
func New(injector do.Injector) (indexer.IndexerWorker, error) {
ctx := do.MustInvoke[context.Context](injector)
conf := do.MustInvoke[config.Config](injector)
btcClient := do.MustInvoke[*rpcclient.Client](injector)
datasource := datasources.NewBitcoinNode(btcClient)
pg, err := postgres.NewPool(ctx, conf.Modules.NodeSale.Postgres)
if err != nil {
return nil, fmt.Errorf("Can't create postgres connection : %w", err)
}
var cleanupFuncs []func(context.Context) error
cleanupFuncs = append(cleanupFuncs, func(ctx context.Context) error {
pg.Close()
return nil
})
repository := repository.NewRepository(pg)
processor := &Processor{
NodeSaleDg: repository,
BtcClient: datasource,
Network: conf.Network,
cleanupFuncs: cleanupFuncs,
lastBlockDefault: conf.Modules.NodeSale.LastBlockDefault,
}
httpServer := do.MustInvoke[*fiber.App](injector)
nodeSaleHandler := httphandler.New(repository)
if err := nodeSaleHandler.Mount(httpServer); err != nil {
return nil, fmt.Errorf("Can't mount nodesale API : %w", err)
}
logger.InfoContext(ctx, "Mounted nodesale HTTP handler")
indexer := indexer.New(processor, datasource)
logger.InfoContext(ctx, "NodeSale module started.")
return indexer, nil
}


@@ -0,0 +1,61 @@
package nodesale
import (
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/txscript"
"github.com/decred/dcrd/dcrec/secp256k1/v4"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/nodesale/protobuf"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/proto"
)
var (
testBlockHeight uint64 = 101
testTxIndex uint32 = 1
)
func assembleTestEvent(privateKey *secp256k1.PrivateKey, blockHashHex, txHashHex string, blockHeight uint64, txIndex uint32, message *protobuf.NodeSaleEvent) (NodeSaleEvent, *types.Block) {
blockHash, _ := chainhash.NewHashFromStr(blockHashHex)
txHash, _ := chainhash.NewHashFromStr(txHashHex)
rawData, _ := proto.Marshal(message)
builder := txscript.NewScriptBuilder()
builder.AddOp(txscript.OP_FALSE)
builder.AddOp(txscript.OP_IF)
builder.AddData(rawData)
builder.AddOp(txscript.OP_ENDIF)
messageJson, _ := protojson.Marshal(message)
if blockHeight == 0 {
blockHeight = testBlockHeight
testBlockHeight++
}
if txIndex == 0 {
txIndex = testTxIndex
testTxIndex++
}
event := NodeSaleEvent{
Transaction: &types.Transaction{
BlockHeight: int64(blockHeight),
BlockHash: *blockHash,
Index: uint32(txIndex),
TxHash: *txHash,
},
RawData: rawData,
EventMessage: message,
EventJson: messageJson,
TxPubkey: privateKey.PubKey(),
}
block := &types.Block{
Header: types.BlockHeader{
Timestamp: time.Now().UTC(),
},
}
return event, block
}


@@ -0,0 +1,303 @@
package nodesale
import (
"bytes"
"context"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/txscript"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/core/indexer"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/proto"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/core/datasources"
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
"github.com/gaze-network/indexer-network/modules/nodesale/protobuf"
)
type NodeSaleEvent struct {
Transaction *types.Transaction
EventMessage *protobuf.NodeSaleEvent
EventJson []byte
TxPubkey *btcec.PublicKey
RawData []byte
InputValue uint64
}
func NewProcessor(repository datagateway.NodeSaleDataGateway,
datasource *datasources.BitcoinNodeDatasource,
network common.Network,
cleanupFuncs []func(context.Context) error,
lastBlockDefault int64,
) *Processor {
return &Processor{
NodeSaleDg: repository,
BtcClient: datasource,
Network: network,
cleanupFuncs: cleanupFuncs,
lastBlockDefault: lastBlockDefault,
}
}
func (p *Processor) Shutdown(ctx context.Context) error {
for _, cleanupFunc := range p.cleanupFuncs {
err := cleanupFunc(ctx)
if err != nil {
return errors.Wrap(err, "cleanup function error")
}
}
return nil
}
type Processor struct {
NodeSaleDg datagateway.NodeSaleDataGateway
BtcClient *datasources.BitcoinNodeDatasource
Network common.Network
cleanupFuncs []func(context.Context) error
lastBlockDefault int64
}
// CurrentBlock implements indexer.Processor.
func (p *Processor) CurrentBlock(ctx context.Context) (types.BlockHeader, error) {
block, err := p.NodeSaleDg.GetLastProcessedBlock(ctx)
if err != nil {
logger.InfoContext(ctx, "Couldn't get last processed block. Start from NODESALE_LAST_BLOCK_DEFAULT.",
slogx.Int64("currentBlock", p.lastBlockDefault))
header, err := p.BtcClient.GetBlockHeader(ctx, p.lastBlockDefault)
if err != nil {
return types.BlockHeader{}, errors.Wrap(err, "Cannot get default block from bitcoin node")
}
return types.BlockHeader{
Hash: header.Hash,
Height: p.lastBlockDefault,
}, nil
}
hash, err := chainhash.NewHashFromStr(block.BlockHash)
if err != nil {
logger.PanicContext(ctx, "Invalid hash format found in Database.")
}
return types.BlockHeader{
Hash: *hash,
Height: block.BlockHeight,
}, nil
}
// GetIndexedBlock implements indexer.Processor.
func (p *Processor) GetIndexedBlock(ctx context.Context, height int64) (types.BlockHeader, error) {
block, err := p.NodeSaleDg.GetBlock(ctx, height)
if err != nil {
return types.BlockHeader{}, errors.Wrapf(err, "Block %d not found", height)
}
hash, err := chainhash.NewHashFromStr(block.BlockHash)
if err != nil {
logger.PanicContext(ctx, "Invalid hash format found in Database.")
}
return types.BlockHeader{
Hash: *hash,
Height: block.BlockHeight,
}, nil
}
// Name implements indexer.Processor.
func (p *Processor) Name() string {
return "nodesale"
}
func extractNodeSaleData(witness [][]byte) (data []byte, internalPubkey *btcec.PublicKey, isNodeSale bool) {
tokenizer, controlBlock, isTapScript := extractTapScript(witness)
if !isTapScript {
return []byte{}, nil, false
}
state := 0
for tokenizer.Next() {
switch state {
case 0:
if tokenizer.Opcode() == txscript.OP_0 {
state++
} else {
state = 0
}
case 1:
if tokenizer.Opcode() == txscript.OP_IF {
state++
} else {
state = 0
}
case 2:
if tokenizer.Opcode() == txscript.OP_DATA_4 &&
bytes.Equal(tokenizer.Data(), NODESALE_MAGIC) {
state++
} else {
state = 0
}
case 3:
// Any opcode greater than txscript.OP_16 is not a data push (the OP_PUSHDATA* opcodes are all below OP_16).
if tokenizer.Opcode() <= txscript.OP_16 {
data := tokenizer.Data()
return data, controlBlock.InternalKey, true
}
state = 0
}
}
return []byte{}, nil, false
}
func (p *Processor) parseTransactions(ctx context.Context, transactions []*types.Transaction) ([]NodeSaleEvent, error) {
var events []NodeSaleEvent
for _, t := range transactions {
for _, txIn := range t.TxIn {
data, txPubkey, isNodeSale := extractNodeSaleData(txIn.Witness)
if !isNodeSale {
continue
}
event := &protobuf.NodeSaleEvent{}
err := proto.Unmarshal(data, event)
if err != nil {
logger.WarnContext(ctx, "Invalid Protobuf",
slogx.String("block_hash", t.BlockHash.String()),
slogx.Int("txIndex", int(t.Index)))
continue
}
eventJson, err := protojson.Marshal(event)
if err != nil {
return []NodeSaleEvent{}, errors.Wrap(err, "Failed to marshal protobuf to JSON")
}
prevTx, _, err := p.BtcClient.GetRawTransactionAndHeightByTxHash(ctx, txIn.PreviousOutTxHash)
if err != nil {
return nil, errors.Wrap(err, "Failed to get previous transaction data")
}
if txIn.PreviousOutIndex >= uint32(len(prevTx.TxOut)) {
// err is nil here; wrapping it would silently return a nil error
return nil, errors.New("Invalid previous output index from bitcoin node")
}
events = append(events, NodeSaleEvent{
Transaction: t,
EventMessage: event,
EventJson: eventJson,
RawData: data,
TxPubkey: txPubkey,
InputValue: uint64(prevTx.TxOut[txIn.PreviousOutIndex].Value),
})
}
}
return events, nil
}
// Process implements indexer.Processor.
func (p *Processor) Process(ctx context.Context, inputs []*types.Block) error {
for _, block := range inputs {
logger.InfoContext(ctx, "NodeSale processing a block",
slogx.Int64("block", block.Header.Height),
slogx.Stringer("hash", block.Header.Hash))
// parse all nodesale events from each transaction, including the witness pubkey and the value of the spent output
events, err := p.parseTransactions(ctx, block.Transactions)
if err != nil {
return errors.Wrap(err, "Invalid data from bitcoin client")
}
// open transaction
qtx, err := p.NodeSaleDg.BeginNodeSaleTx(ctx)
if err != nil {
return errors.Wrap(err, "Failed to create transaction")
}
defer func() {
err = qtx.Rollback(ctx)
if err != nil {
logger.PanicContext(ctx, "Failed to rollback db")
}
}()
// write block
err = qtx.CreateBlock(ctx, entity.Block{
BlockHeight: block.Header.Height,
BlockHash: block.Header.Hash.String(),
Module: p.Name(),
})
if err != nil {
return errors.Wrapf(err, "Failed to add block %d", block.Header.Height)
}
// process each event
for _, event := range events {
logger.InfoContext(ctx, "NodeSale processing event",
slogx.Uint32("txIndex", event.Transaction.Index),
slogx.Int64("blockHeight", block.Header.Height),
slogx.Stringer("blockhash", block.Header.Hash),
)
eventMessage := event.EventMessage
switch eventMessage.Action {
case protobuf.Action_ACTION_DEPLOY:
err = p.ProcessDeploy(ctx, qtx, block, event)
if err != nil {
return errors.Wrapf(err, "Failed to deploy at block %d", block.Header.Height)
}
case protobuf.Action_ACTION_DELEGATE:
err = p.ProcessDelegate(ctx, qtx, block, event)
if err != nil {
return errors.Wrapf(err, "Failed to delegate at block %d", block.Header.Height)
}
case protobuf.Action_ACTION_PURCHASE:
err = p.ProcessPurchase(ctx, qtx, block, event)
if err != nil {
return errors.Wrapf(err, "Failed to purchase at block %d", block.Header.Height)
}
default:
logger.DebugContext(ctx, "Invalid event ACTION", slogx.Stringer("txHash", (event.Transaction.TxHash)))
}
}
// close transaction
err = qtx.Commit(ctx)
if err != nil {
return errors.Wrap(err, "Failed to commit transaction")
}
logger.InfoContext(ctx, "NodeSale finished processing block",
slogx.Int64("block", block.Header.Height),
slogx.Stringer("hash", block.Header.Hash))
}
return nil
}
// RevertData implements indexer.Processor.
func (p *Processor) RevertData(ctx context.Context, from int64) error {
qtx, err := p.NodeSaleDg.BeginNodeSaleTx(ctx)
if err != nil {
return errors.Wrap(err, "Failed to create transaction")
}
defer func() { err = qtx.Rollback(ctx) }()
_, err = qtx.RemoveBlockFrom(ctx, from)
if err != nil {
return errors.Wrap(err, "Failed to remove blocks.")
}
affected, err := qtx.RemoveEventsFromBlock(ctx, from)
if err != nil {
return errors.Wrap(err, "Failed to remove events.")
}
_, err = qtx.ClearDelegate(ctx)
if err != nil {
return errors.Wrap(err, "Failed to clear delegate from nodes")
}
err = qtx.Commit(ctx)
if err != nil {
return errors.Wrap(err, "Failed to commit transaction")
}
logger.InfoContext(ctx, "Events removed",
slogx.Int64("Total removed", affected))
return nil
}
// VerifyStates implements indexer.Processor.
func (p *Processor) VerifyStates(ctx context.Context) error {
panic("unimplemented")
}
var _ indexer.Processor[*types.Block] = (*Processor)(nil)
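
extractNodeSaleData above recognizes a tapscript envelope of the form OP_FALSE OP_IF <"nsop" magic> <payload push> ... OP_ENDIF, taking the internal key from the control block. A hedged sketch of how such an envelope script could be assembled with txscript.ScriptBuilder; buildNodeSaleEnvelope is an illustrative helper, not one of the indexer's own functions, and the payload bytes are assumed to be a marshaled NodeSaleEvent (taproot output and witness assembly are out of scope here).

// buildNodeSaleEnvelope builds the tapscript envelope that extractNodeSaleData parses.
func buildNodeSaleEnvelope(payload []byte) ([]byte, error) {
	builder := txscript.NewScriptBuilder()
	builder.AddOp(txscript.OP_FALSE)
	builder.AddOp(txscript.OP_IF)
	builder.AddData(NODESALE_MAGIC) // 4-byte "nsop" tag, pushed as OP_DATA_4
	builder.AddData(payload)        // the marshaled protobuf NodeSaleEvent
	builder.AddOp(txscript.OP_ENDIF)
	return builder.Script()
}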


@@ -0,0 +1,806 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.34.1
// protoc v5.26.1
// source: modules/nodesale/protobuf/nodesale.proto
// protoc modules/nodesale/protobuf/nodesale.proto --go_out=. --go_opt=module=github.com/gaze-network/indexer-network
package protobuf
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type Action int32
const (
Action_ACTION_DEPLOY Action = 0
Action_ACTION_PURCHASE Action = 1
Action_ACTION_DELEGATE Action = 2
)
// Enum value maps for Action.
var (
Action_name = map[int32]string{
0: "ACTION_DEPLOY",
1: "ACTION_PURCHASE",
2: "ACTION_DELEGATE",
}
Action_value = map[string]int32{
"ACTION_DEPLOY": 0,
"ACTION_PURCHASE": 1,
"ACTION_DELEGATE": 2,
}
)
func (x Action) Enum() *Action {
p := new(Action)
*p = x
return p
}
func (x Action) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (Action) Descriptor() protoreflect.EnumDescriptor {
return file_modules_nodesale_protobuf_nodesale_proto_enumTypes[0].Descriptor()
}
func (Action) Type() protoreflect.EnumType {
return &file_modules_nodesale_protobuf_nodesale_proto_enumTypes[0]
}
func (x Action) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use Action.Descriptor instead.
func (Action) EnumDescriptor() ([]byte, []int) {
return file_modules_nodesale_protobuf_nodesale_proto_rawDescGZIP(), []int{0}
}
type NodeSaleEvent struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Action Action `protobuf:"varint,1,opt,name=action,proto3,enum=nodesale.Action" json:"action,omitempty"`
Deploy *ActionDeploy `protobuf:"bytes,2,opt,name=deploy,proto3,oneof" json:"deploy,omitempty"`
Purchase *ActionPurchase `protobuf:"bytes,3,opt,name=purchase,proto3,oneof" json:"purchase,omitempty"`
Delegate *ActionDelegate `protobuf:"bytes,4,opt,name=delegate,proto3,oneof" json:"delegate,omitempty"`
}
func (x *NodeSaleEvent) Reset() {
*x = NodeSaleEvent{}
if protoimpl.UnsafeEnabled {
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *NodeSaleEvent) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NodeSaleEvent) ProtoMessage() {}
func (x *NodeSaleEvent) ProtoReflect() protoreflect.Message {
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NodeSaleEvent.ProtoReflect.Descriptor instead.
func (*NodeSaleEvent) Descriptor() ([]byte, []int) {
return file_modules_nodesale_protobuf_nodesale_proto_rawDescGZIP(), []int{0}
}
func (x *NodeSaleEvent) GetAction() Action {
if x != nil {
return x.Action
}
return Action_ACTION_DEPLOY
}
func (x *NodeSaleEvent) GetDeploy() *ActionDeploy {
if x != nil {
return x.Deploy
}
return nil
}
func (x *NodeSaleEvent) GetPurchase() *ActionPurchase {
if x != nil {
return x.Purchase
}
return nil
}
func (x *NodeSaleEvent) GetDelegate() *ActionDelegate {
if x != nil {
return x.Delegate
}
return nil
}
type ActionDeploy struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
StartsAt uint32 `protobuf:"varint,2,opt,name=startsAt,proto3" json:"startsAt,omitempty"`
EndsAt uint32 `protobuf:"varint,3,opt,name=endsAt,proto3" json:"endsAt,omitempty"`
Tiers []*Tier `protobuf:"bytes,4,rep,name=tiers,proto3" json:"tiers,omitempty"`
SellerPublicKey string `protobuf:"bytes,5,opt,name=sellerPublicKey,proto3" json:"sellerPublicKey,omitempty"`
MaxPerAddress uint32 `protobuf:"varint,6,opt,name=maxPerAddress,proto3" json:"maxPerAddress,omitempty"`
MaxDiscountPercentage uint32 `protobuf:"varint,7,opt,name=maxDiscountPercentage,proto3" json:"maxDiscountPercentage,omitempty"`
SellerWallet string `protobuf:"bytes,8,opt,name=sellerWallet,proto3" json:"sellerWallet,omitempty"`
}
func (x *ActionDeploy) Reset() {
*x = ActionDeploy{}
if protoimpl.UnsafeEnabled {
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ActionDeploy) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ActionDeploy) ProtoMessage() {}
func (x *ActionDeploy) ProtoReflect() protoreflect.Message {
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ActionDeploy.ProtoReflect.Descriptor instead.
func (*ActionDeploy) Descriptor() ([]byte, []int) {
return file_modules_nodesale_protobuf_nodesale_proto_rawDescGZIP(), []int{1}
}
func (x *ActionDeploy) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *ActionDeploy) GetStartsAt() uint32 {
if x != nil {
return x.StartsAt
}
return 0
}
func (x *ActionDeploy) GetEndsAt() uint32 {
if x != nil {
return x.EndsAt
}
return 0
}
func (x *ActionDeploy) GetTiers() []*Tier {
if x != nil {
return x.Tiers
}
return nil
}
func (x *ActionDeploy) GetSellerPublicKey() string {
if x != nil {
return x.SellerPublicKey
}
return ""
}
func (x *ActionDeploy) GetMaxPerAddress() uint32 {
if x != nil {
return x.MaxPerAddress
}
return 0
}
func (x *ActionDeploy) GetMaxDiscountPercentage() uint32 {
if x != nil {
return x.MaxDiscountPercentage
}
return 0
}
func (x *ActionDeploy) GetSellerWallet() string {
if x != nil {
return x.SellerWallet
}
return ""
}
type Tier struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
PriceSat uint32 `protobuf:"varint,1,opt,name=priceSat,proto3" json:"priceSat,omitempty"`
Limit uint32 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"`
MaxPerAddress uint32 `protobuf:"varint,3,opt,name=maxPerAddress,proto3" json:"maxPerAddress,omitempty"`
}
func (x *Tier) Reset() {
*x = Tier{}
if protoimpl.UnsafeEnabled {
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Tier) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Tier) ProtoMessage() {}
func (x *Tier) ProtoReflect() protoreflect.Message {
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Tier.ProtoReflect.Descriptor instead.
func (*Tier) Descriptor() ([]byte, []int) {
return file_modules_nodesale_protobuf_nodesale_proto_rawDescGZIP(), []int{2}
}
func (x *Tier) GetPriceSat() uint32 {
if x != nil {
return x.PriceSat
}
return 0
}
func (x *Tier) GetLimit() uint32 {
if x != nil {
return x.Limit
}
return 0
}
func (x *Tier) GetMaxPerAddress() uint32 {
if x != nil {
return x.MaxPerAddress
}
return 0
}
type ActionPurchase struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Payload *PurchasePayload `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"`
SellerSignature string `protobuf:"bytes,2,opt,name=sellerSignature,proto3" json:"sellerSignature,omitempty"`
}
func (x *ActionPurchase) Reset() {
*x = ActionPurchase{}
if protoimpl.UnsafeEnabled {
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ActionPurchase) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ActionPurchase) ProtoMessage() {}
func (x *ActionPurchase) ProtoReflect() protoreflect.Message {
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ActionPurchase.ProtoReflect.Descriptor instead.
func (*ActionPurchase) Descriptor() ([]byte, []int) {
return file_modules_nodesale_protobuf_nodesale_proto_rawDescGZIP(), []int{3}
}
func (x *ActionPurchase) GetPayload() *PurchasePayload {
if x != nil {
return x.Payload
}
return nil
}
func (x *ActionPurchase) GetSellerSignature() string {
if x != nil {
return x.SellerSignature
}
return ""
}
type PurchasePayload struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
DeployID *ActionID `protobuf:"bytes,1,opt,name=deployID,proto3" json:"deployID,omitempty"`
BuyerPublicKey string `protobuf:"bytes,2,opt,name=buyerPublicKey,proto3" json:"buyerPublicKey,omitempty"`
NodeIDs []uint32 `protobuf:"varint,3,rep,packed,name=nodeIDs,proto3" json:"nodeIDs,omitempty"`
TotalAmountSat int64 `protobuf:"varint,4,opt,name=totalAmountSat,proto3" json:"totalAmountSat,omitempty"`
TimeOutBlock uint64 `protobuf:"varint,5,opt,name=timeOutBlock,proto3" json:"timeOutBlock,omitempty"`
}
func (x *PurchasePayload) Reset() {
*x = PurchasePayload{}
if protoimpl.UnsafeEnabled {
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *PurchasePayload) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PurchasePayload) ProtoMessage() {}
func (x *PurchasePayload) ProtoReflect() protoreflect.Message {
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PurchasePayload.ProtoReflect.Descriptor instead.
func (*PurchasePayload) Descriptor() ([]byte, []int) {
return file_modules_nodesale_protobuf_nodesale_proto_rawDescGZIP(), []int{4}
}
func (x *PurchasePayload) GetDeployID() *ActionID {
if x != nil {
return x.DeployID
}
return nil
}
func (x *PurchasePayload) GetBuyerPublicKey() string {
if x != nil {
return x.BuyerPublicKey
}
return ""
}
func (x *PurchasePayload) GetNodeIDs() []uint32 {
if x != nil {
return x.NodeIDs
}
return nil
}
func (x *PurchasePayload) GetTotalAmountSat() int64 {
if x != nil {
return x.TotalAmountSat
}
return 0
}
func (x *PurchasePayload) GetTimeOutBlock() uint64 {
if x != nil {
return x.TimeOutBlock
}
return 0
}
type ActionID struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Block uint64 `protobuf:"varint,1,opt,name=block,proto3" json:"block,omitempty"`
TxIndex uint32 `protobuf:"varint,2,opt,name=txIndex,proto3" json:"txIndex,omitempty"`
}
func (x *ActionID) Reset() {
*x = ActionID{}
if protoimpl.UnsafeEnabled {
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ActionID) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ActionID) ProtoMessage() {}
func (x *ActionID) ProtoReflect() protoreflect.Message {
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[5]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ActionID.ProtoReflect.Descriptor instead.
func (*ActionID) Descriptor() ([]byte, []int) {
return file_modules_nodesale_protobuf_nodesale_proto_rawDescGZIP(), []int{5}
}
func (x *ActionID) GetBlock() uint64 {
if x != nil {
return x.Block
}
return 0
}
func (x *ActionID) GetTxIndex() uint32 {
if x != nil {
return x.TxIndex
}
return 0
}
type ActionDelegate struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
DelegateePublicKey string `protobuf:"bytes,1,opt,name=delegateePublicKey,proto3" json:"delegateePublicKey,omitempty"`
NodeIDs []uint32 `protobuf:"varint,2,rep,packed,name=nodeIDs,proto3" json:"nodeIDs,omitempty"`
DeployID *ActionID `protobuf:"bytes,3,opt,name=deployID,proto3" json:"deployID,omitempty"`
}
func (x *ActionDelegate) Reset() {
*x = ActionDelegate{}
if protoimpl.UnsafeEnabled {
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ActionDelegate) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ActionDelegate) ProtoMessage() {}
func (x *ActionDelegate) ProtoReflect() protoreflect.Message {
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[6]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ActionDelegate.ProtoReflect.Descriptor instead.
func (*ActionDelegate) Descriptor() ([]byte, []int) {
return file_modules_nodesale_protobuf_nodesale_proto_rawDescGZIP(), []int{6}
}
func (x *ActionDelegate) GetDelegateePublicKey() string {
if x != nil {
return x.DelegateePublicKey
}
return ""
}
func (x *ActionDelegate) GetNodeIDs() []uint32 {
if x != nil {
return x.NodeIDs
}
return nil
}
func (x *ActionDelegate) GetDeployID() *ActionID {
if x != nil {
return x.DeployID
}
return nil
}
var File_modules_nodesale_protobuf_nodesale_proto protoreflect.FileDescriptor
var file_modules_nodesale_protobuf_nodesale_proto_rawDesc = []byte{
0x0a, 0x28, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x61,
0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x6e, 0x6f, 0x64, 0x65,
0x73, 0x61, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x6e, 0x6f, 0x64, 0x65,
0x73, 0x61, 0x6c, 0x65, 0x22, 0x89, 0x02, 0x0a, 0x0d, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x61, 0x6c,
0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x61, 0x6c,
0x65, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
0x12, 0x33, 0x0a, 0x06, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x16, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x61, 0x6c, 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69,
0x6f, 0x6e, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x48, 0x00, 0x52, 0x06, 0x64, 0x65, 0x70, 0x6c,
0x6f, 0x79, 0x88, 0x01, 0x01, 0x12, 0x39, 0x0a, 0x08, 0x70, 0x75, 0x72, 0x63, 0x68, 0x61, 0x73,
0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x61,
0x6c, 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x75, 0x72, 0x63, 0x68, 0x61, 0x73,
0x65, 0x48, 0x01, 0x52, 0x08, 0x70, 0x75, 0x72, 0x63, 0x68, 0x61, 0x73, 0x65, 0x88, 0x01, 0x01,
0x12, 0x39, 0x0a, 0x08, 0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x61, 0x6c, 0x65, 0x2e, 0x41, 0x63,
0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x48, 0x02, 0x52, 0x08,
0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f,
0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x70, 0x75, 0x72, 0x63, 0x68,
0x61, 0x73, 0x65, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65,
0x22, 0xa6, 0x02, 0x0a, 0x0c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x6c, 0x6f,
0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, 0x73, 0x41,
0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, 0x73, 0x41,
0x74, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x64, 0x73, 0x41, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28,
0x0d, 0x52, 0x06, 0x65, 0x6e, 0x64, 0x73, 0x41, 0x74, 0x12, 0x24, 0x0a, 0x05, 0x74, 0x69, 0x65,
0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x73,
0x61, 0x6c, 0x65, 0x2e, 0x54, 0x69, 0x65, 0x72, 0x52, 0x05, 0x74, 0x69, 0x65, 0x72, 0x73, 0x12,
0x28, 0x0a, 0x0f, 0x73, 0x65, 0x6c, 0x6c, 0x65, 0x72, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b,
0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x65, 0x6c, 0x6c, 0x65, 0x72,
0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x0d, 0x6d, 0x61, 0x78,
0x50, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d,
0x52, 0x0d, 0x6d, 0x61, 0x78, 0x50, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12,
0x34, 0x0a, 0x15, 0x6d, 0x61, 0x78, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x65,
0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x15,
0x6d, 0x61, 0x78, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x65, 0x72, 0x63, 0x65,
0x6e, 0x74, 0x61, 0x67, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x73, 0x65, 0x6c, 0x6c, 0x65, 0x72, 0x57,
0x61, 0x6c, 0x6c, 0x65, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x65, 0x6c,
0x6c, 0x65, 0x72, 0x57, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x22, 0x5e, 0x0a, 0x04, 0x54, 0x69, 0x65,
0x72, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x63, 0x65, 0x53, 0x61, 0x74, 0x18, 0x01, 0x20,
0x01, 0x28, 0x0d, 0x52, 0x08, 0x70, 0x72, 0x69, 0x63, 0x65, 0x53, 0x61, 0x74, 0x12, 0x14, 0x0a,
0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6c, 0x69,
0x6d, 0x69, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x6d, 0x61, 0x78, 0x50, 0x65, 0x72, 0x41, 0x64, 0x64,
0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x50,
0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x6f, 0x0a, 0x0e, 0x41, 0x63, 0x74,
0x69, 0x6f, 0x6e, 0x50, 0x75, 0x72, 0x63, 0x68, 0x61, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x70,
0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6e,
0x6f, 0x64, 0x65, 0x73, 0x61, 0x6c, 0x65, 0x2e, 0x50, 0x75, 0x72, 0x63, 0x68, 0x61, 0x73, 0x65,
0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64,
0x12, 0x28, 0x0a, 0x0f, 0x73, 0x65, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74,
0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x65, 0x6c, 0x6c, 0x65,
0x72, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0xcf, 0x01, 0x0a, 0x0f, 0x50,
0x75, 0x72, 0x63, 0x68, 0x61, 0x73, 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x2e,
0x0a, 0x08, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x12, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x61, 0x6c, 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69,
0x6f, 0x6e, 0x49, 0x44, 0x52, 0x08, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x49, 0x44, 0x12, 0x26,
0x0a, 0x0e, 0x62, 0x75, 0x79, 0x65, 0x72, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79,
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x62, 0x75, 0x79, 0x65, 0x72, 0x50, 0x75, 0x62,
0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x44,
0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x73,
0x12, 0x26, 0x0a, 0x0e, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x41, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x53,
0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x41,
0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x61, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65,
0x4f, 0x75, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c,
0x74, 0x69, 0x6d, 0x65, 0x4f, 0x75, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x3a, 0x0a, 0x08,
0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63,
0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x18,
0x0a, 0x07, 0x74, 0x78, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52,
0x07, 0x74, 0x78, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x22, 0x8a, 0x01, 0x0a, 0x0e, 0x41, 0x63, 0x74,
0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x64,
0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65,
0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74,
0x65, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x6e,
0x6f, 0x64, 0x65, 0x49, 0x44, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x07, 0x6e, 0x6f,
0x64, 0x65, 0x49, 0x44, 0x73, 0x12, 0x2e, 0x0a, 0x08, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x49,
0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x61,
0x6c, 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x52, 0x08, 0x64, 0x65, 0x70,
0x6c, 0x6f, 0x79, 0x49, 0x44, 0x2a, 0x45, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12,
0x11, 0x0a, 0x0d, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x50, 0x4c, 0x4f, 0x59,
0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x55, 0x52,
0x43, 0x48, 0x41, 0x53, 0x45, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x41, 0x43, 0x54, 0x49, 0x4f,
0x4e, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x47, 0x41, 0x54, 0x45, 0x10, 0x02, 0x42, 0x43, 0x5a, 0x41,
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x61, 0x7a, 0x65, 0x2d,
0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2d,
0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x2f,
0x6e, 0x6f, 0x64, 0x65, 0x73, 0x61, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_modules_nodesale_protobuf_nodesale_proto_rawDescOnce sync.Once
file_modules_nodesale_protobuf_nodesale_proto_rawDescData = file_modules_nodesale_protobuf_nodesale_proto_rawDesc
)
func file_modules_nodesale_protobuf_nodesale_proto_rawDescGZIP() []byte {
file_modules_nodesale_protobuf_nodesale_proto_rawDescOnce.Do(func() {
file_modules_nodesale_protobuf_nodesale_proto_rawDescData = protoimpl.X.CompressGZIP(file_modules_nodesale_protobuf_nodesale_proto_rawDescData)
})
return file_modules_nodesale_protobuf_nodesale_proto_rawDescData
}
var file_modules_nodesale_protobuf_nodesale_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_modules_nodesale_protobuf_nodesale_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
var file_modules_nodesale_protobuf_nodesale_proto_goTypes = []interface{}{
(Action)(0), // 0: nodesale.Action
(*NodeSaleEvent)(nil), // 1: nodesale.NodeSaleEvent
(*ActionDeploy)(nil), // 2: nodesale.ActionDeploy
(*Tier)(nil), // 3: nodesale.Tier
(*ActionPurchase)(nil), // 4: nodesale.ActionPurchase
(*PurchasePayload)(nil), // 5: nodesale.PurchasePayload
(*ActionID)(nil), // 6: nodesale.ActionID
(*ActionDelegate)(nil), // 7: nodesale.ActionDelegate
}
var file_modules_nodesale_protobuf_nodesale_proto_depIdxs = []int32{
0, // 0: nodesale.NodeSaleEvent.action:type_name -> nodesale.Action
2, // 1: nodesale.NodeSaleEvent.deploy:type_name -> nodesale.ActionDeploy
4, // 2: nodesale.NodeSaleEvent.purchase:type_name -> nodesale.ActionPurchase
7, // 3: nodesale.NodeSaleEvent.delegate:type_name -> nodesale.ActionDelegate
3, // 4: nodesale.ActionDeploy.tiers:type_name -> nodesale.Tier
5, // 5: nodesale.ActionPurchase.payload:type_name -> nodesale.PurchasePayload
6, // 6: nodesale.PurchasePayload.deployID:type_name -> nodesale.ActionID
6, // 7: nodesale.ActionDelegate.deployID:type_name -> nodesale.ActionID
8, // [8:8] is the sub-list for method output_type
8, // [8:8] is the sub-list for method input_type
8, // [8:8] is the sub-list for extension type_name
8, // [8:8] is the sub-list for extension extendee
0, // [0:8] is the sub-list for field type_name
}
func init() { file_modules_nodesale_protobuf_nodesale_proto_init() }
func file_modules_nodesale_protobuf_nodesale_proto_init() {
if File_modules_nodesale_protobuf_nodesale_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_modules_nodesale_protobuf_nodesale_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*NodeSaleEvent); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_modules_nodesale_protobuf_nodesale_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ActionDeploy); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_modules_nodesale_protobuf_nodesale_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Tier); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_modules_nodesale_protobuf_nodesale_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ActionPurchase); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_modules_nodesale_protobuf_nodesale_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PurchasePayload); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_modules_nodesale_protobuf_nodesale_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ActionID); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_modules_nodesale_protobuf_nodesale_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ActionDelegate); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
file_modules_nodesale_protobuf_nodesale_proto_msgTypes[0].OneofWrappers = []interface{}{}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_modules_nodesale_protobuf_nodesale_proto_rawDesc,
NumEnums: 1,
NumMessages: 7,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_modules_nodesale_protobuf_nodesale_proto_goTypes,
DependencyIndexes: file_modules_nodesale_protobuf_nodesale_proto_depIdxs,
EnumInfos: file_modules_nodesale_protobuf_nodesale_proto_enumTypes,
MessageInfos: file_modules_nodesale_protobuf_nodesale_proto_msgTypes,
}.Build()
File_modules_nodesale_protobuf_nodesale_proto = out.File
file_modules_nodesale_protobuf_nodesale_proto_rawDesc = nil
file_modules_nodesale_protobuf_nodesale_proto_goTypes = nil
file_modules_nodesale_protobuf_nodesale_proto_depIdxs = nil
}


@@ -0,0 +1,60 @@
syntax = "proto3";
// protoc modules/nodesale/protobuf/nodesale.proto --go_out=. --go_opt=module=github.com/gaze-network/indexer-network
package nodesale;
option go_package = "github.com/gaze-network/indexer-network/modules/nodesale/protobuf";
enum Action {
ACTION_DEPLOY = 0;
ACTION_PURCHASE = 1;
ACTION_DELEGATE = 2;
}
message NodeSaleEvent {
Action action = 1;
optional ActionDeploy deploy = 2;
optional ActionPurchase purchase = 3;
optional ActionDelegate delegate = 4;
}
message ActionDeploy {
string name = 1;
uint32 startsAt = 2;
uint32 endsAt = 3;
repeated Tier tiers = 4;
string sellerPublicKey = 5;
uint32 maxPerAddress = 6;
uint32 maxDiscountPercentage = 7;
string sellerWallet = 8;
}
message Tier {
uint32 priceSat = 1;
uint32 limit = 2;
uint32 maxPerAddress = 3;
}
message ActionPurchase {
PurchasePayload payload = 1;
string sellerSignature = 2;
}
message PurchasePayload {
ActionID deployID = 1;
string buyerPublicKey = 2;
repeated uint32 nodeIDs = 3;
int64 totalAmountSat = 4;
uint64 timeOutBlock = 5;
}
message ActionID {
uint64 block = 1;
uint32 txIndex = 2;
}
message ActionDelegate {
string delegateePublicKey = 1;
repeated uint32 nodeIDs = 2;
ActionID deployID = 3;
}
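
As a usage sketch of the generated Go types above, a purchase event can be built and marshaled as follows. All field values, buyerPubkeyHex, and sellerSignatureHex are hypothetical; per the tests further below, the seller signature is an ECDSA signature over the double-SHA256 of the marshaled payload.

event := &protobuf.NodeSaleEvent{
	Action: protobuf.Action_ACTION_PURCHASE,
	Purchase: &protobuf.ActionPurchase{
		Payload: &protobuf.PurchasePayload{
			DeployID:       &protobuf.ActionID{Block: 100, TxIndex: 1},
			BuyerPublicKey: buyerPubkeyHex, // compressed pubkey, hex-encoded
			NodeIDs:        []uint32{1, 2},
			TotalAmountSat: 500,
			TimeOutBlock:   106,
		},
		SellerSignature: sellerSignatureHex,
	},
}
raw, _ := proto.Marshal(event)           // bytes embedded in the tapscript envelope
eventJSON, _ := protojson.Marshal(event) // JSON stored as the event's ParsedMessage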


@@ -0,0 +1,12 @@
package nodesale
import (
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcutil"
)
func (p *Processor) PubkeyToPkHashAddress(pubKey *btcec.PublicKey) btcutil.Address {
addrPubKey, _ := btcutil.NewAddressPubKey(pubKey.SerializeCompressed(), p.Network.ChainParams())
addrPubKeyHash := addrPubKey.AddressPubKeyHash()
return addrPubKeyHash
}
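
This helper derives the legacy P2PKH address that is stored as an event's WalletAddress. A minimal hedged sketch of the same derivation outside the processor, with an explicit network parameter; unlike the helper above, it propagates the NewAddressPubKey error rather than discarding it.

// pubkeyToP2PKH mirrors PubkeyToPkHashAddress for an arbitrary network.
func pubkeyToP2PKH(pubKey *btcec.PublicKey, params *chaincfg.Params) (string, error) {
	addrPubKey, err := btcutil.NewAddressPubKey(pubKey.SerializeCompressed(), params)
	if err != nil {
		return "", err
	}
	return addrPubKey.AddressPubKeyHash().EncodeAddress(), nil
}

// usage: addr, err := pubkeyToP2PKH(pub, &chaincfg.MainNetParams)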


@@ -0,0 +1,87 @@
package nodesale
import (
"context"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
purchasevalidator "github.com/gaze-network/indexer-network/modules/nodesale/internal/validator/purchase"
)
func (p *Processor) ProcessPurchase(ctx context.Context, qtx datagateway.NodeSaleDataGatewayWithTx, block *types.Block, event NodeSaleEvent) error {
purchase := event.EventMessage.Purchase
payload := purchase.Payload
validator := purchasevalidator.New()
validator.EqualXonlyPublicKey(payload.BuyerPublicKey, event.TxPubkey)
_, deploy, err := validator.NodeSaleExists(ctx, qtx, payload)
if err != nil {
return errors.Wrap(err, "cannot query. Something wrong.")
}
validator.ValidTimestamp(deploy, block.Header.Timestamp)
validator.WithinTimeoutBlock(payload.TimeOutBlock, uint64(event.Transaction.BlockHeight))
validator.VerifySignature(purchase, deploy)
_, tierMap := validator.ValidTiers(payload, deploy)
tiers := tierMap.Tiers
buyingTiersCount := tierMap.BuyingTiersCount
nodeIdToTier := tierMap.NodeIdToTier
_, err = validator.ValidUnpurchasedNodes(ctx, qtx, payload)
if err != nil {
return errors.Wrap(err, "cannot query. Something wrong.")
}
_, meta := validator.ValidPaidAmount(payload, deploy, event.InputValue, tiers, buyingTiersCount, p.Network.ChainParams())
_, err = validator.WithinLimit(ctx, qtx, payload, deploy, tiers, buyingTiersCount)
if err != nil {
return errors.Wrap(err, "cannot query. Something wrong.")
}
err = qtx.CreateEvent(ctx, entity.NodeSaleEvent{
TxHash: event.Transaction.TxHash.String(),
TxIndex: int32(event.Transaction.Index),
Action: int32(event.EventMessage.Action),
RawMessage: event.RawData,
ParsedMessage: event.EventJson,
BlockTimestamp: block.Header.Timestamp,
BlockHash: event.Transaction.BlockHash.String(),
BlockHeight: event.Transaction.BlockHeight,
Valid: validator.Valid,
WalletAddress: p.PubkeyToPkHashAddress(event.TxPubkey).EncodeAddress(),
Metadata: meta,
Reason: validator.Reason,
})
if err != nil {
return errors.Wrap(err, "Failed to insert event")
}
if validator.Valid {
// add to node
for _, nodeId := range payload.NodeIDs {
err := qtx.CreateNode(ctx, entity.Node{
SaleBlock: deploy.BlockHeight,
SaleTxIndex: deploy.TxIndex,
NodeID: nodeId,
TierIndex: nodeIdToTier[nodeId],
DelegatedTo: "",
OwnerPublicKey: payload.BuyerPublicKey,
PurchaseTxHash: event.Transaction.TxHash.String(),
DelegateTxHash: "",
})
if err != nil {
return errors.Wrap(err, "Failed to insert node")
}
}
}
return nil
}


@@ -0,0 +1,902 @@
package nodesale
import (
"context"
"encoding/hex"
"testing"
"time"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway/mocks"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/validator"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/validator/purchase"
"github.com/gaze-network/indexer-network/modules/nodesale/protobuf"
"github.com/samber/lo"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/proto"
)
func TestInvalidPurchase(t *testing.T) {
ctx := context.Background()
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
buyerPrivateKey, err := btcec.NewPrivateKey()
require.NoError(t, err)
buyerPubkeyHex := hex.EncodeToString(buyerPrivateKey.PubKey().SerializeCompressed())
message := &protobuf.NodeSaleEvent{
Action: protobuf.Action_ACTION_PURCHASE,
Purchase: &protobuf.ActionPurchase{
Payload: &protobuf.PurchasePayload{
DeployID: &protobuf.ActionID{
Block: 111,
TxIndex: 1,
},
NodeIDs: []uint32{1, 2},
BuyerPublicKey: buyerPubkeyHex,
TotalAmountSat: 500,
TimeOutBlock: uint64(testBlockHeight) + 5,
},
},
}
event, block := assembleTestEvent(buyerPrivateKey, "030303030303", "030303030303", 0, 0, message)
mockDgTx.EXPECT().GetNodeSale(mock.Anything, mock.Anything).Return(nil, nil)
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
return event.Valid == false
})).Return(nil)
err = p.ProcessPurchase(ctx, mockDgTx, block, event)
require.NoError(t, err)
mockDgTx.AssertNotCalled(t, "CreateNode")
}
func TestInvalidBuyerKey(t *testing.T) {
ctx := context.Background()
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
strangerPrivateKey, _ := btcec.NewPrivateKey()
strangerPrivateKeyHex := hex.EncodeToString(strangerPrivateKey.PubKey().SerializeCompressed())
buyerPrivateKey, _ := btcec.NewPrivateKey()
message := &protobuf.NodeSaleEvent{
Action: protobuf.Action_ACTION_PURCHASE,
Purchase: &protobuf.ActionPurchase{
Payload: &protobuf.PurchasePayload{
DeployID: &protobuf.ActionID{
Block: 100,
TxIndex: 1,
},
NodeIDs: []uint32{1, 2},
BuyerPublicKey: strangerPrivateKeyHex,
TotalAmountSat: 200,
TimeOutBlock: uint64(testBlockHeight) + 5,
},
},
}
event, block := assembleTestEvent(buyerPrivateKey, "0707070707", "0707070707", 0, 0, message)
block.Header.Timestamp = time.Now().UTC()
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
return event.Valid == false && event.Reason == validator.INVALID_PUBKEY
})).Return(nil)
err := p.ProcessPurchase(ctx, mockDgTx, block, event)
require.NoError(t, err)
mockDgTx.AssertNotCalled(t, "CreateNode")
}
func TestInvalidTimestamp(t *testing.T) {
ctx := context.Background()
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
sellerPrivateKey, err := btcec.NewPrivateKey()
require.NoError(t, err)
sellerPubkeyHex := hex.EncodeToString(sellerPrivateKey.PubKey().SerializeCompressed())
sellerWallet := p.PubkeyToPkHashAddress(sellerPrivateKey.PubKey())
startAt := time.Now().Add(time.Hour * -1)
endAt := time.Now().Add(time.Hour * 1)
tiers := lo.Map([]*protobuf.Tier{
{
PriceSat: 100,
Limit: 5,
MaxPerAddress: 100,
},
{
PriceSat: 200,
Limit: 5,
MaxPerAddress: 100,
},
}, func(tier *protobuf.Tier, _ int) []byte {
tierJson, err := protojson.Marshal(tier)
require.NoError(t, err)
return tierJson
})
mockDgTx.EXPECT().GetNodeSale(mock.Anything, datagateway.GetNodeSaleParams{
BlockHeight: 100,
TxIndex: 1,
}).Return([]entity.NodeSale{
{
BlockHeight: 100,
TxIndex: 1,
Name: t.Name(),
StartsAt: startAt,
EndsAt: endAt,
Tiers: tiers,
SellerPublicKey: sellerPubkeyHex,
MaxPerAddress: 100,
DeployTxHash: "040404040404",
MaxDiscountPercentage: 50,
SellerWallet: sellerWallet.EncodeAddress(),
},
}, nil)
buyerPrivateKey, _ := btcec.NewPrivateKey()
buyerPubkeyHex := hex.EncodeToString(buyerPrivateKey.PubKey().SerializeCompressed())
message := &protobuf.NodeSaleEvent{
Action: protobuf.Action_ACTION_PURCHASE,
Purchase: &protobuf.ActionPurchase{
Payload: &protobuf.PurchasePayload{
DeployID: &protobuf.ActionID{
Block: 100,
TxIndex: 1,
},
NodeIDs: []uint32{1, 2},
BuyerPublicKey: buyerPubkeyHex,
TotalAmountSat: 200,
TimeOutBlock: uint64(testBlockHeight) + 5,
},
},
}
event, block := assembleTestEvent(buyerPrivateKey, "050505050505", "050505050505", 0, 0, message)
block.Header.Timestamp = time.Now().UTC().Add(time.Hour * 2)
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
return event.Valid == false && event.Reason == purchase.PURCHASE_TIMEOUT
})).Return(nil)
err = p.ProcessPurchase(ctx, mockDgTx, block, event)
require.NoError(t, err)
mockDgTx.AssertNotCalled(t, "CreateNode")
}
func TestTimeOut(t *testing.T) {
ctx := context.Background()
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
sellerPrivateKey, _ := btcec.NewPrivateKey()
sellerPubkeyHex := hex.EncodeToString(sellerPrivateKey.PubKey().SerializeCompressed())
sellerWallet := p.PubkeyToPkHashAddress(sellerPrivateKey.PubKey())
startAt := time.Now().Add(time.Hour * -1)
endAt := time.Now().Add(time.Hour * 1)
tiers := lo.Map([]*protobuf.Tier{
{
PriceSat: 100,
Limit: 5,
MaxPerAddress: 100,
},
{
PriceSat: 200,
Limit: 5,
MaxPerAddress: 100,
},
}, func(tier *protobuf.Tier, _ int) []byte {
tierJson, err := protojson.Marshal(tier)
require.NoError(t, err)
return tierJson
})
mockDgTx.EXPECT().GetNodeSale(mock.Anything, datagateway.GetNodeSaleParams{
BlockHeight: 100,
TxIndex: 1,
}).Return([]entity.NodeSale{
{
BlockHeight: 100,
TxIndex: 1,
Name: t.Name(),
StartsAt: startAt,
EndsAt: endAt,
Tiers: tiers,
SellerPublicKey: sellerPubkeyHex,
MaxPerAddress: 100,
DeployTxHash: "040404040404",
MaxDiscountPercentage: 50,
SellerWallet: sellerWallet.EncodeAddress(),
},
}, nil)
buyerPrivateKey, _ := btcec.NewPrivateKey()
buyerPubkeyHex := hex.EncodeToString(buyerPrivateKey.PubKey().SerializeCompressed())
message := &protobuf.NodeSaleEvent{
Action: protobuf.Action_ACTION_PURCHASE,
Purchase: &protobuf.ActionPurchase{
Payload: &protobuf.PurchasePayload{
DeployID: &protobuf.ActionID{
Block: 100,
TxIndex: 1,
},
NodeIDs: []uint32{1, 2},
BuyerPublicKey: buyerPubkeyHex,
TimeOutBlock: uint64(testBlockHeight) - 5,
TotalAmountSat: 200,
},
},
}
event, block := assembleTestEvent(buyerPrivateKey, "090909090909", "090909090909", 0, 0, message)
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
return event.Valid == false && event.Reason == purchase.BLOCK_HEIGHT_TIMEOUT
})).Return(nil)
err := p.ProcessPurchase(ctx, mockDgTx, block, event)
require.NoError(t, err)
mockDgTx.AssertNotCalled(t, "CreateNode")
}
func TestSignatureInvalid(t *testing.T) {
ctx := context.Background()
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
sellerPrivateKey, _ := btcec.NewPrivateKey()
sellerPubkeyHex := hex.EncodeToString(sellerPrivateKey.PubKey().SerializeCompressed())
sellerWallet := p.PubkeyToPkHashAddress(sellerPrivateKey.PubKey())
startAt := time.Now().Add(time.Hour * -1)
endAt := time.Now().Add(time.Hour * 1)
tiers := lo.Map([]*protobuf.Tier{
{
PriceSat: 100,
Limit: 5,
MaxPerAddress: 100,
},
{
PriceSat: 200,
Limit: 5,
MaxPerAddress: 100,
},
}, func(tier *protobuf.Tier, _ int) []byte {
tierJson, err := protojson.Marshal(tier)
require.NoError(t, err)
return tierJson
})
mockDgTx.EXPECT().GetNodeSale(mock.Anything, datagateway.GetNodeSaleParams{
BlockHeight: 100,
TxIndex: 1,
}).Return([]entity.NodeSale{
{
BlockHeight: 100,
TxIndex: 1,
Name: t.Name(),
StartsAt: startAt,
EndsAt: endAt,
Tiers: tiers,
SellerPublicKey: sellerPubkeyHex,
MaxPerAddress: 100,
DeployTxHash: "040404040404",
MaxDiscountPercentage: 50,
SellerWallet: sellerWallet.EncodeAddress(),
},
}, nil)
buyerPrivateKey, _ := btcec.NewPrivateKey()
buyerPubkeyHex := hex.EncodeToString(buyerPrivateKey.PubKey().SerializeCompressed())
payload := &protobuf.PurchasePayload{
DeployID: &protobuf.ActionID{
Block: 100,
TxIndex: 1,
},
NodeIDs: []uint32{1, 2},
BuyerPublicKey: buyerPubkeyHex,
TimeOutBlock: testBlockHeight + 5,
}
payloadBytes, _ := proto.Marshal(payload)
payloadHash := chainhash.DoubleHashB(payloadBytes)
signature := ecdsa.Sign(buyerPrivateKey, payloadHash[:])
signatureHex := hex.EncodeToString(signature.Serialize())
message := &protobuf.NodeSaleEvent{
Action: protobuf.Action_ACTION_PURCHASE,
Purchase: &protobuf.ActionPurchase{
Payload: payload,
SellerSignature: signatureHex,
},
}
event, block := assembleTestEvent(buyerPrivateKey, "0B0B0B", "0B0B0B", 0, 0, message)
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
return event.Valid == false && event.Reason == purchase.INVALID_SIGNATURE
})).Return(nil)
err := p.ProcessPurchase(ctx, mockDgTx, block, event)
require.NoError(t, err)
mockDgTx.AssertNotCalled(t, "CreateNode")
}
func TestValidPurchase(t *testing.T) {
ctx := context.Background()
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
sellerPrivateKey, _ := btcec.NewPrivateKey()
sellerPubkeyHex := hex.EncodeToString(sellerPrivateKey.PubKey().SerializeCompressed())
sellerWallet := p.PubkeyToPkHashAddress(sellerPrivateKey.PubKey())
startAt := time.Now().Add(time.Hour * -1)
endAt := time.Now().Add(time.Hour * 1)
tiers := lo.Map([]*protobuf.Tier{
{
PriceSat: 100,
Limit: 5,
MaxPerAddress: 100,
},
{
PriceSat: 200,
Limit: 4,
MaxPerAddress: 2,
},
{
PriceSat: 400,
Limit: 3,
MaxPerAddress: 100,
},
}, func(tier *protobuf.Tier, _ int) []byte {
tierJson, err := protojson.Marshal(tier)
require.NoError(t, err)
return tierJson
})
mockDgTx.EXPECT().GetNodeSale(mock.Anything, datagateway.GetNodeSaleParams{
BlockHeight: 100,
TxIndex: 1,
}).Return([]entity.NodeSale{
{
BlockHeight: 100,
TxIndex: 1,
Name: t.Name(),
StartsAt: startAt,
EndsAt: endAt,
Tiers: tiers,
SellerPublicKey: sellerPubkeyHex,
MaxPerAddress: 100,
DeployTxHash: "040404040404",
MaxDiscountPercentage: 50,
SellerWallet: sellerWallet.EncodeAddress(),
},
}, nil)
mockDgTx.EXPECT().GetNodesByIds(mock.Anything, mock.Anything).Return(nil, nil)
mockDgTx.EXPECT().GetNodesByOwner(mock.Anything, mock.Anything).Return(nil, nil)
buyerPrivateKey, _ := btcec.NewPrivateKey()
buyerPubkeyHex := hex.EncodeToString(buyerPrivateKey.PubKey().SerializeCompressed())
payload := &protobuf.PurchasePayload{
DeployID: &protobuf.ActionID{
Block: 100,
TxIndex: 1,
},
BuyerPublicKey: buyerPubkeyHex,
TimeOutBlock: uint64(testBlockHeight) + 5,
NodeIDs: []uint32{0, 5, 6, 9},
TotalAmountSat: 500,
}
payloadBytes, _ := proto.Marshal(payload)
payloadHash := chainhash.DoubleHashB(payloadBytes)
signature := ecdsa.Sign(sellerPrivateKey, payloadHash[:])
signatureHex := hex.EncodeToString(signature.Serialize())
message := &protobuf.NodeSaleEvent{
Action: protobuf.Action_ACTION_PURCHASE,
Purchase: &protobuf.ActionPurchase{
Payload: payload,
SellerSignature: signatureHex,
},
}
event, block := assembleTestEvent(buyerPrivateKey, "0D0D0D0D", "0D0D0D0D", 0, 0, message)
event.InputValue = 500
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
return event.Valid == true && event.Reason == ""
})).Return(nil)
mockDgTx.EXPECT().CreateNode(mock.Anything, mock.MatchedBy(func(node entity.Node) bool {
return node.NodeID == 0 &&
node.TierIndex == 0 &&
node.OwnerPublicKey == buyerPubkeyHex &&
node.PurchaseTxHash == event.Transaction.TxHash.String() &&
node.SaleBlock == 100 &&
node.SaleTxIndex == 1
})).Return(nil)
mockDgTx.EXPECT().CreateNode(mock.Anything, mock.MatchedBy(func(node entity.Node) bool {
return node.NodeID == 5 &&
node.TierIndex == 1 &&
node.OwnerPublicKey == buyerPubkeyHex &&
node.PurchaseTxHash == event.Transaction.TxHash.String() &&
node.SaleBlock == 100 &&
node.SaleTxIndex == 1
})).Return(nil)
mockDgTx.EXPECT().CreateNode(mock.Anything, mock.MatchedBy(func(node entity.Node) bool {
return node.NodeID == 6 &&
node.TierIndex == 1 &&
node.OwnerPublicKey == buyerPubkeyHex &&
node.PurchaseTxHash == event.Transaction.TxHash.String() &&
node.SaleBlock == 100 &&
node.SaleTxIndex == 1
})).Return(nil)
mockDgTx.EXPECT().CreateNode(mock.Anything, mock.MatchedBy(func(node entity.Node) bool {
return node.NodeID == 9 &&
node.TierIndex == 2 &&
node.OwnerPublicKey == buyerPubkeyHex &&
node.PurchaseTxHash == event.Transaction.TxHash.String() &&
node.SaleBlock == 100 &&
node.SaleTxIndex == 1
})).Return(nil)
err := p.ProcessPurchase(ctx, mockDgTx, block, event)
require.NoError(t, err)
}
func TestMismatchPayment(t *testing.T) {
ctx := context.Background()
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
sellerPrivateKey, _ := btcec.NewPrivateKey()
sellerPubkeyHex := hex.EncodeToString(sellerPrivateKey.PubKey().SerializeCompressed())
sellerWallet := p.PubkeyToPkHashAddress(sellerPrivateKey.PubKey())
startAt := time.Now().Add(time.Hour * -1)
endAt := time.Now().Add(time.Hour * 1)
tiers := lo.Map([]*protobuf.Tier{
{
PriceSat: 100,
Limit: 5,
MaxPerAddress: 100,
},
{
PriceSat: 200,
Limit: 4,
MaxPerAddress: 2,
},
{
PriceSat: 400,
Limit: 3,
MaxPerAddress: 100,
},
}, func(tier *protobuf.Tier, _ int) []byte {
tierJson, err := protojson.Marshal(tier)
require.NoError(t, err)
return tierJson
})
mockDgTx.EXPECT().GetNodeSale(mock.Anything, datagateway.GetNodeSaleParams{
BlockHeight: 100,
TxIndex: 1,
}).Return([]entity.NodeSale{
{
BlockHeight: 100,
TxIndex: 1,
Name: t.Name(),
StartsAt: startAt,
EndsAt: endAt,
Tiers: tiers,
SellerPublicKey: sellerPubkeyHex,
MaxPerAddress: 100,
DeployTxHash: "040404040404",
MaxDiscountPercentage: 50,
SellerWallet: sellerWallet.EncodeAddress(),
},
}, nil)
mockDgTx.EXPECT().GetNodesByIds(mock.Anything, mock.Anything).Return(nil, nil)
buyerPrivateKey, _ := btcec.NewPrivateKey()
buyerPubkeyHex := hex.EncodeToString(buyerPrivateKey.PubKey().SerializeCompressed())
payload := &protobuf.PurchasePayload{
DeployID: &protobuf.ActionID{
Block: 100,
TxIndex: 1,
},
BuyerPublicKey: buyerPubkeyHex,
TimeOutBlock: uint64(testBlockHeight) + 5,
NodeIDs: []uint32{0, 5, 6, 9},
TotalAmountSat: 500,
}
payloadBytes, _ := proto.Marshal(payload)
payloadHash := chainhash.DoubleHashB(payloadBytes)
signature := ecdsa.Sign(sellerPrivateKey, payloadHash[:])
signatureHex := hex.EncodeToString(signature.Serialize())
message := &protobuf.NodeSaleEvent{
Action: protobuf.Action_ACTION_PURCHASE,
Purchase: &protobuf.ActionPurchase{
Payload: payload,
SellerSignature: signatureHex,
},
}
event, block := assembleTestEvent(buyerPrivateKey, "0D0D0D0D", "0D0D0D0D", 0, 0, message)
event.InputValue = 400
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
return event.Valid == false && event.Reason == purchase.INVALID_PAYMENT
})).Return(nil)
err := p.ProcessPurchase(ctx, mockDgTx, block, event)
require.NoError(t, err)
}
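// TestInsufficientFund verifies that a purchase declaring a TotalAmountSat (200 sats) too low to cover the selected nodes' tier prices, even with the sale's 50% maximum discount, is recorded as invalid with reason purchase.INSUFFICIENT_FUND.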
func TestInsufficientFund(t *testing.T) {
ctx := context.Background()
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
sellerPrivateKey, _ := btcec.NewPrivateKey()
sellerPubkeyHex := hex.EncodeToString(sellerPrivateKey.PubKey().SerializeCompressed())
sellerWallet := p.PubkeyToPkHashAddress(sellerPrivateKey.PubKey())
startAt := time.Now().Add(time.Hour * -1)
endAt := time.Now().Add(time.Hour * 1)
tiers := lo.Map([]*protobuf.Tier{
{
PriceSat: 100,
Limit: 5,
MaxPerAddress: 100,
},
{
PriceSat: 200,
Limit: 4,
MaxPerAddress: 2,
},
{
PriceSat: 400,
Limit: 3,
MaxPerAddress: 100,
},
}, func(tier *protobuf.Tier, _ int) []byte {
tierJson, err := protojson.Marshal(tier)
require.NoError(t, err)
return tierJson
})
mockDgTx.EXPECT().GetNodeSale(mock.Anything, datagateway.GetNodeSaleParams{
BlockHeight: 100,
TxIndex: 1,
}).Return([]entity.NodeSale{
{
BlockHeight: 100,
TxIndex: 1,
Name: t.Name(),
StartsAt: startAt,
EndsAt: endAt,
Tiers: tiers,
SellerPublicKey: sellerPubkeyHex,
MaxPerAddress: 100,
DeployTxHash: "040404040404",
MaxDiscountPercentage: 50,
SellerWallet: sellerWallet.EncodeAddress(),
},
}, nil)
mockDgTx.EXPECT().GetNodesByIds(mock.Anything, mock.Anything).Return(nil, nil)
buyerPrivateKey, _ := btcec.NewPrivateKey()
buyerPubkeyHex := hex.EncodeToString(buyerPrivateKey.PubKey().SerializeCompressed())
payload := &protobuf.PurchasePayload{
DeployID: &protobuf.ActionID{
Block: 100,
TxIndex: 1,
},
BuyerPublicKey: buyerPubkeyHex,
TimeOutBlock: uint64(testBlockHeight) + 5,
NodeIDs: []uint32{0, 5, 6, 9},
TotalAmountSat: 200,
}
payloadBytes, _ := proto.Marshal(payload)
payloadHash := chainhash.DoubleHashB(payloadBytes)
signature := ecdsa.Sign(sellerPrivateKey, payloadHash[:])
signatureHex := hex.EncodeToString(signature.Serialize())
message := &protobuf.NodeSaleEvent{
Action: protobuf.Action_ACTION_PURCHASE,
Purchase: &protobuf.ActionPurchase{
Payload: payload,
SellerSignature: signatureHex,
},
}
event, block := assembleTestEvent(buyerPrivateKey, "0D0D0D0D", "0D0D0D0D", 0, 0, message)
event.InputValue = 200
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
return event.Valid == false && event.Reason == purchase.INSUFFICIENT_FUND
})).Return(nil)
err := p.ProcessPurchase(ctx, mockDgTx, block, event)
require.NoError(t, err)
}
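// TestBuyingLimit verifies the sale-wide per-address cap: with MaxPerAddress set to 2 and the buyer already owning two nodes, buying one more is rejected with reason purchase.OVER_LIMIT_PER_ADDR and no node is created.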
func TestBuyingLimit(t *testing.T) {
ctx := context.Background()
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
sellerPrivateKey, _ := btcec.NewPrivateKey()
sellerPubkeyHex := hex.EncodeToString(sellerPrivateKey.PubKey().SerializeCompressed())
sellerWallet := p.PubkeyToPkHashAddress(sellerPrivateKey.PubKey())
startAt := time.Now().Add(time.Hour * -1)
endAt := time.Now().Add(time.Hour * 1)
tiers := lo.Map([]*protobuf.Tier{
{
PriceSat: 100,
Limit: 5,
MaxPerAddress: 100,
},
{
PriceSat: 200,
Limit: 4,
MaxPerAddress: 2,
},
{
PriceSat: 400,
Limit: 50,
MaxPerAddress: 100,
},
}, func(tier *protobuf.Tier, _ int) []byte {
tierJson, err := protojson.Marshal(tier)
require.NoError(t, err)
return tierJson
})
mockDgTx.EXPECT().GetNodeSale(mock.Anything, datagateway.GetNodeSaleParams{
BlockHeight: 100,
TxIndex: 1,
}).Return([]entity.NodeSale{
{
BlockHeight: 100,
TxIndex: 1,
Name: t.Name(),
StartsAt: startAt,
EndsAt: endAt,
Tiers: tiers,
SellerPublicKey: sellerPubkeyHex,
MaxPerAddress: 2,
DeployTxHash: "040404040404",
MaxDiscountPercentage: 50,
SellerWallet: sellerWallet.EncodeAddress(),
},
}, nil)
buyerPrivateKey, _ := btcec.NewPrivateKey()
buyerPubkeyHex := hex.EncodeToString(buyerPrivateKey.PubKey().SerializeCompressed())
mockDgTx.EXPECT().GetNodesByIds(mock.Anything, mock.Anything).Return(nil, nil)
mockDgTx.EXPECT().GetNodesByOwner(mock.Anything, datagateway.GetNodesByOwnerParams{
SaleBlock: 100,
SaleTxIndex: 1,
OwnerPublicKey: buyerPubkeyHex,
}).Return([]entity.Node{
{
SaleBlock: 100,
SaleTxIndex: 1,
NodeID: 9,
TierIndex: 2,
OwnerPublicKey: buyerPubkeyHex,
},
{
SaleBlock: 100,
SaleTxIndex: 1,
NodeID: 10,
TierIndex: 2,
OwnerPublicKey: buyerPubkeyHex,
},
}, nil)
payload := &protobuf.PurchasePayload{
DeployID: &protobuf.ActionID{
Block: 100,
TxIndex: 1,
},
BuyerPublicKey: buyerPubkeyHex,
TimeOutBlock: uint64(testBlockHeight) + 5,
NodeIDs: []uint32{11},
TotalAmountSat: 600,
}
payloadBytes, _ := proto.Marshal(payload)
payloadHash := chainhash.DoubleHashB(payloadBytes)
signature := ecdsa.Sign(sellerPrivateKey, payloadHash[:])
signatureHex := hex.EncodeToString(signature.Serialize())
message := &protobuf.NodeSaleEvent{
Action: protobuf.Action_ACTION_PURCHASE,
Purchase: &protobuf.ActionPurchase{
Payload: payload,
SellerSignature: signatureHex,
},
}
event, block := assembleTestEvent(buyerPrivateKey, "22222222", "22222222", 0, 0, message)
event.InputValue = 600
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
return event.Valid == false && event.Reason == purchase.OVER_LIMIT_PER_ADDR
})).Return(nil)
err := p.ProcessPurchase(ctx, mockDgTx, block, event)
require.NoError(t, err)
mockDgTx.AssertNotCalled(t, "CreateNode")
}
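// TestBuyingTierLimit verifies the per-tier cap: tier 2 allows at most 3 nodes per address and the buyer already owns three tier-2 nodes, so buying three more tier-2 nodes is rejected with reason purchase.OVER_LIMIT_PER_TIER.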
func TestBuyingTierLimit(t *testing.T) {
ctx := context.Background()
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
sellerPrivateKey, _ := btcec.NewPrivateKey()
sellerPubkeyHex := hex.EncodeToString(sellerPrivateKey.PubKey().SerializeCompressed())
sellerWallet := p.PubkeyToPkHashAddress(sellerPrivateKey.PubKey())
startAt := time.Now().Add(time.Hour * -1)
endAt := time.Now().Add(time.Hour * 1)
tiers := lo.Map([]*protobuf.Tier{
{
PriceSat: 100,
Limit: 5,
MaxPerAddress: 100,
},
{
PriceSat: 200,
Limit: 4,
MaxPerAddress: 2,
},
{
PriceSat: 400,
Limit: 50,
MaxPerAddress: 3,
},
}, func(tier *protobuf.Tier, _ int) []byte {
tierJson, err := protojson.Marshal(tier)
require.NoError(t, err)
return tierJson
})
mockDgTx.EXPECT().GetNodeSale(mock.Anything, datagateway.GetNodeSaleParams{
BlockHeight: 100,
TxIndex: 1,
}).Return([]entity.NodeSale{
{
BlockHeight: 100,
TxIndex: 1,
Name: t.Name(),
StartsAt: startAt,
EndsAt: endAt,
Tiers: tiers,
SellerPublicKey: sellerPubkeyHex,
MaxPerAddress: 100,
DeployTxHash: "040404040404",
MaxDiscountPercentage: 50,
SellerWallet: sellerWallet.EncodeAddress(),
},
}, nil)
buyerPrivateKey, _ := btcec.NewPrivateKey()
buyerPubkeyHex := hex.EncodeToString(buyerPrivateKey.PubKey().SerializeCompressed())
mockDgTx.EXPECT().GetNodesByIds(mock.Anything, mock.Anything).Return(nil, nil)
mockDgTx.EXPECT().GetNodesByOwner(mock.Anything, datagateway.GetNodesByOwnerParams{
SaleBlock: 100,
SaleTxIndex: 1,
OwnerPublicKey: buyerPubkeyHex,
}).Return([]entity.Node{
{
SaleBlock: 100,
SaleTxIndex: 1,
NodeID: 9,
TierIndex: 2,
OwnerPublicKey: buyerPubkeyHex,
},
{
SaleBlock: 100,
SaleTxIndex: 1,
NodeID: 10,
TierIndex: 2,
OwnerPublicKey: buyerPubkeyHex,
},
{
SaleBlock: 100,
SaleTxIndex: 1,
NodeID: 11,
TierIndex: 2,
OwnerPublicKey: buyerPubkeyHex,
},
}, nil)
payload := &protobuf.PurchasePayload{
DeployID: &protobuf.ActionID{
Block: 100,
TxIndex: 1,
},
BuyerPublicKey: buyerPubkeyHex,
TimeOutBlock: uint64(testBlockHeight) + 5,
NodeIDs: []uint32{12, 13, 14},
TotalAmountSat: 600,
}
payloadBytes, _ := proto.Marshal(payload)
payloadHash := chainhash.DoubleHashB(payloadBytes)
signature := ecdsa.Sign(sellerPrivateKey, payloadHash[:])
signatureHex := hex.EncodeToString(signature.Serialize())
message := &protobuf.NodeSaleEvent{
Action: protobuf.Action_ACTION_PURCHASE,
Purchase: &protobuf.ActionPurchase{
Payload: payload,
SellerSignature: signatureHex,
},
}
event, block := assembleTestEvent(buyerPrivateKey, "10101010", "10101010", 0, 0, message)
event.InputValue = 600
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
return event.Valid == false && event.Reason == purchase.OVER_LIMIT_PER_TIER
})).Return(nil)
err := p.ProcessPurchase(ctx, mockDgTx, block, event)
require.NoError(t, err)
}


@@ -0,0 +1,62 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
// source: blocks.sql
package gen
import (
"context"
)
const createBlock = `-- name: CreateBlock :exec
INSERT INTO blocks ("block_height", "block_hash", "module")
VALUES ($1, $2, $3)
`
type CreateBlockParams struct {
BlockHeight int64
BlockHash string
Module string
}
func (q *Queries) CreateBlock(ctx context.Context, arg CreateBlockParams) error {
_, err := q.db.Exec(ctx, createBlock, arg.BlockHeight, arg.BlockHash, arg.Module)
return err
}
const getBlock = `-- name: GetBlock :one
SELECT block_height, block_hash, module FROM blocks
WHERE "block_height" = $1
`
func (q *Queries) GetBlock(ctx context.Context, blockHeight int64) (Block, error) {
row := q.db.QueryRow(ctx, getBlock, blockHeight)
var i Block
err := row.Scan(&i.BlockHeight, &i.BlockHash, &i.Module)
return i, err
}
const getLastProcessedBlock = `-- name: GetLastProcessedBlock :one
SELECT block_height, block_hash, module FROM blocks ORDER BY block_height DESC LIMIT 1
`
func (q *Queries) GetLastProcessedBlock(ctx context.Context) (Block, error) {
row := q.db.QueryRow(ctx, getLastProcessedBlock)
var i Block
err := row.Scan(&i.BlockHeight, &i.BlockHash, &i.Module)
return i, err
}
const removeBlockFrom = `-- name: RemoveBlockFrom :execrows
DELETE FROM blocks
WHERE "block_height" >= $1
`
func (q *Queries) RemoveBlockFrom(ctx context.Context, fromBlock int64) (int64, error) {
result, err := q.db.Exec(ctx, removeBlockFrom, fromBlock)
if err != nil {
return 0, err
}
return result.RowsAffected(), nil
}


@@ -0,0 +1,32 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
package gen
import (
"context"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgconn"
)
type DBTX interface {
Exec(context.Context, string, ...interface{}) (pgconn.CommandTag, error)
Query(context.Context, string, ...interface{}) (pgx.Rows, error)
QueryRow(context.Context, string, ...interface{}) pgx.Row
}
func New(db DBTX) *Queries {
return &Queries{db: db}
}
type Queries struct {
db DBTX
}
func (q *Queries) WithTx(tx pgx.Tx) *Queries {
return &Queries{
db: tx,
}
}


@@ -0,0 +1,104 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
// source: events.sql
package gen
import (
"context"
"github.com/jackc/pgx/v5/pgtype"
)
const createEvent = `-- name: CreateEvent :exec
INSERT INTO events ("tx_hash", "block_height", "tx_index", "wallet_address", "valid", "action",
"raw_message", "parsed_message", "block_timestamp", "block_hash", "metadata",
"reason")
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)
`
type CreateEventParams struct {
TxHash string
BlockHeight int64
TxIndex int32
WalletAddress string
Valid bool
Action int32
RawMessage []byte
ParsedMessage []byte
BlockTimestamp pgtype.Timestamp
BlockHash string
Metadata []byte
Reason string
}
func (q *Queries) CreateEvent(ctx context.Context, arg CreateEventParams) error {
_, err := q.db.Exec(ctx, createEvent,
arg.TxHash,
arg.BlockHeight,
arg.TxIndex,
arg.WalletAddress,
arg.Valid,
arg.Action,
arg.RawMessage,
arg.ParsedMessage,
arg.BlockTimestamp,
arg.BlockHash,
arg.Metadata,
arg.Reason,
)
return err
}
const getEventsByWallet = `-- name: GetEventsByWallet :many
SELECT tx_hash, block_height, tx_index, wallet_address, valid, action, raw_message, parsed_message, block_timestamp, block_hash, metadata, reason
FROM events
WHERE wallet_address = $1
`
func (q *Queries) GetEventsByWallet(ctx context.Context, walletAddress string) ([]Event, error) {
rows, err := q.db.Query(ctx, getEventsByWallet, walletAddress)
if err != nil {
return nil, err
}
defer rows.Close()
var items []Event
for rows.Next() {
var i Event
if err := rows.Scan(
&i.TxHash,
&i.BlockHeight,
&i.TxIndex,
&i.WalletAddress,
&i.Valid,
&i.Action,
&i.RawMessage,
&i.ParsedMessage,
&i.BlockTimestamp,
&i.BlockHash,
&i.Metadata,
&i.Reason,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const removeEventsFromBlock = `-- name: RemoveEventsFromBlock :execrows
DELETE FROM events
WHERE "block_height" >= $1
`
func (q *Queries) RemoveEventsFromBlock(ctx context.Context, fromBlock int64) (int64, error) {
result, err := q.db.Exec(ctx, removeEventsFromBlock, fromBlock)
if err != nil {
return 0, err
}
return result.RowsAffected(), nil
}


@@ -0,0 +1,55 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
package gen
import (
"github.com/jackc/pgx/v5/pgtype"
)
type Block struct {
BlockHeight int64
BlockHash string
Module string
}
type Event struct {
TxHash string
BlockHeight int64
TxIndex int32
WalletAddress string
Valid bool
Action int32
RawMessage []byte
ParsedMessage []byte
BlockTimestamp pgtype.Timestamp
BlockHash string
Metadata []byte
Reason string
}
type Node struct {
SaleBlock int64
SaleTxIndex int32
NodeID int32
TierIndex int32
DelegatedTo string
OwnerPublicKey string
PurchaseTxHash string
DelegateTxHash string
}
type NodeSale struct {
BlockHeight int64
TxIndex int32
Name string
StartsAt pgtype.Timestamp
EndsAt pgtype.Timestamp
Tiers [][]byte
SellerPublicKey string
MaxPerAddress int32
DeployTxHash string
MaxDiscountPercentage int32
SellerWallet string
}


@@ -0,0 +1,312 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
// source: nodes.sql
package gen
import (
"context"
)
const clearDelegate = `-- name: ClearDelegate :execrows
UPDATE nodes
SET "delegated_to" = ''
WHERE "delegate_tx_hash" = ''
`
func (q *Queries) ClearDelegate(ctx context.Context) (int64, error) {
result, err := q.db.Exec(ctx, clearDelegate)
if err != nil {
return 0, err
}
return result.RowsAffected(), nil
}
const createNode = `-- name: CreateNode :exec
INSERT INTO nodes (sale_block, sale_tx_index, node_id, tier_index, delegated_to, owner_public_key, purchase_tx_hash, delegate_tx_hash)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
`
type CreateNodeParams struct {
SaleBlock int64
SaleTxIndex int32
NodeID int32
TierIndex int32
DelegatedTo string
OwnerPublicKey string
PurchaseTxHash string
DelegateTxHash string
}
func (q *Queries) CreateNode(ctx context.Context, arg CreateNodeParams) error {
_, err := q.db.Exec(ctx, createNode,
arg.SaleBlock,
arg.SaleTxIndex,
arg.NodeID,
arg.TierIndex,
arg.DelegatedTo,
arg.OwnerPublicKey,
arg.PurchaseTxHash,
arg.DelegateTxHash,
)
return err
}
const getNodeCountByTierIndex = `-- name: GetNodeCountByTierIndex :many
SELECT (tiers.tier_index)::int AS tier_index, count(nodes.tier_index)
FROM generate_series($3::int,$4::int) AS tiers(tier_index)
LEFT JOIN
(SELECT sale_block, sale_tx_index, node_id, tier_index, delegated_to, owner_public_key, purchase_tx_hash, delegate_tx_hash
FROM nodes
WHERE sale_block = $1 AND
sale_tx_index= $2)
AS nodes ON tiers.tier_index = nodes.tier_index
GROUP BY tiers.tier_index
ORDER BY tiers.tier_index
`
type GetNodeCountByTierIndexParams struct {
SaleBlock int64
SaleTxIndex int32
FromTier int32
ToTier int32
}
type GetNodeCountByTierIndexRow struct {
TierIndex int32
Count int64
}
func (q *Queries) GetNodeCountByTierIndex(ctx context.Context, arg GetNodeCountByTierIndexParams) ([]GetNodeCountByTierIndexRow, error) {
rows, err := q.db.Query(ctx, getNodeCountByTierIndex,
arg.SaleBlock,
arg.SaleTxIndex,
arg.FromTier,
arg.ToTier,
)
if err != nil {
return nil, err
}
defer rows.Close()
var items []GetNodeCountByTierIndexRow
for rows.Next() {
var i GetNodeCountByTierIndexRow
if err := rows.Scan(&i.TierIndex, &i.Count); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const getNodesByDeployment = `-- name: GetNodesByDeployment :many
SELECT sale_block, sale_tx_index, node_id, tier_index, delegated_to, owner_public_key, purchase_tx_hash, delegate_tx_hash
FROM nodes
WHERE sale_block = $1 AND
sale_tx_index = $2
`
type GetNodesByDeploymentParams struct {
SaleBlock int64
SaleTxIndex int32
}
func (q *Queries) GetNodesByDeployment(ctx context.Context, arg GetNodesByDeploymentParams) ([]Node, error) {
rows, err := q.db.Query(ctx, getNodesByDeployment, arg.SaleBlock, arg.SaleTxIndex)
if err != nil {
return nil, err
}
defer rows.Close()
var items []Node
for rows.Next() {
var i Node
if err := rows.Scan(
&i.SaleBlock,
&i.SaleTxIndex,
&i.NodeID,
&i.TierIndex,
&i.DelegatedTo,
&i.OwnerPublicKey,
&i.PurchaseTxHash,
&i.DelegateTxHash,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const getNodesByIds = `-- name: GetNodesByIds :many
SELECT sale_block, sale_tx_index, node_id, tier_index, delegated_to, owner_public_key, purchase_tx_hash, delegate_tx_hash
FROM nodes
WHERE sale_block = $1 AND
sale_tx_index = $2 AND
node_id = ANY ($3::int[])
`
type GetNodesByIdsParams struct {
SaleBlock int64
SaleTxIndex int32
NodeIds []int32
}
func (q *Queries) GetNodesByIds(ctx context.Context, arg GetNodesByIdsParams) ([]Node, error) {
rows, err := q.db.Query(ctx, getNodesByIds, arg.SaleBlock, arg.SaleTxIndex, arg.NodeIds)
if err != nil {
return nil, err
}
defer rows.Close()
var items []Node
for rows.Next() {
var i Node
if err := rows.Scan(
&i.SaleBlock,
&i.SaleTxIndex,
&i.NodeID,
&i.TierIndex,
&i.DelegatedTo,
&i.OwnerPublicKey,
&i.PurchaseTxHash,
&i.DelegateTxHash,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const getNodesByOwner = `-- name: GetNodesByOwner :many
SELECT sale_block, sale_tx_index, node_id, tier_index, delegated_to, owner_public_key, purchase_tx_hash, delegate_tx_hash
FROM nodes
WHERE sale_block = $1 AND
sale_tx_index = $2 AND
owner_public_key = $3
ORDER BY tier_index
`
type GetNodesByOwnerParams struct {
SaleBlock int64
SaleTxIndex int32
OwnerPublicKey string
}
func (q *Queries) GetNodesByOwner(ctx context.Context, arg GetNodesByOwnerParams) ([]Node, error) {
rows, err := q.db.Query(ctx, getNodesByOwner, arg.SaleBlock, arg.SaleTxIndex, arg.OwnerPublicKey)
if err != nil {
return nil, err
}
defer rows.Close()
var items []Node
for rows.Next() {
var i Node
if err := rows.Scan(
&i.SaleBlock,
&i.SaleTxIndex,
&i.NodeID,
&i.TierIndex,
&i.DelegatedTo,
&i.OwnerPublicKey,
&i.PurchaseTxHash,
&i.DelegateTxHash,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const getNodesByPubkey = `-- name: GetNodesByPubkey :many
SELECT nodes.sale_block, nodes.sale_tx_index, nodes.node_id, nodes.tier_index, nodes.delegated_to, nodes.owner_public_key, nodes.purchase_tx_hash, nodes.delegate_tx_hash
FROM nodes JOIN events ON nodes.purchase_tx_hash = events.tx_hash
WHERE sale_block = $1 AND
sale_tx_index = $2 AND
owner_public_key = $3 AND
delegated_to = $4
`
type GetNodesByPubkeyParams struct {
SaleBlock int64
SaleTxIndex int32
OwnerPublicKey string
DelegatedTo string
}
func (q *Queries) GetNodesByPubkey(ctx context.Context, arg GetNodesByPubkeyParams) ([]Node, error) {
rows, err := q.db.Query(ctx, getNodesByPubkey,
arg.SaleBlock,
arg.SaleTxIndex,
arg.OwnerPublicKey,
arg.DelegatedTo,
)
if err != nil {
return nil, err
}
defer rows.Close()
var items []Node
for rows.Next() {
var i Node
if err := rows.Scan(
&i.SaleBlock,
&i.SaleTxIndex,
&i.NodeID,
&i.TierIndex,
&i.DelegatedTo,
&i.OwnerPublicKey,
&i.PurchaseTxHash,
&i.DelegateTxHash,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const setDelegates = `-- name: SetDelegates :execrows
UPDATE nodes
SET delegated_to = $4, delegate_tx_hash = $3
WHERE sale_block = $1 AND
sale_tx_index = $2 AND
node_id = ANY ($5::int[])
`
type SetDelegatesParams struct {
SaleBlock int64
SaleTxIndex int32
DelegateTxHash string
Delegatee string
NodeIds []int32
}
func (q *Queries) SetDelegates(ctx context.Context, arg SetDelegatesParams) (int64, error) {
result, err := q.db.Exec(ctx, setDelegates,
arg.SaleBlock,
arg.SaleTxIndex,
arg.DelegateTxHash,
arg.Delegatee,
arg.NodeIds,
)
if err != nil {
return 0, err
}
return result.RowsAffected(), nil
}


@@ -0,0 +1,92 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
// source: nodesales.sql
package gen
import (
"context"
"github.com/jackc/pgx/v5/pgtype"
)
const createNodeSale = `-- name: CreateNodeSale :exec
INSERT INTO node_sales ("block_height", "tx_index", "name", "starts_at", "ends_at", "tiers", "seller_public_key", "max_per_address", "deploy_tx_hash", "max_discount_percentage", "seller_wallet")
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)
`
type CreateNodeSaleParams struct {
BlockHeight int64
TxIndex int32
Name string
StartsAt pgtype.Timestamp
EndsAt pgtype.Timestamp
Tiers [][]byte
SellerPublicKey string
MaxPerAddress int32
DeployTxHash string
MaxDiscountPercentage int32
SellerWallet string
}
func (q *Queries) CreateNodeSale(ctx context.Context, arg CreateNodeSaleParams) error {
_, err := q.db.Exec(ctx, createNodeSale,
arg.BlockHeight,
arg.TxIndex,
arg.Name,
arg.StartsAt,
arg.EndsAt,
arg.Tiers,
arg.SellerPublicKey,
arg.MaxPerAddress,
arg.DeployTxHash,
arg.MaxDiscountPercentage,
arg.SellerWallet,
)
return err
}
const getNodeSale = `-- name: GetNodeSale :many
SELECT block_height, tx_index, name, starts_at, ends_at, tiers, seller_public_key, max_per_address, deploy_tx_hash, max_discount_percentage, seller_wallet
FROM node_sales
WHERE block_height = $1 AND
tx_index = $2
`
type GetNodeSaleParams struct {
BlockHeight int64
TxIndex int32
}
func (q *Queries) GetNodeSale(ctx context.Context, arg GetNodeSaleParams) ([]NodeSale, error) {
rows, err := q.db.Query(ctx, getNodeSale, arg.BlockHeight, arg.TxIndex)
if err != nil {
return nil, err
}
defer rows.Close()
var items []NodeSale
for rows.Next() {
var i NodeSale
if err := rows.Scan(
&i.BlockHeight,
&i.TxIndex,
&i.Name,
&i.StartsAt,
&i.EndsAt,
&i.Tiers,
&i.SellerPublicKey,
&i.MaxPerAddress,
&i.DeployTxHash,
&i.MaxDiscountPercentage,
&i.SellerWallet,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}


@@ -0,0 +1,20 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
// source: test.sql
package gen
import (
"context"
)
const clearEvents = `-- name: ClearEvents :exec
DELETE FROM events
WHERE tx_hash <> ''
`
func (q *Queries) ClearEvents(ctx context.Context) error {
_, err := q.db.Exec(ctx, clearEvents)
return err
}


@@ -0,0 +1,74 @@
package postgres
import (
"encoding/json"
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
"github.com/gaze-network/indexer-network/modules/nodesale/repository/postgres/gen"
"github.com/samber/lo"
)
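// mapNodes converts sqlc-generated node rows into nodesale domain entities.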
func mapNodes(nodes []gen.Node) []entity.Node {
return lo.Map(nodes, func(item gen.Node, index int) entity.Node {
return entity.Node{
SaleBlock: uint64(item.SaleBlock),
SaleTxIndex: uint32(item.SaleTxIndex),
NodeID: uint32(item.NodeID),
TierIndex: item.TierIndex,
DelegatedTo: item.DelegatedTo,
OwnerPublicKey: item.OwnerPublicKey,
PurchaseTxHash: item.PurchaseTxHash,
DelegateTxHash: item.DelegateTxHash,
}
})
}
func mapNodeSales(nodeSales []gen.NodeSale) []entity.NodeSale {
return lo.Map(nodeSales, func(item gen.NodeSale, index int) entity.NodeSale {
return entity.NodeSale{
BlockHeight: uint64(item.BlockHeight),
TxIndex: uint32(item.TxIndex),
Name: item.Name,
StartsAt: item.StartsAt.Time,
EndsAt: item.EndsAt.Time,
Tiers: item.Tiers,
SellerPublicKey: item.SellerPublicKey,
MaxPerAddress: uint32(item.MaxPerAddress),
DeployTxHash: item.DeployTxHash,
MaxDiscountPercentage: item.MaxDiscountPercentage,
SellerWallet: item.SellerWallet,
}
})
}
func mapNodeCountByTierIndexRows(nodeCount []gen.GetNodeCountByTierIndexRow) []datagateway.GetNodeCountByTierIndexRow {
return lo.Map(nodeCount, func(item gen.GetNodeCountByTierIndexRow, index int) datagateway.GetNodeCountByTierIndexRow {
return datagateway.GetNodeCountByTierIndexRow{
TierIndex: item.TierIndex,
Count: item.Count, // assumed: the datagateway row mirrors the generated row's Count field; without it the count returned by the query is dropped
}
})
}
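// mapNodeSalesEvents converts event rows into domain events; if the stored purchase metadata cannot be unmarshalled, it falls back to an empty MetadataEventPurchase instead of failing.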
func mapNodeSalesEvents(events []gen.Event) []entity.NodeSaleEvent {
return lo.Map(events, func(item gen.Event, index int) entity.NodeSaleEvent {
var meta entity.MetadataEventPurchase
err := json.Unmarshal(item.Metadata, &meta)
if err != nil {
meta = entity.MetadataEventPurchase{}
}
return entity.NodeSaleEvent{
TxHash: item.TxHash,
BlockHeight: item.BlockHeight,
TxIndex: item.TxIndex,
WalletAddress: item.WalletAddress,
Valid: item.Valid,
Action: item.Action,
RawMessage: item.RawMessage,
ParsedMessage: item.ParsedMessage,
BlockTimestamp: item.BlockTimestamp.Time.UTC(),
BlockHash: item.BlockHash,
Metadata: &meta,
}
})
}


@@ -0,0 +1,247 @@
package postgres
import (
"context"
"encoding/json"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/internal/postgres"
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
"github.com/gaze-network/indexer-network/modules/nodesale/repository/postgres/gen"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgtype"
"github.com/samber/lo"
)
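// Repository implements the nodesale data gateway on top of PostgreSQL using the sqlc-generated queries.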
type Repository struct {
db postgres.DB
queries *gen.Queries
tx pgx.Tx
}
func NewRepository(db postgres.DB) *Repository {
return &Repository{
db: db,
queries: gen.New(db),
}
}
func (repo *Repository) CreateBlock(ctx context.Context, arg entity.Block) error {
err := repo.queries.CreateBlock(ctx, gen.CreateBlockParams{
BlockHeight: arg.BlockHeight,
BlockHash: arg.BlockHash,
Module: arg.Module,
})
if err != nil {
return errors.Wrap(err, "Cannot Add block")
}
return nil
}
func (repo *Repository) GetBlock(ctx context.Context, blockHeight int64) (*entity.Block, error) {
block, err := repo.queries.GetBlock(ctx, blockHeight)
if err != nil {
return nil, errors.Wrap(err, "Cannot get block")
}
return &entity.Block{
BlockHeight: block.BlockHeight,
BlockHash: block.BlockHash,
Module: block.Module,
}, nil
}
func (repo *Repository) GetLastProcessedBlock(ctx context.Context) (*entity.Block, error) {
block, err := repo.queries.GetLastProcessedBlock(ctx)
if err != nil {
return nil, errors.Wrap(err, "Cannot get last processed block")
}
return &entity.Block{
BlockHeight: block.BlockHeight,
BlockHash: block.BlockHash,
Module: block.Module,
}, nil
}
func (repo *Repository) RemoveBlockFrom(ctx context.Context, fromBlock int64) (int64, error) {
affected, err := repo.queries.RemoveBlockFrom(ctx, fromBlock)
if err != nil {
return 0, errors.Wrap(err, "Cannot remove blocks")
}
return affected, nil
}
func (repo *Repository) RemoveEventsFromBlock(ctx context.Context, fromBlock int64) (int64, error) {
affected, err := repo.queries.RemoveEventsFromBlock(ctx, fromBlock)
if err != nil {
return 0, errors.Wrap(err, "Cannot remove events")
}
return affected, nil
}
func (repo *Repository) ClearDelegate(ctx context.Context) (int64, error) {
affected, err := repo.queries.ClearDelegate(ctx)
if err != nil {
return 0, errors.Wrap(err, "Cannot clear delegate")
}
return affected, nil
}
func (repo *Repository) GetNodesByIds(ctx context.Context, arg datagateway.GetNodesByIdsParams) ([]entity.Node, error) {
nodes, err := repo.queries.GetNodesByIds(ctx, gen.GetNodesByIdsParams{
SaleBlock: int64(arg.SaleBlock),
SaleTxIndex: int32(arg.SaleTxIndex),
NodeIds: lo.Map(arg.NodeIds, func(item uint32, index int) int32 { return int32(item) }),
})
if err != nil {
return nil, errors.Wrap(err, "Cannot get nodes")
}
return mapNodes(nodes), nil
}
func (repo *Repository) CreateEvent(ctx context.Context, arg entity.NodeSaleEvent) error {
metaDataBytes := []byte("{}")
if arg.Metadata != nil {
metaDataBytes, _ = json.Marshal(arg.Metadata)
}
err := repo.queries.CreateEvent(ctx, gen.CreateEventParams{
TxHash: arg.TxHash,
BlockHeight: arg.BlockHeight,
TxIndex: arg.TxIndex,
WalletAddress: arg.WalletAddress,
Valid: arg.Valid,
Action: arg.Action,
RawMessage: arg.RawMessage,
ParsedMessage: arg.ParsedMessage,
BlockTimestamp: pgtype.Timestamp{Time: arg.BlockTimestamp.UTC(), Valid: true},
BlockHash: arg.BlockHash,
Metadata: metaDataBytes,
Reason: arg.Reason,
})
if err != nil {
return errors.Wrap(err, "Cannot add event")
}
return nil
}
func (repo *Repository) SetDelegates(ctx context.Context, arg datagateway.SetDelegatesParams) (int64, error) {
affected, err := repo.queries.SetDelegates(ctx, gen.SetDelegatesParams{
SaleBlock: int64(arg.SaleBlock),
SaleTxIndex: arg.SaleTxIndex,
Delegatee: arg.Delegatee,
DelegateTxHash: arg.DelegateTxHash,
NodeIds: lo.Map(arg.NodeIds, func(item uint32, index int) int32 { return int32(item) }),
})
if err != nil {
return 0, errors.Wrap(err, "Cannot set delegate")
}
return affected, nil
}
func (repo *Repository) CreateNodeSale(ctx context.Context, arg entity.NodeSale) error {
err := repo.queries.CreateNodeSale(ctx, gen.CreateNodeSaleParams{
BlockHeight: int64(arg.BlockHeight),
TxIndex: int32(arg.TxIndex),
Name: arg.Name,
StartsAt: pgtype.Timestamp{Time: arg.StartsAt.UTC(), Valid: true},
EndsAt: pgtype.Timestamp{Time: arg.EndsAt.UTC(), Valid: true},
Tiers: arg.Tiers,
SellerPublicKey: arg.SellerPublicKey,
MaxPerAddress: int32(arg.MaxPerAddress),
DeployTxHash: arg.DeployTxHash,
MaxDiscountPercentage: arg.MaxDiscountPercentage,
SellerWallet: arg.SellerWallet,
})
if err != nil {
return errors.Wrap(err, "Cannot add NodeSale")
}
return nil
}
func (repo *Repository) GetNodeSale(ctx context.Context, arg datagateway.GetNodeSaleParams) ([]entity.NodeSale, error) {
nodeSales, err := repo.queries.GetNodeSale(ctx, gen.GetNodeSaleParams{
BlockHeight: int64(arg.BlockHeight),
TxIndex: int32(arg.TxIndex),
})
if err != nil {
return nil, errors.Wrap(err, "Cannot get NodeSale")
}
return mapNodeSales(nodeSales), nil
}
func (repo *Repository) GetNodesByOwner(ctx context.Context, arg datagateway.GetNodesByOwnerParams) ([]entity.Node, error) {
nodes, err := repo.queries.GetNodesByOwner(ctx, gen.GetNodesByOwnerParams{
SaleBlock: int64(arg.SaleBlock),
SaleTxIndex: int32(arg.SaleTxIndex),
OwnerPublicKey: arg.OwnerPublicKey,
})
if err != nil {
return nil, errors.Wrap(err, "Cannot get nodes by owner")
}
return mapNodes(nodes), nil
}
func (repo *Repository) CreateNode(ctx context.Context, arg entity.Node) error {
err := repo.queries.CreateNode(ctx, gen.CreateNodeParams{
SaleBlock: int64(arg.SaleBlock),
SaleTxIndex: int32(arg.SaleTxIndex),
NodeID: int32(arg.NodeID),
TierIndex: arg.TierIndex,
DelegatedTo: arg.DelegatedTo,
OwnerPublicKey: arg.OwnerPublicKey,
PurchaseTxHash: arg.PurchaseTxHash,
DelegateTxHash: arg.DelegateTxHash,
})
if err != nil {
return errors.Wrap(err, "Cannot add node")
}
return nil
}
func (repo *Repository) GetNodeCountByTierIndex(ctx context.Context, arg datagateway.GetNodeCountByTierIndexParams) ([]datagateway.GetNodeCountByTierIndexRow, error) {
nodeCount, err := repo.queries.GetNodeCountByTierIndex(ctx, gen.GetNodeCountByTierIndexParams{
SaleBlock: int64(arg.SaleBlock),
SaleTxIndex: int32(arg.SaleTxIndex),
FromTier: int32(arg.FromTier),
ToTier: int32(arg.ToTier),
})
if err != nil {
return nil, errors.Wrap(err, "Cannot get node count by tier index")
}
return mapNodeCountByTierIndexRows(nodeCount), nil
}
func (repo *Repository) GetNodesByPubkey(ctx context.Context, arg datagateway.GetNodesByPubkeyParams) ([]entity.Node, error) {
nodes, err := repo.queries.GetNodesByPubkey(ctx, gen.GetNodesByPubkeyParams{
SaleBlock: arg.SaleBlock,
SaleTxIndex: arg.SaleTxIndex,
OwnerPublicKey: arg.OwnerPublicKey,
DelegatedTo: arg.DelegatedTo,
})
if err != nil {
return nil, errors.Wrap(err, "Cannot get nodes by public key")
}
return mapNodes(nodes), nil
}
func (repo *Repository) GetEventsByWallet(ctx context.Context, walletAddress string) ([]entity.NodeSaleEvent, error) {
events, err := repo.queries.GetEventsByWallet(ctx, walletAddress)
if err != nil {
return nil, errors.Wrap(err, "cannot get events by wallet")
}
return mapNodeSalesEvents(events), nil
}
func (repo *Repository) GetNodesByDeployment(ctx context.Context, saleBlock int64, saleTxIndex int32) ([]entity.Node, error) {
nodes, err := repo.queries.GetNodesByDeployment(ctx, gen.GetNodesByDeploymentParams{
SaleBlock: saleBlock,
SaleTxIndex: saleTxIndex,
})
if err != nil {
return nil, errors.Wrap(err, "cannot get nodes by deploy")
}
return mapNodes(nodes), nil
}


@@ -0,0 +1,62 @@
package postgres
import (
"context"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/jackc/pgx/v5"
)
var ErrTxAlreadyExists = errors.New("Transaction already exists. Call Commit() or Rollback() first.")
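// begin opens a new pgx transaction and returns a Repository bound to it. Nested transactions are not supported: calling begin while a transaction is already held returns ErrTxAlreadyExists.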
func (r *Repository) begin(ctx context.Context) (*Repository, error) {
if r.tx != nil {
return nil, errors.WithStack(ErrTxAlreadyExists)
}
tx, err := r.db.Begin(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to begin transaction")
}
return &Repository{
db: r.db,
queries: r.queries.WithTx(tx),
tx: tx,
}, nil
}
func (r *Repository) BeginNodeSaleTx(ctx context.Context) (datagateway.NodeSaleDataGatewayWithTx, error) {
repo, err := r.begin(ctx)
if err != nil {
return nil, errors.WithStack(err)
}
return repo, nil
}
func (r *Repository) Commit(ctx context.Context) error {
if r.tx == nil {
return nil
}
err := r.tx.Commit(ctx)
if err != nil {
return errors.Wrap(err, "failed to commit transaction")
}
r.tx = nil
return nil
}
func (r *Repository) Rollback(ctx context.Context) error {
if r.tx == nil {
return nil
}
err := r.tx.Rollback(ctx)
if err != nil && !errors.Is(err, pgx.ErrTxClosed) {
return errors.Wrap(err, "failed to rollback transaction")
}
if err == nil {
logger.DebugContext(ctx, "rolled back transaction")
}
r.tx = nil
return nil
}


@@ -0,0 +1,25 @@
package nodesale
import "github.com/btcsuite/btcd/txscript"
func extractTapScript(witness [][]byte) (tokenizer txscript.ScriptTokenizer, controlBlock *txscript.ControlBlock, isTapScript bool) {
witness = removeAnnexFromWitness(witness)
if len(witness) < 2 {
return txscript.ScriptTokenizer{}, nil, false
}
script := witness[len(witness)-2]
rawControl := witness[len(witness)-1]
parsedControl, err := txscript.ParseControlBlock(rawControl)
if err != nil {
return txscript.ScriptTokenizer{}, nil, false
}
return txscript.MakeScriptTokenizer(0, script), parsedControl, true
}
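// removeAnnexFromWitness drops the optional annex, which (when present) is the final witness element and begins with the taproot annex tag (0x50).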
func removeAnnexFromWitness(witness [][]byte) [][]byte {
if len(witness) >= 2 && len(witness[len(witness)-1]) > 0 && witness[len(witness)-1][0] == txscript.TaprootAnnexTag {
return witness[:len(witness)-1]
}
return witness
}


@@ -1,23 +1,29 @@
package httphandler
import (
"slices"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/uint128"
"github.com/gofiber/fiber/v2"
"github.com/samber/lo"
)
type getBalancesByAddressRequest struct {
type getBalancesRequest struct {
Wallet string `params:"wallet"`
Id string `query:"id"`
BlockHeight uint64 `query:"blockHeight"`
Limit int32 `query:"limit"`
Offset int32 `query:"offset"`
}
func (r getBalancesByAddressRequest) Validate() error {
const (
getBalancesMaxLimit = 5000
getBalancesDefaultLimit = 100
)
func (r getBalancesRequest) Validate() error {
var errList []error
if r.Wallet == "" {
errList = append(errList, errors.New("'wallet' is required"))
@@ -25,6 +31,12 @@ func (r getBalancesByAddressRequest) Validate() error {
if r.Id != "" && !isRuneIdOrRuneName(r.Id) {
errList = append(errList, errors.New("'id' is not valid rune id or rune name"))
}
if r.Limit < 0 {
errList = append(errList, errors.New("'limit' must be non-negative"))
}
if r.Limit > getBalancesMaxLimit {
errList = append(errList, errors.Errorf("'limit' cannot exceed %d", getBalancesMaxLimit))
}
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}
@@ -36,15 +48,15 @@ type balance struct {
Decimals uint8 `json:"decimals"`
}
type getBalancesByAddressResult struct {
type getBalancesResult struct {
List []balance `json:"list"`
BlockHeight uint64 `json:"blockHeight"`
}
type getBalancesByAddressResponse = HttpResponse[getBalancesByAddressResult]
type getBalancesResponse = HttpResponse[getBalancesResult]
func (h *HttpHandler) GetBalancesByAddress(ctx *fiber.Ctx) (err error) {
var req getBalancesByAddressRequest
func (h *HttpHandler) GetBalances(ctx *fiber.Ctx) (err error) {
var req getBalancesRequest
if err := ctx.ParamsParser(&req); err != nil {
return errors.WithStack(err)
}
@@ -54,6 +66,9 @@ func (h *HttpHandler) GetBalancesByAddress(ctx *fiber.Ctx) (err error) {
if err := req.Validate(); err != nil {
return errors.WithStack(err)
}
if req.Limit == 0 {
req.Limit = getBalancesDefaultLimit
}
pkScript, ok := resolvePkScript(h.network, req.Wallet)
if !ok {
@@ -64,49 +79,52 @@ func (h *HttpHandler) GetBalancesByAddress(ctx *fiber.Ctx) (err error) {
if blockHeight == 0 {
blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
if err != nil {
if errors.Is(err, errs.NotFound) {
return errs.NewPublicError("latest block not found")
}
return errors.Wrap(err, "error during GetLatestBlock")
}
blockHeight = uint64(blockHeader.Height)
}
balances, err := h.usecase.GetBalancesByPkScript(ctx.UserContext(), pkScript, blockHeight)
balances, err := h.usecase.GetBalancesByPkScript(ctx.UserContext(), pkScript, blockHeight, req.Limit, req.Offset)
if err != nil {
if errors.Is(err, errs.NotFound) {
return errs.NewPublicError("balances not found")
}
return errors.Wrap(err, "error during GetBalancesByPkScript")
}
runeId, ok := h.resolveRuneId(ctx.UserContext(), req.Id)
if ok {
// filter out balances that don't match the requested rune id
for key := range balances {
if key != runeId {
delete(balances, key)
}
}
balances = lo.Filter(balances, func(b *entity.Balance, _ int) bool {
return b.RuneId == runeId
})
}
balanceRuneIds := lo.Keys(balances)
balanceRuneIds := lo.Map(balances, func(b *entity.Balance, _ int) runes.RuneId {
return b.RuneId
})
runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx.UserContext(), balanceRuneIds)
if err != nil {
return errors.Wrap(err, "error during GetRuneEntryByRuneIdBatch")
}
balanceList := make([]balance, 0, len(balances))
for id, b := range balances {
runeEntry := runeEntries[id]
for _, b := range balances {
runeEntry := runeEntries[b.RuneId]
balanceList = append(balanceList, balance{
Amount: b.Amount,
Id: id,
Id: b.RuneId,
Name: runeEntry.SpacedRune,
Symbol: string(runeEntry.Symbol),
Decimals: runeEntry.Divisibility,
})
}
slices.SortFunc(balanceList, func(i, j balance) int {
return j.Amount.Cmp(i.Amount)
})
resp := getBalancesByAddressResponse{
Result: &getBalancesByAddressResult{
resp := getBalancesResponse{
Result: &getBalancesResult{
BlockHeight: blockHeight,
List: balanceList,
},


@@ -3,10 +3,11 @@ package httphandler
import (
"context"
"fmt"
"slices"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gofiber/fiber/v2"
"github.com/samber/lo"
"golang.org/x/sync/errgroup"
@@ -16,33 +17,49 @@ type getBalanceQuery struct {
Wallet string `json:"wallet"`
Id string `json:"id"`
BlockHeight uint64 `json:"blockHeight"`
Limit int32 `json:"limit"`
Offset int32 `json:"offset"`
}
type getBalancesByAddressBatchRequest struct {
type getBalancesBatchRequest struct {
Queries []getBalanceQuery `json:"queries"`
}
func (r getBalancesByAddressBatchRequest) Validate() error {
const getBalancesBatchMaxQueries = 100
func (r getBalancesBatchRequest) Validate() error {
var errList []error
for _, query := range r.Queries {
if len(r.Queries) == 0 {
errList = append(errList, errors.New("at least one query is required"))
}
if len(r.Queries) > getBalancesBatchMaxQueries {
errList = append(errList, errors.Errorf("cannot exceed %d queries", getBalancesBatchMaxQueries))
}
for i, query := range r.Queries {
if query.Wallet == "" {
errList = append(errList, errors.Errorf("queries[%d]: 'wallet' is required"))
errList = append(errList, errors.Errorf("queries[%d]: 'wallet' is required", i))
}
if query.Id != "" && !isRuneIdOrRuneName(query.Id) {
errList = append(errList, errors.Errorf("queries[%d]: 'id' is not valid rune id or rune name"))
errList = append(errList, errors.Errorf("queries[%d]: 'id' is not valid rune id or rune name", i))
}
if query.Limit < 0 {
errList = append(errList, errors.Errorf("queries[%d]: 'limit' must be non-negative", i))
}
if query.Limit > getBalancesMaxLimit {
errList = append(errList, errors.Errorf("queries[%d]: 'limit' cannot exceed %d", i, getBalancesMaxLimit))
}
}
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}
type getBalancesByAddressBatchResult struct {
List []*getBalancesByAddressResult `json:"list"`
type getBalancesBatchResult struct {
List []*getBalancesResult `json:"list"`
}
type getBalancesByAddressBatchResponse = HttpResponse[getBalancesByAddressBatchResult]
type getBalancesBatchResponse = HttpResponse[getBalancesBatchResult]
func (h *HttpHandler) GetBalancesByAddressBatch(ctx *fiber.Ctx) (err error) {
var req getBalancesByAddressBatchRequest
func (h *HttpHandler) GetBalancesBatch(ctx *fiber.Ctx) (err error) {
var req getBalancesBatchRequest
if err := ctx.BodyParser(&req); err != nil {
return errors.WithStack(err)
}
@@ -53,11 +70,14 @@ func (h *HttpHandler) GetBalancesByAddressBatch(ctx *fiber.Ctx) (err error) {
var latestBlockHeight uint64
blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
if err != nil {
if errors.Is(err, errs.NotFound) {
return errs.NewPublicError("latest block not found")
}
return errors.Wrap(err, "error during GetLatestBlock")
}
latestBlockHeight = uint64(blockHeader.Height)
processQuery := func(ctx context.Context, query getBalanceQuery, queryIndex int) (*getBalancesByAddressResult, error) {
processQuery := func(ctx context.Context, query getBalanceQuery, queryIndex int) (*getBalancesResult, error) {
pkScript, ok := resolvePkScript(h.network, query.Wallet)
if !ok {
return nil, errs.NewPublicError(fmt.Sprintf("unable to resolve pkscript from \"queries[%d].wallet\"", queryIndex))
@@ -68,50 +88,57 @@ func (h *HttpHandler) GetBalancesByAddressBatch(ctx *fiber.Ctx) (err error) {
blockHeight = latestBlockHeight
}
balances, err := h.usecase.GetBalancesByPkScript(ctx, pkScript, blockHeight)
if query.Limit == 0 {
query.Limit = getBalancesMaxLimit
}
balances, err := h.usecase.GetBalancesByPkScript(ctx, pkScript, blockHeight, query.Limit, query.Offset)
if err != nil {
if errors.Is(err, errs.NotFound) {
return nil, errs.NewPublicError("balances not found")
}
return nil, errors.Wrap(err, "error during GetBalancesByPkScript")
}
runeId, ok := h.resolveRuneId(ctx, query.Id)
if ok {
// filter out balances that don't match the requested rune id
for key := range balances {
if key != runeId {
delete(balances, key)
}
}
balances = lo.Filter(balances, func(b *entity.Balance, _ int) bool {
return b.RuneId == runeId
})
}
balanceRuneIds := lo.Keys(balances)
balanceRuneIds := lo.Map(balances, func(b *entity.Balance, _ int) runes.RuneId {
return b.RuneId
})
runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx, balanceRuneIds)
if err != nil {
if errors.Is(err, errs.NotFound) {
return nil, errs.NewPublicError("rune not found")
}
return nil, errors.Wrap(err, "error during GetRuneEntryByRuneIdBatch")
}
balanceList := make([]balance, 0, len(balances))
for id, b := range balances {
runeEntry := runeEntries[id]
for _, b := range balances {
runeEntry := runeEntries[b.RuneId]
balanceList = append(balanceList, balance{
Amount: b.Amount,
Id: id,
Id: b.RuneId,
Name: runeEntry.SpacedRune,
Symbol: string(runeEntry.Symbol),
Decimals: runeEntry.Divisibility,
})
}
slices.SortFunc(balanceList, func(i, j balance) int {
return j.Amount.Cmp(i.Amount)
})
result := getBalancesByAddressResult{
result := getBalancesResult{
BlockHeight: blockHeight,
List: balanceList,
}
return &result, nil
}
results := make([]*getBalancesByAddressResult, len(req.Queries))
results := make([]*getBalancesResult, len(req.Queries))
eg, ectx := errgroup.WithContext(ctx.UserContext())
for i, query := range req.Queries {
i := i
@@ -129,8 +156,8 @@ func (h *HttpHandler) GetBalancesByAddressBatch(ctx *fiber.Ctx) (err error) {
return errors.WithStack(err)
}
resp := getBalancesByAddressBatchResponse{
Result: &getBalancesByAddressBatchResult{
resp := getBalancesBatchResponse{
Result: &getBalancesBatchResult{
List: results,
},
}


@@ -1,28 +1,12 @@
package httphandler
import (
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/runes/constants"
"github.com/gofiber/fiber/v2"
)
var startingBlockHeader = map[common.Network]types.BlockHeader{
common.NetworkMainnet: {
Height: 839999,
Hash: *utils.Must(chainhash.NewHashFromStr("0000000000000000000172014ba58d66455762add0512355ad651207918494ab")),
PrevBlock: *utils.Must(chainhash.NewHashFromStr("00000000000000000001dcce6ce7c8a45872cafd1fb04732b447a14a91832591")),
},
common.NetworkTestnet: {
Height: 2583200,
Hash: *utils.Must(chainhash.NewHashFromStr("000000000006c5f0dfcd9e0e81f27f97a87aef82087ffe69cd3c390325bb6541")),
PrevBlock: *utils.Must(chainhash.NewHashFromStr("00000000000668f3bafac992f53424774515440cb47e1cb9e73af3f496139e28")),
},
}
type getCurrentBlockResult struct {
Hash string `json:"hash"`
Height int64 `json:"height"`
@@ -36,7 +20,7 @@ func (h *HttpHandler) GetCurrentBlock(ctx *fiber.Ctx) (err error) {
if !errors.Is(err, errs.NotFound) {
return errors.Wrap(err, "error during GetLatestBlock")
}
blockHeader = startingBlockHeader[h.network]
blockHeader = constants.StartingBlockHeader[h.network]
}
resp := getCurrentBlockResponse{


@@ -1,10 +1,13 @@
package httphandler
import (
"bytes"
"encoding/hex"
"slices"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/uint128"
"github.com/gofiber/fiber/v2"
@@ -14,13 +17,26 @@ import (
type getHoldersRequest struct {
Id string `params:"id"`
BlockHeight uint64 `query:"blockHeight"`
Limit int32 `query:"limit"`
Offset int32 `query:"offset"`
}
const (
getHoldersMaxLimit = 1000
getHoldersDefaultLimit = 100
)
func (r getHoldersRequest) Validate() error {
var errList []error
if !isRuneIdOrRuneName(r.Id) {
errList = append(errList, errors.New("'id' is not valid rune id or rune name"))
}
if r.Limit < 0 {
errList = append(errList, errors.New("'limit' must be non-negative"))
}
if r.Limit > getHoldersMaxLimit {
errList = append(errList, errors.Errorf("'limit' cannot exceed %d", getHoldersMaxLimit))
}
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}
@@ -35,6 +51,7 @@ type getHoldersResult struct {
BlockHeight uint64 `json:"blockHeight"`
TotalSupply uint128.Uint128 `json:"totalSupply"`
MintedAmount uint128.Uint128 `json:"mintedAmount"`
Decimals uint8 `json:"decimals"`
List []holdingBalance `json:"list"`
}
@@ -61,6 +78,10 @@ func (h *HttpHandler) GetHolders(ctx *fiber.Ctx) (err error) {
blockHeight = uint64(blockHeader.Height)
}
if req.Limit == 0 {
req.Limit = getHoldersDefaultLimit
}
var runeId runes.RuneId
if req.Id != "" {
var ok bool
@@ -72,10 +93,16 @@ func (h *HttpHandler) GetHolders(ctx *fiber.Ctx) (err error) {
runeEntry, err := h.usecase.GetRuneEntryByRuneIdAndHeight(ctx.UserContext(), runeId, blockHeight)
if err != nil {
return errors.Wrap(err, "error during GetHoldersByHeight")
if errors.Is(err, errs.NotFound) {
return errs.NewPublicError("rune not found")
}
return errors.Wrap(err, "error during GetRuneEntryByRuneIdAndHeight")
}
holdingBalances, err := h.usecase.GetBalancesByRuneId(ctx.UserContext(), runeId, blockHeight)
holdingBalances, err := h.usecase.GetBalancesByRuneId(ctx.UserContext(), runeId, blockHeight, req.Limit, req.Offset)
if err != nil {
if errors.Is(err, errs.NotFound) {
return errs.NewPublicError("balances not found")
}
return errors.Wrap(err, "error during GetBalancesByRuneId")
}
@@ -101,11 +128,20 @@ func (h *HttpHandler) GetHolders(ctx *fiber.Ctx) (err error) {
})
}
// sort by amount descending, then pk script ascending
slices.SortFunc(holdingBalances, func(b1, b2 *entity.Balance) int {
if b1.Amount.Cmp(b2.Amount) == 0 {
return bytes.Compare(b1.PkScript, b2.PkScript)
}
return b2.Amount.Cmp(b1.Amount)
})
resp := getHoldersResponse{
Result: &getHoldersResult{
BlockHeight: blockHeight,
TotalSupply: totalSupply,
MintedAmount: mintedAmount,
Decimals: runeEntry.Divisibility,
List: list,
},
}


@@ -83,6 +83,9 @@ func (h *HttpHandler) GetTokenInfo(ctx *fiber.Ctx) (err error) {
if blockHeight == 0 {
blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
if err != nil {
if errors.Is(err, errs.NotFound) {
return errs.NewPublicError("latest block not found")
}
return errors.Wrap(err, "error during GetLatestBlock")
}
blockHeight = uint64(blockHeader.Height)
@@ -99,10 +102,16 @@ func (h *HttpHandler) GetTokenInfo(ctx *fiber.Ctx) (err error) {
runeEntry, err := h.usecase.GetRuneEntryByRuneIdAndHeight(ctx.UserContext(), runeId, blockHeight)
if err != nil {
if errors.Is(err, errs.NotFound) {
return errs.NewPublicError("rune not found")
}
return errors.Wrap(err, "error during GetTokenInfoByHeight")
}
holdingBalances, err := h.usecase.GetBalancesByRuneId(ctx.UserContext(), runeId, blockHeight)
holdingBalances, err := h.usecase.GetBalancesByRuneId(ctx.UserContext(), runeId, blockHeight, -1, 0) // get all balances
if err != nil {
if errors.Is(err, errs.NotFound) {
return errs.NewPublicError("rune not found")
}
return errors.Wrap(err, "error during GetBalancesByRuneId")
}


@@ -1,6 +1,7 @@
package httphandler
import (
"cmp"
"encoding/hex"
"fmt"
"slices"
@@ -15,13 +16,19 @@ import (
)
type getTransactionsRequest struct {
Wallet string `query:"wallet"`
Id string `query:"id"`
FromBlock int64 `query:"fromBlock"`
ToBlock int64 `query:"toBlock"`
Wallet string `query:"wallet"`
Id string `query:"id"`
FromBlock int64 `query:"fromBlock"`
ToBlock int64 `query:"toBlock"`
Limit int32 `query:"limit"`
Offset int32 `query:"offset"`
}
const (
getTransactionsMaxLimit = 3000
getTransactionsDefaultLimit = 100
)
func (r getTransactionsRequest) Validate() error {
var errList []error
if r.Id != "" && !isRuneIdOrRuneName(r.Id) {
@@ -33,6 +40,12 @@ func (r getTransactionsRequest) Validate() error {
if r.ToBlock < -1 {
errList = append(errList, errors.Errorf("invalid toBlock range"))
}
if r.Limit < 0 {
errList = append(errList, errors.New("'limit' must be non-negative"))
}
if r.Limit > getTransactionsMaxLimit {
errList = append(errList, errors.Errorf("'limit' cannot exceed %d", getTransactionsMaxLimit))
}
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}
@@ -133,6 +146,9 @@ func (h *HttpHandler) GetTransactions(ctx *fiber.Ctx) (err error) {
return errs.NewPublicError("unable to resolve rune id from \"id\"")
}
}
if req.Limit == 0 {
req.Limit = getTransactionsDefaultLimit
}
// default to latest block
if req.ToBlock == 0 {
@@ -143,6 +159,9 @@ func (h *HttpHandler) GetTransactions(ctx *fiber.Ctx) (err error) {
if req.FromBlock == -1 || req.ToBlock == -1 {
blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
if err != nil {
if errors.Is(err, errs.NotFound) {
return errs.NewPublicError("latest block not found")
}
return errors.Wrap(err, "error during GetLatestBlock")
}
if req.FromBlock == -1 {
@@ -158,8 +177,11 @@ func (h *HttpHandler) GetTransactions(ctx *fiber.Ctx) (err error) {
return errs.NewPublicError(fmt.Sprintf("fromBlock must be less than or equal to toBlock, got fromBlock=%d, toBlock=%d", req.FromBlock, req.ToBlock))
}
txs, err := h.usecase.GetRuneTransactions(ctx.UserContext(), pkScript, runeId, uint64(req.FromBlock), uint64(req.ToBlock))
txs, err := h.usecase.GetRuneTransactions(ctx.UserContext(), pkScript, runeId, uint64(req.FromBlock), uint64(req.ToBlock), req.Limit, req.Offset)
if err != nil {
if errors.Is(err, errs.NotFound) {
return errs.NewPublicError("transactions not found")
}
return errors.Wrap(err, "error during GetRuneTransactions")
}
@@ -181,6 +203,9 @@ func (h *HttpHandler) GetTransactions(ctx *fiber.Ctx) (err error) {
allRuneIds = lo.Uniq(allRuneIds)
runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx.UserContext(), allRuneIds)
if err != nil {
if errors.Is(err, errs.NotFound) {
return errs.NewPublicError("rune entries not found")
}
return errors.Wrap(err, "error during GetRuneEntryByRuneIdBatch")
}
@@ -279,12 +304,12 @@ func (h *HttpHandler) GetTransactions(ctx *fiber.Ctx) (err error) {
}
txList = append(txList, respTx)
}
// sort by block height ASC, then index ASC
// sort by block height DESC, then index DESC
slices.SortFunc(txList, func(t1, t2 transaction) int {
if t1.BlockHeight != t2.BlockHeight {
return int(t1.BlockHeight - t2.BlockHeight)
return cmp.Compare(t2.BlockHeight, t1.BlockHeight)
}
return int(t1.Index - t2.Index)
return cmp.Compare(t2.Index, t1.Index)
})
resp := getTransactionsResponse{


@@ -2,7 +2,6 @@ package httphandler
import (
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
@@ -12,13 +11,20 @@ import (
"github.com/samber/lo"
)
type getUTXOsByAddressRequest struct {
type getUTXOsRequest struct {
Wallet string `params:"wallet"`
Id string `query:"id"`
BlockHeight uint64 `query:"blockHeight"`
Limit int32 `query:"limit"`
Offset int32 `query:"offset"`
}
func (r getUTXOsByAddressRequest) Validate() error {
const (
getUTXOsMaxLimit = 3000
getUTXOsDefaultLimit = 100
)
func (r getUTXOsRequest) Validate() error {
var errList []error
if r.Wallet == "" {
errList = append(errList, errors.New("'wallet' is required"))
@@ -26,6 +32,12 @@ func (r getUTXOsByAddressRequest) Validate() error {
if r.Id != "" && !isRuneIdOrRuneName(r.Id) {
errList = append(errList, errors.New("'id' is not valid rune id or rune name"))
}
if r.Limit < 0 {
errList = append(errList, errors.New("'limit' must be non-negative"))
}
if r.Limit > getUTXOsMaxLimit {
errList = append(errList, errors.Errorf("'limit' cannot exceed %d", getUTXOsMaxLimit))
}
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}
@@ -41,21 +53,22 @@ type utxoExtend struct {
Runes []runeBalance `json:"runes"`
}
type utxo struct {
type utxoItem struct {
TxHash chainhash.Hash `json:"txHash"`
OutputIndex uint32 `json:"outputIndex"`
Sats int64 `json:"sats"`
Extend utxoExtend `json:"extend"`
}
type getUTXOsByAddressResult struct {
List []utxo `json:"list"`
BlockHeight uint64 `json:"blockHeight"`
type getUTXOsResult struct {
List []utxoItem `json:"list"`
BlockHeight uint64 `json:"blockHeight"`
}
type getUTXOsByAddressResponse = HttpResponse[getUTXOsByAddressResult]
type getUTXOsResponse = HttpResponse[getUTXOsResult]
func (h *HttpHandler) GetUTXOsByAddress(ctx *fiber.Ctx) (err error) {
var req getUTXOsByAddressRequest
func (h *HttpHandler) GetUTXOs(ctx *fiber.Ctx) (err error) {
var req getUTXOsRequest
if err := ctx.ParamsParser(&req); err != nil {
return errors.WithStack(err)
}
@@ -71,36 +84,60 @@ func (h *HttpHandler) GetUTXOsByAddress(ctx *fiber.Ctx) (err error) {
return errs.NewPublicError("unable to resolve pkscript from \"wallet\"")
}
if req.Limit == 0 {
req.Limit = getUTXOsDefaultLimit
}
blockHeight := req.BlockHeight
if blockHeight == 0 {
blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
if err != nil {
if errors.Is(err, errs.NotFound) {
return errs.NewPublicError("latest block not found")
}
return errors.Wrap(err, "error during GetLatestBlock")
}
blockHeight = uint64(blockHeader.Height)
}
outPointBalances, err := h.usecase.GetUnspentOutPointBalancesByPkScript(ctx.UserContext(), pkScript, blockHeight)
if err != nil {
return errors.Wrap(err, "error during GetBalancesByPkScript")
var utxos []*entity.RunesUTXOWithSats
if runeId, ok := h.resolveRuneId(ctx.UserContext(), req.Id); ok {
utxos, err = h.usecase.GetRunesUTXOsByRuneIdAndPkScript(ctx.UserContext(), runeId, pkScript, blockHeight, req.Limit, req.Offset)
if err != nil {
if errors.Is(err, errs.NotFound) {
return errs.NewPublicError("utxos not found")
}
return errors.Wrap(err, "error during GetRunesUTXOsByRuneIdAndPkScript")
}
} else {
utxos, err = h.usecase.GetRunesUTXOsByPkScript(ctx.UserContext(), pkScript, blockHeight, req.Limit, req.Offset)
if err != nil {
if errors.Is(err, errs.NotFound) {
return errs.NewPublicError("utxos not found")
}
return errors.Wrap(err, "error during GetRunesUTXOsByPkScript")
}
}
outPointBalanceRuneIds := lo.Map(outPointBalances, func(outPointBalance *entity.OutPointBalance, _ int) runes.RuneId {
return outPointBalance.RuneId
})
runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx.UserContext(), outPointBalanceRuneIds)
runeIds := make(map[runes.RuneId]struct{}, 0)
for _, utxo := range utxos {
for _, balance := range utxo.RuneBalances {
runeIds[balance.RuneId] = struct{}{}
}
}
runeIdsList := lo.Keys(runeIds)
runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx.UserContext(), runeIdsList)
if err != nil {
if errors.Is(err, errs.NotFound) {
return errs.NewPublicError("rune entries not found")
}
return errors.Wrap(err, "error during GetRuneEntryByRuneIdBatch")
}
groupedBalances := lo.GroupBy(outPointBalances, func(outPointBalance *entity.OutPointBalance) wire.OutPoint {
return outPointBalance.OutPoint
})
utxoList := make([]utxo, 0, len(groupedBalances))
for outPoint, balances := range groupedBalances {
runeBalances := make([]runeBalance, 0, len(balances))
for _, balance := range balances {
utxoRespList := make([]utxoItem, 0, len(utxos))
for _, utxo := range utxos {
runeBalances := make([]runeBalance, 0, len(utxo.RuneBalances))
for _, balance := range utxo.RuneBalances {
runeEntry := runeEntries[balance.RuneId]
runeBalances = append(runeBalances, runeBalance{
RuneId: balance.RuneId,
@@ -111,34 +148,20 @@ func (h *HttpHandler) GetUTXOsByAddress(ctx *fiber.Ctx) (err error) {
})
}
utxoList = append(utxoList, utxo{
TxHash: outPoint.Hash,
OutputIndex: outPoint.Index,
utxoRespList = append(utxoRespList, utxoItem{
TxHash: utxo.OutPoint.Hash,
OutputIndex: utxo.OutPoint.Index,
Sats: utxo.Sats,
Extend: utxoExtend{
Runes: runeBalances,
},
})
}
// filter by req.Id if exists
{
runeId, ok := h.resolveRuneId(ctx.UserContext(), req.Id)
if ok {
utxoList = lo.Filter(utxoList, func(u utxo, _ int) bool {
for _, runeBalance := range u.Extend.Runes {
if runeBalance.RuneId == runeId {
return true
}
}
return false
})
}
}
resp := getUTXOsByAddressResponse{
Result: &getUTXOsByAddressResult{
resp := getUTXOsResponse{
Result: &getUTXOsResult{
BlockHeight: blockHeight,
List: utxoList,
List: utxoRespList,
},
}

View File
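The handler above defaults limit to getUTXOsDefaultLimit when it is 0 and rejects negative or oversized values; the same pattern can be factored into a small helper. A hedged sketch under an illustrative package name, not code from the repository:

package httputil // illustrative package name, not from the repository

import "fmt"

// normalizeLimit mirrors the pattern used by the paginated handlers:
// 0 falls back to the default page size, negatives and values above the
// maximum are rejected.
func normalizeLimit(limit, defaultLimit, maxLimit int32) (int32, error) {
    switch {
    case limit < 0:
        return 0, fmt.Errorf("'limit' must be non-negative")
    case limit > maxLimit:
        return 0, fmt.Errorf("'limit' cannot exceed %d", maxLimit)
    case limit == 0:
        return defaultLimit, nil
    default:
        return limit, nil
    }
}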

@@ -0,0 +1,92 @@
package httphandler
import (
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/indexer-network/modules/runes/usecase"
"github.com/gofiber/fiber/v2"
"github.com/samber/lo"
)
type getUTXOsOutputByLocationRequest struct {
TxHash string `params:"txHash"`
OutputIndex int32 `query:"outputIndex"`
}
func (r getUTXOsOutputByLocationRequest) Validate() error {
var errList []error
if r.TxHash == "" {
errList = append(errList, errors.New("'txHash' is required"))
}
if r.OutputIndex < 0 {
errList = append(errList, errors.New("'outputIndex' must be non-negative"))
}
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}
type getUTXOsOutputByTxIdResponse = HttpResponse[utxoItem]
func (h *HttpHandler) GetUTXOsOutputByLocation(ctx *fiber.Ctx) (err error) {
var req getUTXOsOutputByLocationRequest
if err := ctx.ParamsParser(&req); err != nil {
return errors.WithStack(err)
}
if err := ctx.QueryParser(&req); err != nil {
return errors.WithStack(err)
}
if err := req.Validate(); err != nil {
return errors.WithStack(err)
}
txHash, err := chainhash.NewHashFromStr(req.TxHash)
if err != nil {
return errs.WithPublicMessage(err, "unable to resolve txHash")
}
utxo, err := h.usecase.GetUTXOsOutputByLocation(ctx.UserContext(), *txHash, uint32(req.OutputIndex))
if err != nil {
if errors.Is(err, usecase.ErrUTXONotFound) {
return errs.NewPublicError("utxo not found")
}
return errors.WithStack(err)
}
runeIds := make(map[runes.RuneId]struct{}, 0)
for _, balance := range utxo.RuneBalances {
runeIds[balance.RuneId] = struct{}{}
}
runeIdsList := lo.Keys(runeIds)
runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx.UserContext(), runeIdsList)
if err != nil {
if errors.Is(err, errs.NotFound) {
return errs.NewPublicError("rune entries not found")
}
return errors.Wrap(err, "error during GetRuneEntryByRuneIdBatch")
}
runeBalances := make([]runeBalance, 0, len(utxo.RuneBalances))
for _, balance := range utxo.RuneBalances {
runeEntry := runeEntries[balance.RuneId]
runeBalances = append(runeBalances, runeBalance{
RuneId: balance.RuneId,
Rune: runeEntry.SpacedRune,
Symbol: string(runeEntry.Symbol),
Amount: balance.Amount,
Divisibility: runeEntry.Divisibility,
})
}
resp := getUTXOsOutputByTxIdResponse{
Result: &utxoItem{
TxHash: utxo.OutPoint.Hash,
OutputIndex: utxo.OutPoint.Index,
Sats: utxo.Sats,
Extend: utxoExtend{
Runes: runeBalances,
},
},
}
return errors.WithStack(ctx.JSON(resp))
}

View File
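The :txHash path parameter is parsed with chainhash.NewHashFromStr, which expects the usual big-endian display form of a txid (the form block explorers print). A minimal standalone sketch of that parse:

package main

import (
    "fmt"

    "github.com/btcsuite/btcd/chaincfg/chainhash"
)

func main() {
    // The display-form txid is reversed internally into the little-endian
    // byte order that chainhash.Hash stores.
    h, err := chainhash.NewHashFromStr("4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b")
    if err != nil {
        panic(err)
    }
    fmt.Println(h.String()) // prints the same display-form hex back
}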

@@ -0,0 +1,136 @@
package httphandler
import (
"context"
"fmt"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/indexer-network/modules/runes/usecase"
"github.com/gofiber/fiber/v2"
"github.com/samber/lo"
"golang.org/x/sync/errgroup"
)
type getUTXOsOutputByLocationQuery struct {
TxHash string `json:"txHash"`
OutputIndex int32 `json:"outputIndex"`
}
type getUTXOsOutputByLocationBatchRequest struct {
Queries []getUTXOsOutputByLocationQuery `json:"queries"`
}
const getUTXOsOutputByLocationBatchMaxQueries = 100
func (r getUTXOsOutputByLocationBatchRequest) Validate() error {
var errList []error
if len(r.Queries) == 0 {
errList = append(errList, errors.New("at least one query is required"))
}
if len(r.Queries) > getUTXOsOutputByLocationBatchMaxQueries {
errList = append(errList, errors.Errorf("cannot exceed %d queries", getUTXOsOutputByLocationBatchMaxQueries))
}
for i, query := range r.Queries {
if query.TxHash == "" {
errList = append(errList, errors.Errorf("queries[%d]: 'txHash' is required", i))
}
if query.OutputIndex < 0 {
errList = append(errList, errors.Errorf("queries[%d]: 'outputIndex' must be non-negative", i))
}
}
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}
type getUTXOsOutputByLocationBatchResult struct {
List []*utxoItem `json:"list"`
}
type getUTXOsOutputByLocationBatchResponse = HttpResponse[getUTXOsOutputByLocationBatchResult]
func (h *HttpHandler) GetUTXOsOutputByLocationBatch(ctx *fiber.Ctx) (err error) {
var req getUTXOsOutputByLocationBatchRequest
if err := ctx.BodyParser(&req); err != nil {
return errors.WithStack(err)
}
if err := req.Validate(); err != nil {
return errors.WithStack(err)
}
processQuery := func(ctx context.Context, query getUTXOsOutputByLocationQuery, queryIndex int) (*utxoItem, error) {
txHash, err := chainhash.NewHashFromStr(query.TxHash)
if err != nil {
return nil, errs.WithPublicMessage(err, fmt.Sprintf("unable to parse txHash from \"queries[%d].txHash\"", queryIndex))
}
utxo, err := h.usecase.GetUTXOsOutputByLocation(ctx, *txHash, uint32(query.OutputIndex))
if err != nil {
if errors.Is(err, usecase.ErrUTXONotFound) {
return nil, errs.NewPublicError(fmt.Sprintf("utxo not found for queries[%d]", queryIndex))
}
return nil, errors.WithStack(err)
}
runeIds := make(map[runes.RuneId]struct{}, 0)
for _, balance := range utxo.RuneBalances {
runeIds[balance.RuneId] = struct{}{}
}
runeIdsList := lo.Keys(runeIds)
runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx, runeIdsList)
if err != nil {
if errors.Is(err, errs.NotFound) {
return nil, errs.NewPublicError(fmt.Sprintf("rune entries not found for queries[%d]", queryIndex))
}
return nil, errors.Wrap(err, "error during GetRuneEntryByRuneIdBatch")
}
runeBalances := make([]runeBalance, 0, len(utxo.RuneBalances))
for _, balance := range utxo.RuneBalances {
runeEntry := runeEntries[balance.RuneId]
runeBalances = append(runeBalances, runeBalance{
RuneId: balance.RuneId,
Rune: runeEntry.SpacedRune,
Symbol: string(runeEntry.Symbol),
Amount: balance.Amount,
Divisibility: runeEntry.Divisibility,
})
}
return &utxoItem{
TxHash: utxo.OutPoint.Hash,
OutputIndex: utxo.OutPoint.Index,
Sats: utxo.Sats,
Extend: utxoExtend{
Runes: runeBalances,
},
}, nil
}
results := make([]*utxoItem, len(req.Queries))
eg, ectx := errgroup.WithContext(ctx.UserContext())
for i, query := range req.Queries {
i := i
query := query
eg.Go(func() error {
result, err := processQuery(ectx, query, i)
if err != nil {
return errors.Wrapf(err, "error during processQuery for query %d", i)
}
results[i] = result
return nil
})
}
if err := eg.Wait(); err != nil {
return errors.WithStack(err)
}
resp := getUTXOsOutputByLocationBatchResponse{
Result: &getUTXOsOutputByLocationBatchResult{
List: results,
},
}
return errors.WithStack(ctx.JSON(resp))
}

View File
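The batch handler fans each query out on an errgroup and writes the result into a per-query slot, so the response list keeps the request order without extra locking. A minimal sketch of that pattern in isolation:

package main

import (
    "context"
    "fmt"

    "golang.org/x/sync/errgroup"
)

func main() {
    queries := []int{1, 2, 3, 4}
    results := make([]int, len(queries)) // one slot per query, indexed by request order
    eg, ctx := errgroup.WithContext(context.Background())
    for i, q := range queries {
        i, q := i, q // loop-variable capture; unnecessary on Go 1.22+, kept to match the handler
        eg.Go(func() error {
            _ = ctx // a real worker would pass ctx to downstream calls
            results[i] = q * q
            return nil
        })
    }
    if err := eg.Wait(); err != nil {
        panic(err)
    }
    fmt.Println(results) // [1 4 9 16]
}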

@@ -41,6 +41,10 @@ func resolvePkScript(network common.Network, wallet string) ([]byte, bool) {
return &chaincfg.MainNetParams
case common.NetworkTestnet:
return &chaincfg.TestNet3Params
case common.NetworkFractalMainnet:
return &chaincfg.MainNetParams
case common.NetworkFractalTestnet:
return &chaincfg.MainNetParams
}
panic("invalid network")
}()

View File
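With this change, fractal mainnet and testnet wallets are decoded with Bitcoin mainnet chain parameters. A standalone sketch of the standard btcd flow for turning an address into a pkScript, using a mainnet address that also appears in the dust-limit tests below:

package main

import (
    "encoding/hex"
    "fmt"

    "github.com/btcsuite/btcd/btcutil"
    "github.com/btcsuite/btcd/chaincfg"
    "github.com/btcsuite/btcd/txscript"
)

func main() {
    addr, err := btcutil.DecodeAddress("bc1qfpgdxtpl7kz5qdus2pmexyjaza99c28q8uyczh", &chaincfg.MainNetParams)
    if err != nil {
        panic(err)
    }
    pkScript, err := txscript.PayToAddrScript(addr)
    if err != nil {
        panic(err)
    }
    fmt.Println(hex.EncodeToString(pkScript)) // 0014... (P2WPKH witness program)
}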

@@ -7,12 +7,14 @@ import (
func (h *HttpHandler) Mount(router fiber.Router) error {
r := router.Group("/v2/runes")
r.Post("/balances/wallet/batch", h.GetBalancesByAddressBatch)
r.Get("/balances/wallet/:wallet", h.GetBalancesByAddress)
r.Post("/balances/wallet/batch", h.GetBalancesBatch)
r.Get("/balances/wallet/:wallet", h.GetBalances)
r.Get("/transactions", h.GetTransactions)
r.Get("/holders/:id", h.GetHolders)
r.Get("/info/:id", h.GetTokenInfo)
r.Get("/utxos/wallet/:wallet", h.GetUTXOsByAddress)
r.Get("/utxos/wallet/:wallet", h.GetUTXOs)
r.Post("/utxos/output/batch", h.GetUTXOsOutputByLocationBatch)
r.Get("/utxos/output/:txHash", h.GetUTXOsOutputByLocation)
r.Get("/block", h.GetCurrentBlock)
return nil
}

View File

@@ -1,27 +0,0 @@
package runes
import (
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/core/types"
)
const (
Version = "v0.0.1"
DBVersion = 1
EventHashVersion = 1
)
var startingBlockHeader = map[common.Network]types.BlockHeader{
common.NetworkMainnet: {
Height: 839999,
Hash: *utils.Must(chainhash.NewHashFromStr("0000000000000000000172014ba58d66455762add0512355ad651207918494ab")),
PrevBlock: *utils.Must(chainhash.NewHashFromStr("00000000000000000001dcce6ce7c8a45872cafd1fb04732b447a14a91832591")),
},
common.NetworkTestnet: {
Height: 2583200,
Hash: *utils.Must(chainhash.NewHashFromStr("000000000006c5f0dfcd9e0e81f27f97a87aef82087ffe69cd3c390325bb6541")),
PrevBlock: *utils.Must(chainhash.NewHashFromStr("00000000000668f3bafac992f53424774515440cb47e1cb9e73af3f496139e28")),
},
}

View File

@@ -0,0 +1,126 @@
package constants
import (
"fmt"
"time"
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/uint128"
"github.com/samber/lo"
)
const (
Version = "v0.0.1"
DBVersion = 1
EventHashVersion = 1
)
var StartingBlockHeader = map[common.Network]types.BlockHeader{
common.NetworkMainnet: {
Height: 839999,
Hash: *utils.Must(chainhash.NewHashFromStr("0000000000000000000172014ba58d66455762add0512355ad651207918494ab")),
},
common.NetworkTestnet: {
Height: 2519999,
Hash: *utils.Must(chainhash.NewHashFromStr("000000000006f45c16402f05d9075db49d3571cf5273cf4cbeaa2aa295f7c833")),
},
common.NetworkFractalMainnet: {
Height: 83999,
Hash: *utils.Must(chainhash.NewHashFromStr("0000000000000000000000000000000000000000000000000000000000000000")), // TODO: Update this to match real hash
},
common.NetworkFractalTestnet: {
Height: 83999,
Hash: *utils.Must(chainhash.NewHashFromStr("00000000000000613ddfbdd1778b17cea3818febcbbf82762eafaa9461038343")),
},
}
type GenesisRuneConfig struct {
RuneId runes.RuneId
Name string
Number uint64
Divisibility uint8
Premine uint128.Uint128
SpacedRune runes.SpacedRune
Symbol rune
Terms *runes.Terms
Turbo bool
EtchingTxHash chainhash.Hash
EtchedAt time.Time
}
var GenesisRuneConfigMap = map[common.Network]GenesisRuneConfig{
common.NetworkMainnet: {
RuneId: runes.RuneId{BlockHeight: 1, TxIndex: 0},
Number: 0,
Divisibility: 0,
Premine: uint128.Zero,
SpacedRune: runes.NewSpacedRune(runes.NewRune(2055900680524219742), 0b10000000),
Symbol: '\u29c9',
Terms: &runes.Terms{
Amount: lo.ToPtr(uint128.From64(1)),
Cap: &uint128.Max,
HeightStart: lo.ToPtr(uint64(840000)),
HeightEnd: lo.ToPtr(uint64(1050000)),
OffsetStart: nil,
OffsetEnd: nil,
},
Turbo: true,
EtchingTxHash: chainhash.Hash{},
EtchedAt: time.Time{},
},
common.NetworkFractalMainnet: {
RuneId: runes.RuneId{BlockHeight: 1, TxIndex: 0},
Number: 0,
Divisibility: 0,
Premine: uint128.Zero,
SpacedRune: runes.NewSpacedRune(runes.NewRune(2055900680524219742), 0b10000000),
Symbol: '\u29c9',
Terms: &runes.Terms{
Amount: lo.ToPtr(uint128.From64(1)),
Cap: &uint128.Max,
HeightStart: lo.ToPtr(uint64(84000)),
HeightEnd: lo.ToPtr(uint64(2184000)),
OffsetStart: nil,
OffsetEnd: nil,
},
Turbo: true,
EtchingTxHash: chainhash.Hash{},
EtchedAt: time.Time{},
},
common.NetworkFractalTestnet: {
RuneId: runes.RuneId{BlockHeight: 1, TxIndex: 0},
Number: 0,
Divisibility: 0,
Premine: uint128.Zero,
SpacedRune: runes.NewSpacedRune(runes.NewRune(2055900680524219742), 0b10000000),
Symbol: '\u29c9',
Terms: &runes.Terms{
Amount: lo.ToPtr(uint128.From64(1)),
Cap: &uint128.Max,
HeightStart: lo.ToPtr(uint64(84000)),
HeightEnd: lo.ToPtr(uint64(2184000)),
OffsetStart: nil,
OffsetEnd: nil,
},
Turbo: true,
EtchingTxHash: chainhash.Hash{},
EtchedAt: time.Time{},
},
}
func NetworkHasGenesisRune(network common.Network) bool {
switch network {
case common.NetworkMainnet, common.NetworkFractalMainnet, common.NetworkFractalTestnet:
return true
case common.NetworkTestnet:
return false
default:
logger.Panic(fmt.Sprintf("unsupported network: %s", network))
return false
}
}

View File
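The processor changes further below consume these constants through NetworkHasGenesisRune and GenesisRuneConfigMap. A hedged helper sketch (illustrative, not repository code) showing the intended lookup:

package example // illustrative package, not part of the repository

import (
    "github.com/gaze-network/indexer-network/common"
    "github.com/gaze-network/indexer-network/modules/runes/constants"
)

// genesisRuneFor returns the genesis rune config for networks that declare one.
// Note: NetworkHasGenesisRune panics (via logger.Panic) on unsupported networks.
func genesisRuneFor(network common.Network) (constants.GenesisRuneConfig, bool) {
    if !constants.NetworkHasGenesisRune(network) {
        return constants.GenesisRuneConfig{}, false
    }
    cfg, ok := constants.GenesisRuneConfigMap[network]
    return cfg, ok
}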

@@ -118,5 +118,7 @@ CREATE TABLE IF NOT EXISTS "runes_balances" (
"amount" DECIMAL NOT NULL,
PRIMARY KEY ("pkscript", "rune_id", "block_height")
);
CREATE INDEX IF NOT EXISTS runes_balances_rune_id_block_height_idx ON "runes_balances" USING BTREE ("rune_id", "block_height");
CREATE INDEX IF NOT EXISTS runes_balances_pkscript_block_height_idx ON "runes_balances" USING BTREE ("pkscript", "block_height");
COMMIT;

View File

@@ -2,13 +2,13 @@
WITH balances AS (
SELECT DISTINCT ON (rune_id) * FROM runes_balances WHERE pkscript = $1 AND block_height <= $2 ORDER BY rune_id, block_height DESC
)
SELECT * FROM balances WHERE amount > 0;
SELECT * FROM balances WHERE amount > 0 ORDER BY amount DESC, rune_id LIMIT $3 OFFSET $4;
-- name: GetBalancesByRuneId :many
WITH balances AS (
SELECT DISTINCT ON (pkscript) * FROM runes_balances WHERE rune_id = $1 AND block_height <= $2 ORDER BY pkscript, block_height DESC
)
SELECT * FROM balances WHERE amount > 0;
SELECT * FROM balances WHERE amount > 0 ORDER BY amount DESC, pkscript LIMIT $3 OFFSET $4;
-- name: GetBalanceByPkScriptAndRuneId :one
SELECT * FROM runes_balances WHERE pkscript = $1 AND rune_id = $2 AND block_height <= $3 ORDER BY block_height DESC LIMIT 1;
@@ -16,8 +16,28 @@ SELECT * FROM runes_balances WHERE pkscript = $1 AND rune_id = $2 AND block_heig
-- name: GetOutPointBalancesAtOutPoint :many
SELECT * FROM runes_outpoint_balances WHERE tx_hash = $1 AND tx_idx = $2;
-- name: GetUnspentOutPointBalancesByPkScript :many
SELECT * FROM runes_outpoint_balances WHERE pkscript = @pkScript AND block_height <= @block_height AND (spent_height IS NULL OR spent_height > @block_height);
-- name: GetRunesUTXOsByPkScript :many
SELECT tx_hash, tx_idx, max("pkscript") as pkscript, array_agg("rune_id") as rune_ids, array_agg("amount") as amounts
FROM runes_outpoint_balances
WHERE
pkscript = @pkScript AND
block_height <= @block_height AND
(spent_height IS NULL OR spent_height > @block_height)
GROUP BY tx_hash, tx_idx
ORDER BY tx_hash, tx_idx
LIMIT $1 OFFSET $2;
-- name: GetRunesUTXOsByRuneIdAndPkScript :many
SELECT tx_hash, tx_idx, max("pkscript") as pkscript, array_agg("rune_id") as rune_ids, array_agg("amount") as amounts
FROM runes_outpoint_balances
WHERE
pkscript = @pkScript AND
block_height <= @block_height AND
(spent_height IS NULL OR spent_height > @block_height)
GROUP BY tx_hash, tx_idx
HAVING array_agg("rune_id") @> @rune_ids::text[]
ORDER BY tx_hash, tx_idx
LIMIT $1 OFFSET $2;
-- name: GetRuneEntriesByRuneIds :many
WITH states AS (
@@ -57,7 +77,12 @@ SELECT * FROM runes_transactions
) AND (
@from_block <= runes_transactions.block_height AND runes_transactions.block_height <= @to_block
)
ORDER BY runes_transactions.block_height DESC LIMIT 10000;
ORDER BY runes_transactions.block_height DESC, runes_transactions.index DESC LIMIT $1 OFFSET $2;
-- name: GetRuneTransaction :one
SELECT * FROM runes_transactions
LEFT JOIN runes_runestones ON runes_transactions.hash = runes_runestones.tx_hash
WHERE hash = $1 LIMIT 1;
-- name: CountRuneEntries :one
SELECT COUNT(*) FROM runes_entries;

View File

@@ -3,6 +3,7 @@ package datagateway
import (
"context"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
@@ -27,10 +28,12 @@ type RunesReaderDataGateway interface {
GetLatestBlock(ctx context.Context) (types.BlockHeader, error)
GetIndexedBlockByHeight(ctx context.Context, height int64) (*entity.IndexedBlock, error)
// GetRuneTransactions returns the runes transactions, filterable by pkScript, runeId and height. If pkScript, runeId or height is zero value, that filter is ignored.
GetRuneTransactions(ctx context.Context, pkScript []byte, runeId runes.RuneId, fromBlock, toBlock uint64) ([]*entity.RuneTransaction, error)
GetRuneTransactions(ctx context.Context, pkScript []byte, runeId runes.RuneId, fromBlock, toBlock uint64, limit int32, offset int32) ([]*entity.RuneTransaction, error)
GetRuneTransaction(ctx context.Context, txHash chainhash.Hash) (*entity.RuneTransaction, error)
GetRunesBalancesAtOutPoint(ctx context.Context, outPoint wire.OutPoint) (map[runes.RuneId]*entity.OutPointBalance, error)
GetUnspentOutPointBalancesByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64) ([]*entity.OutPointBalance, error)
GetRunesUTXOsByRuneIdAndPkScript(ctx context.Context, runeId runes.RuneId, pkScript []byte, blockHeight uint64, limit int32, offset int32) ([]*entity.RunesUTXO, error)
GetRunesUTXOsByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64, limit int32, offset int32) ([]*entity.RunesUTXO, error)
// GetRuneIdFromRune returns the RuneId for the given rune. Returns errs.NotFound if the rune entry is not found.
GetRuneIdFromRune(ctx context.Context, rune runes.Rune) (runes.RuneId, error)
// GetRuneEntryByRuneId returns the RuneEntry for the given runeId. Returns errs.NotFound if the rune entry is not found.
@@ -45,10 +48,12 @@ type RunesReaderDataGateway interface {
CountRuneEntries(ctx context.Context) (uint64, error)
// GetBalancesByPkScript returns the balances for the given pkScript at the given blockHeight.
GetBalancesByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64) (map[runes.RuneId]*entity.Balance, error)
// Use limit = -1 for no limit.
GetBalancesByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64, limit int32, offset int32) ([]*entity.Balance, error)
// GetBalancesByRuneId returns the balances for the given runeId at the given blockHeight.
// Cannot use []byte as map key, so we're returning as slice.
GetBalancesByRuneId(ctx context.Context, runeId runes.RuneId, blockHeight uint64) ([]*entity.Balance, error)
// Use limit = -1 for no limit.
GetBalancesByRuneId(ctx context.Context, runeId runes.RuneId, blockHeight uint64, limit int32, offset int32) ([]*entity.Balance, error)
// GetBalanceByPkScriptAndRuneId returns the balance for the given pkScript and runeId at the given blockHeight.
GetBalanceByPkScriptAndRuneId(ctx context.Context, pkScript []byte, runeId runes.RuneId, blockHeight uint64) (*entity.Balance, error)
}

View File
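A hedged usage sketch of the new gateway signatures: limit and offset page the result set, and limit = -1 keeps the previous unbounded behaviour (the repository clamps it internally). Package and helper names here are illustrative:

package example // illustrative; the internal/entity import compiles only from inside the module

import (
    "context"

    "github.com/cockroachdb/errors"
    "github.com/gaze-network/indexer-network/modules/runes/datagateway"
    "github.com/gaze-network/indexer-network/modules/runes/internal/entity"
)

// topBalances fetches the first page of balances; pass limit = -1 for no limit.
func topBalances(ctx context.Context, dg datagateway.RunesReaderDataGateway, pkScript []byte, blockHeight uint64) ([]*entity.Balance, error) {
    balances, err := dg.GetBalancesByPkScript(ctx, pkScript, blockHeight, 100, 0)
    if err != nil {
        return nil, errors.Wrap(err, "error during GetBalancesByPkScript")
    }
    return balances, nil
}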

@@ -11,6 +11,7 @@ import (
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/runes/constants"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/uint128"
@@ -28,7 +29,7 @@ func (p *Processor) calculateEventHash(header types.BlockHeader) (chainhash.Hash
func (p *Processor) getHashPayload(header types.BlockHeader) ([]byte, error) {
var sb strings.Builder
sb.WriteString("payload:v" + strconv.Itoa(EventHashVersion) + ":")
sb.WriteString("payload:v" + strconv.Itoa(constants.EventHashVersion) + ":")
sb.WriteString("blockHash:")
sb.Write(header.Hash[:])

View File

@@ -0,0 +1,23 @@
package entity
import (
"github.com/btcsuite/btcd/wire"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/uint128"
)
type RunesUTXOBalance struct {
RuneId runes.RuneId
Amount uint128.Uint128
}
type RunesUTXO struct {
PkScript []byte
OutPoint wire.OutPoint
RuneBalances []RunesUTXOBalance
}
type RunesUTXOWithSats struct {
RunesUTXO
Sats int64
}

View File
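RunesUTXOWithSats embeds RunesUTXO, so the outpoint and rune balances are promoted fields on the wrapper. A hedged construction sketch (illustrative package, not repository code):

package example // illustrative; the internal/entity import compiles only from inside the module

import (
    "github.com/btcsuite/btcd/chaincfg/chainhash"
    "github.com/btcsuite/btcd/wire"
    "github.com/gaze-network/indexer-network/modules/runes/internal/entity"
    "github.com/gaze-network/indexer-network/modules/runes/runes"
    "github.com/gaze-network/uint128"
)

// exampleUTXO builds a UTXO carrying 1,000 base units of a rune and 546 sats.
func exampleUTXO(txHash chainhash.Hash, runeId runes.RuneId) entity.RunesUTXOWithSats {
    return entity.RunesUTXOWithSats{
        RunesUTXO: entity.RunesUTXO{
            OutPoint:     wire.OutPoint{Hash: txHash, Index: 0},
            RuneBalances: []entity.RunesUTXOBalance{{RuneId: runeId, Amount: uint128.From64(1000)}},
        },
        Sats: 546,
    }
}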

@@ -4,13 +4,13 @@ import (
"context"
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/indexer"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/runes/constants"
"github.com/gaze-network/indexer-network/modules/runes/datagateway"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
"github.com/gaze-network/indexer-network/modules/runes/runes"
@@ -19,7 +19,6 @@ import (
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
"github.com/gaze-network/indexer-network/pkg/reportingclient"
"github.com/gaze-network/uint128"
"github.com/samber/lo"
)
// Make sure to implement the Bitcoin Processor interface
@@ -68,8 +67,8 @@ func (p *Processor) VerifyStates(ctx context.Context) error {
if err := p.ensureValidState(ctx); err != nil {
return errors.Wrap(err, "error during ensureValidState")
}
if p.network == common.NetworkMainnet {
if err := p.ensureGenesisRune(ctx); err != nil {
if constants.NetworkHasGenesisRune(p.network) {
if err := p.ensureGenesisRune(ctx, p.network); err != nil {
return errors.Wrap(err, "error during ensureGenesisRune")
}
}
@@ -89,17 +88,17 @@ func (p *Processor) ensureValidState(ctx context.Context) error {
// if not found, set indexer state
if errors.Is(err, errs.NotFound) {
if err := p.indexerInfoDg.SetIndexerState(ctx, entity.IndexerState{
DBVersion: DBVersion,
EventHashVersion: EventHashVersion,
DBVersion: constants.DBVersion,
EventHashVersion: constants.EventHashVersion,
}); err != nil {
return errors.Wrap(err, "failed to set indexer state")
}
} else {
if indexerState.DBVersion != DBVersion {
return errors.Wrapf(errs.ConflictSetting, "db version mismatch: current version is %d. Please upgrade to version %d", indexerState.DBVersion, DBVersion)
if indexerState.DBVersion != constants.DBVersion {
return errors.Wrapf(errs.ConflictSetting, "db version mismatch: current version is %d. Please upgrade to version %d", indexerState.DBVersion, constants.DBVersion)
}
if indexerState.EventHashVersion != EventHashVersion {
return errors.Wrapf(errs.ConflictSetting, "event version mismatch: current version is %d. Please reset rune's db first.", indexerState.EventHashVersion, EventHashVersion)
if indexerState.EventHashVersion != constants.EventHashVersion {
return errors.Wrapf(errs.ConflictSetting, "event version mismatch: current version is %d, expected %d. Please reset rune's db first.", indexerState.EventHashVersion, constants.EventHashVersion)
}
}
@@ -119,39 +118,34 @@ func (p *Processor) ensureValidState(ctx context.Context) error {
return nil
}
var genesisRuneId = runes.RuneId{BlockHeight: 1, TxIndex: 0}
func (p *Processor) ensureGenesisRune(ctx context.Context) error {
_, err := p.runesDg.GetRuneEntryByRuneId(ctx, genesisRuneId)
func (p *Processor) ensureGenesisRune(ctx context.Context, network common.Network) error {
genesisRuneConfig, ok := constants.GenesisRuneConfigMap[network]
if !ok {
logger.Panic("genesis rune config not found", slogx.Stringer("network", network))
}
_, err := p.runesDg.GetRuneEntryByRuneId(ctx, genesisRuneConfig.RuneId)
if err != nil && !errors.Is(err, errs.NotFound) {
return errors.Wrap(err, "failed to get genesis rune entry")
}
if errors.Is(err, errs.NotFound) {
runeEntry := &runes.RuneEntry{
RuneId: genesisRuneId,
Number: 0,
Divisibility: 0,
Premine: uint128.Zero,
SpacedRune: runes.NewSpacedRune(runes.NewRune(2055900680524219742), 0b10000000),
Symbol: '\u29c9',
Terms: &runes.Terms{
Amount: lo.ToPtr(uint128.From64(1)),
Cap: &uint128.Max,
HeightStart: lo.ToPtr(uint64(common.HalvingInterval * 4)),
HeightEnd: lo.ToPtr(uint64(common.HalvingInterval * 5)),
OffsetStart: nil,
OffsetEnd: nil,
},
Turbo: true,
RuneId: genesisRuneConfig.RuneId,
Number: genesisRuneConfig.Number,
Divisibility: genesisRuneConfig.Divisibility,
Premine: genesisRuneConfig.Premine,
SpacedRune: genesisRuneConfig.SpacedRune,
Symbol: genesisRuneConfig.Symbol,
Terms: genesisRuneConfig.Terms,
Turbo: genesisRuneConfig.Turbo,
Mints: uint128.Zero,
BurnedAmount: uint128.Zero,
CompletedAt: time.Time{},
CompletedAtHeight: nil,
EtchingBlock: 1,
EtchingTxHash: chainhash.Hash{},
EtchedAt: time.Time{},
EtchingBlock: genesisRuneConfig.RuneId.BlockHeight,
EtchingTxHash: genesisRuneConfig.EtchingTxHash,
EtchedAt: genesisRuneConfig.EtchedAt,
}
if err := p.runesDg.CreateRuneEntry(ctx, runeEntry, genesisRuneId.BlockHeight); err != nil {
if err := p.runesDg.CreateRuneEntry(ctx, runeEntry, genesisRuneConfig.RuneId.BlockHeight); err != nil {
return errors.Wrap(err, "failed to create genesis rune entry")
}
}
@@ -166,7 +160,7 @@ func (p *Processor) CurrentBlock(ctx context.Context) (types.BlockHeader, error)
blockHeader, err := p.runesDg.GetLatestBlock(ctx)
if err != nil {
if errors.Is(err, errs.NotFound) {
return startingBlockHeader[p.network], nil
return constants.StartingBlockHeader[p.network], nil
}
return types.BlockHeader{}, errors.Wrap(err, "failed to get latest block")
}

View File

@@ -13,6 +13,7 @@ import (
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/runes/constants"
"github.com/gaze-network/indexer-network/modules/runes/datagateway"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
"github.com/gaze-network/indexer-network/modules/runes/runes"
@@ -687,10 +688,10 @@ func (p *Processor) flushBlock(ctx context.Context, blockHeader types.BlockHeade
return errors.Wrap(err, "failed to calculate event hash")
}
prevIndexedBlock, err := runesDgTx.GetIndexedBlockByHeight(ctx, blockHeader.Height-1)
if err != nil && errors.Is(err, errs.NotFound) && blockHeader.Height-1 == startingBlockHeader[p.network].Height {
if err != nil && errors.Is(err, errs.NotFound) && blockHeader.Height-1 == constants.StartingBlockHeader[p.network].Height {
prevIndexedBlock = &entity.IndexedBlock{
Height: startingBlockHeader[p.network].Height,
Hash: startingBlockHeader[p.network].Hash,
Height: constants.StartingBlockHeader[p.network].Height,
Hash: constants.StartingBlockHeader[p.network].Hash,
EventHash: chainhash.Hash{},
CumulativeEventHash: chainhash.Hash{},
}
@@ -791,9 +792,9 @@ func (p *Processor) flushBlock(ctx context.Context, blockHeader types.BlockHeade
if p.reportingClient != nil {
if err := p.reportingClient.SubmitBlockReport(ctx, reportingclient.SubmitBlockReportPayload{
Type: "runes",
ClientVersion: Version,
DBVersion: DBVersion,
EventHashVersion: EventHashVersion,
ClientVersion: constants.Version,
DBVersion: constants.DBVersion,
EventHashVersion: constants.EventHashVersion,
Network: p.network,
BlockHeight: uint64(blockHeader.Height),
BlockHash: blockHeader.Hash,

View File

@@ -296,12 +296,14 @@ const getBalancesByPkScript = `-- name: GetBalancesByPkScript :many
WITH balances AS (
SELECT DISTINCT ON (rune_id) pkscript, block_height, rune_id, amount FROM runes_balances WHERE pkscript = $1 AND block_height <= $2 ORDER BY rune_id, block_height DESC
)
SELECT pkscript, block_height, rune_id, amount FROM balances WHERE amount > 0
SELECT pkscript, block_height, rune_id, amount FROM balances WHERE amount > 0 ORDER BY amount DESC, rune_id LIMIT $3 OFFSET $4
`
type GetBalancesByPkScriptParams struct {
Pkscript string
BlockHeight int32
Limit int32
Offset int32
}
type GetBalancesByPkScriptRow struct {
@@ -312,7 +314,12 @@ type GetBalancesByPkScriptRow struct {
}
func (q *Queries) GetBalancesByPkScript(ctx context.Context, arg GetBalancesByPkScriptParams) ([]GetBalancesByPkScriptRow, error) {
rows, err := q.db.Query(ctx, getBalancesByPkScript, arg.Pkscript, arg.BlockHeight)
rows, err := q.db.Query(ctx, getBalancesByPkScript,
arg.Pkscript,
arg.BlockHeight,
arg.Limit,
arg.Offset,
)
if err != nil {
return nil, err
}
@@ -340,12 +347,14 @@ const getBalancesByRuneId = `-- name: GetBalancesByRuneId :many
WITH balances AS (
SELECT DISTINCT ON (pkscript) pkscript, block_height, rune_id, amount FROM runes_balances WHERE rune_id = $1 AND block_height <= $2 ORDER BY pkscript, block_height DESC
)
SELECT pkscript, block_height, rune_id, amount FROM balances WHERE amount > 0
SELECT pkscript, block_height, rune_id, amount FROM balances WHERE amount > 0 ORDER BY amount DESC, pkscript LIMIT $3 OFFSET $4
`
type GetBalancesByRuneIdParams struct {
RuneID string
BlockHeight int32
Limit int32
Offset int32
}
type GetBalancesByRuneIdRow struct {
@@ -356,7 +365,12 @@ type GetBalancesByRuneIdRow struct {
}
func (q *Queries) GetBalancesByRuneId(ctx context.Context, arg GetBalancesByRuneIdParams) ([]GetBalancesByRuneIdRow, error) {
rows, err := q.db.Query(ctx, getBalancesByRuneId, arg.RuneID, arg.BlockHeight)
rows, err := q.db.Query(ctx, getBalancesByRuneId,
arg.RuneID,
arg.BlockHeight,
arg.Limit,
arg.Offset,
)
if err != nil {
return nil, err
}
@@ -631,27 +645,106 @@ func (q *Queries) GetRuneIdFromRune(ctx context.Context, rune string) (string, e
return rune_id, err
}
const getRuneTransaction = `-- name: GetRuneTransaction :one
SELECT hash, runes_transactions.block_height, index, timestamp, inputs, outputs, mints, burns, rune_etched, tx_hash, runes_runestones.block_height, etching, etching_divisibility, etching_premine, etching_rune, etching_spacers, etching_symbol, etching_terms, etching_terms_amount, etching_terms_cap, etching_terms_height_start, etching_terms_height_end, etching_terms_offset_start, etching_terms_offset_end, etching_turbo, edicts, mint, pointer, cenotaph, flaws FROM runes_transactions
LEFT JOIN runes_runestones ON runes_transactions.hash = runes_runestones.tx_hash
WHERE hash = $1 LIMIT 1
`
type GetRuneTransactionRow struct {
Hash string
BlockHeight int32
Index int32
Timestamp pgtype.Timestamp
Inputs []byte
Outputs []byte
Mints []byte
Burns []byte
RuneEtched bool
TxHash pgtype.Text
BlockHeight_2 pgtype.Int4
Etching pgtype.Bool
EtchingDivisibility pgtype.Int2
EtchingPremine pgtype.Numeric
EtchingRune pgtype.Text
EtchingSpacers pgtype.Int4
EtchingSymbol pgtype.Int4
EtchingTerms pgtype.Bool
EtchingTermsAmount pgtype.Numeric
EtchingTermsCap pgtype.Numeric
EtchingTermsHeightStart pgtype.Int4
EtchingTermsHeightEnd pgtype.Int4
EtchingTermsOffsetStart pgtype.Int4
EtchingTermsOffsetEnd pgtype.Int4
EtchingTurbo pgtype.Bool
Edicts []byte
Mint pgtype.Text
Pointer pgtype.Int4
Cenotaph pgtype.Bool
Flaws pgtype.Int4
}
func (q *Queries) GetRuneTransaction(ctx context.Context, hash string) (GetRuneTransactionRow, error) {
row := q.db.QueryRow(ctx, getRuneTransaction, hash)
var i GetRuneTransactionRow
err := row.Scan(
&i.Hash,
&i.BlockHeight,
&i.Index,
&i.Timestamp,
&i.Inputs,
&i.Outputs,
&i.Mints,
&i.Burns,
&i.RuneEtched,
&i.TxHash,
&i.BlockHeight_2,
&i.Etching,
&i.EtchingDivisibility,
&i.EtchingPremine,
&i.EtchingRune,
&i.EtchingSpacers,
&i.EtchingSymbol,
&i.EtchingTerms,
&i.EtchingTermsAmount,
&i.EtchingTermsCap,
&i.EtchingTermsHeightStart,
&i.EtchingTermsHeightEnd,
&i.EtchingTermsOffsetStart,
&i.EtchingTermsOffsetEnd,
&i.EtchingTurbo,
&i.Edicts,
&i.Mint,
&i.Pointer,
&i.Cenotaph,
&i.Flaws,
)
return i, err
}
const getRuneTransactions = `-- name: GetRuneTransactions :many
SELECT hash, runes_transactions.block_height, index, timestamp, inputs, outputs, mints, burns, rune_etched, tx_hash, runes_runestones.block_height, etching, etching_divisibility, etching_premine, etching_rune, etching_spacers, etching_symbol, etching_terms, etching_terms_amount, etching_terms_cap, etching_terms_height_start, etching_terms_height_end, etching_terms_offset_start, etching_terms_offset_end, etching_turbo, edicts, mint, pointer, cenotaph, flaws FROM runes_transactions
LEFT JOIN runes_runestones ON runes_transactions.hash = runes_runestones.tx_hash
WHERE (
$1::BOOLEAN = FALSE -- if @filter_pk_script is TRUE, apply pk_script filter
OR runes_transactions.outputs @> $2::JSONB
OR runes_transactions.inputs @> $2::JSONB
) AND (
$3::BOOLEAN = FALSE -- if @filter_rune_id is TRUE, apply rune_id filter
$3::BOOLEAN = FALSE -- if @filter_pk_script is TRUE, apply pk_script filter
OR runes_transactions.outputs @> $4::JSONB
OR runes_transactions.inputs @> $4::JSONB
OR runes_transactions.mints ? $5
OR runes_transactions.burns ? $5
OR (runes_transactions.rune_etched = TRUE AND runes_transactions.block_height = $6 AND runes_transactions.index = $7)
OR runes_transactions.inputs @> $4::JSONB
) AND (
$8 <= runes_transactions.block_height AND runes_transactions.block_height <= $9
$5::BOOLEAN = FALSE -- if @filter_rune_id is TRUE, apply rune_id filter
OR runes_transactions.outputs @> $6::JSONB
OR runes_transactions.inputs @> $6::JSONB
OR runes_transactions.mints ? $7
OR runes_transactions.burns ? $7
OR (runes_transactions.rune_etched = TRUE AND runes_transactions.block_height = $8 AND runes_transactions.index = $9)
) AND (
$10 <= runes_transactions.block_height AND runes_transactions.block_height <= $11
)
ORDER BY runes_transactions.block_height DESC LIMIT 10000
ORDER BY runes_transactions.block_height DESC, runes_transactions.index DESC LIMIT $1 OFFSET $2
`
type GetRuneTransactionsParams struct {
Limit int32
Offset int32
FilterPkScript bool
PkScriptParam []byte
FilterRuneID bool
@@ -698,6 +791,8 @@ type GetRuneTransactionsRow struct {
func (q *Queries) GetRuneTransactions(ctx context.Context, arg GetRuneTransactionsParams) ([]GetRuneTransactionsRow, error) {
rows, err := q.db.Query(ctx, getRuneTransactions,
arg.Limit,
arg.Offset,
arg.FilterPkScript,
arg.PkScriptParam,
arg.FilterRuneID,
@@ -757,32 +852,114 @@ func (q *Queries) GetRuneTransactions(ctx context.Context, arg GetRuneTransactio
return items, nil
}
const getUnspentOutPointBalancesByPkScript = `-- name: GetUnspentOutPointBalancesByPkScript :many
SELECT rune_id, pkscript, tx_hash, tx_idx, amount, block_height, spent_height FROM runes_outpoint_balances WHERE pkscript = $1 AND block_height <= $2 AND (spent_height IS NULL OR spent_height > $2)
const getRunesUTXOsByPkScript = `-- name: GetRunesUTXOsByPkScript :many
SELECT tx_hash, tx_idx, max("pkscript") as pkscript, array_agg("rune_id") as rune_ids, array_agg("amount") as amounts
FROM runes_outpoint_balances
WHERE
pkscript = $3 AND
block_height <= $4 AND
(spent_height IS NULL OR spent_height > $4)
GROUP BY tx_hash, tx_idx
ORDER BY tx_hash, tx_idx
LIMIT $1 OFFSET $2
`
type GetUnspentOutPointBalancesByPkScriptParams struct {
type GetRunesUTXOsByPkScriptParams struct {
Limit int32
Offset int32
Pkscript string
BlockHeight int32
}
func (q *Queries) GetUnspentOutPointBalancesByPkScript(ctx context.Context, arg GetUnspentOutPointBalancesByPkScriptParams) ([]RunesOutpointBalance, error) {
rows, err := q.db.Query(ctx, getUnspentOutPointBalancesByPkScript, arg.Pkscript, arg.BlockHeight)
type GetRunesUTXOsByPkScriptRow struct {
TxHash string
TxIdx int32
Pkscript interface{}
RuneIds interface{}
Amounts interface{}
}
func (q *Queries) GetRunesUTXOsByPkScript(ctx context.Context, arg GetRunesUTXOsByPkScriptParams) ([]GetRunesUTXOsByPkScriptRow, error) {
rows, err := q.db.Query(ctx, getRunesUTXOsByPkScript,
arg.Limit,
arg.Offset,
arg.Pkscript,
arg.BlockHeight,
)
if err != nil {
return nil, err
}
defer rows.Close()
var items []RunesOutpointBalance
var items []GetRunesUTXOsByPkScriptRow
for rows.Next() {
var i RunesOutpointBalance
var i GetRunesUTXOsByPkScriptRow
if err := rows.Scan(
&i.RuneID,
&i.Pkscript,
&i.TxHash,
&i.TxIdx,
&i.Amount,
&i.BlockHeight,
&i.SpentHeight,
&i.Pkscript,
&i.RuneIds,
&i.Amounts,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const getRunesUTXOsByRuneIdAndPkScript = `-- name: GetRunesUTXOsByRuneIdAndPkScript :many
SELECT tx_hash, tx_idx, max("pkscript") as pkscript, array_agg("rune_id") as rune_ids, array_agg("amount") as amounts
FROM runes_outpoint_balances
WHERE
pkscript = $3 AND
block_height <= $4 AND
(spent_height IS NULL OR spent_height > $4)
GROUP BY tx_hash, tx_idx
HAVING array_agg("rune_id") @> $5::text[]
ORDER BY tx_hash, tx_idx
LIMIT $1 OFFSET $2
`
type GetRunesUTXOsByRuneIdAndPkScriptParams struct {
Limit int32
Offset int32
Pkscript string
BlockHeight int32
RuneIds []string
}
type GetRunesUTXOsByRuneIdAndPkScriptRow struct {
TxHash string
TxIdx int32
Pkscript interface{}
RuneIds interface{}
Amounts interface{}
}
func (q *Queries) GetRunesUTXOsByRuneIdAndPkScript(ctx context.Context, arg GetRunesUTXOsByRuneIdAndPkScriptParams) ([]GetRunesUTXOsByRuneIdAndPkScriptRow, error) {
rows, err := q.db.Query(ctx, getRunesUTXOsByRuneIdAndPkScript,
arg.Limit,
arg.Offset,
arg.Pkscript,
arg.BlockHeight,
arg.RuneIds,
)
if err != nil {
return nil, err
}
defer rows.Close()
var items []GetRunesUTXOsByRuneIdAndPkScriptRow
for rows.Next() {
var i GetRunesUTXOsByRuneIdAndPkScriptRow
if err := rows.Scan(
&i.TxHash,
&i.TxIdx,
&i.Pkscript,
&i.RuneIds,
&i.Amounts,
); err != nil {
return nil, err
}

View File

@@ -638,6 +638,72 @@ func mapIndexedBlockTypeToParams(src entity.IndexedBlock) (gen.CreateIndexedBloc
}, nil
}
func mapRunesUTXOModelToType(src gen.GetRunesUTXOsByPkScriptRow) (entity.RunesUTXO, error) {
pkScriptRaw, ok := src.Pkscript.(string)
if !ok {
return entity.RunesUTXO{}, errors.New("pkscript from database is not string")
}
pkScript, err := hex.DecodeString(pkScriptRaw)
if err != nil {
return entity.RunesUTXO{}, errors.Wrap(err, "failed to parse pkscript")
}
txHash, err := chainhash.NewHashFromStr(src.TxHash)
if err != nil {
return entity.RunesUTXO{}, errors.Wrap(err, "failed to parse tx hash")
}
runeIdsRaw, ok := src.RuneIds.([]interface{})
if !ok {
return entity.RunesUTXO{}, errors.New("src.RuneIds is not a slice")
}
runeIds := make([]string, 0, len(runeIdsRaw))
for i, raw := range runeIdsRaw {
runeId, ok := raw.(string)
if !ok {
return entity.RunesUTXO{}, errors.Errorf("src.RuneIds[%d] is not a string", i)
}
runeIds = append(runeIds, runeId)
}
amountsRaw, ok := src.Amounts.([]interface{})
if !ok {
return entity.RunesUTXO{}, errors.New("amounts from database is not a slice")
}
amounts := make([]pgtype.Numeric, 0, len(amountsRaw))
for i, raw := range amountsRaw {
amount, ok := raw.(pgtype.Numeric)
if !ok {
return entity.RunesUTXO{}, errors.Errorf("src.Amounts[%d] is not pgtype.Numeric", i)
}
amounts = append(amounts, amount)
}
if len(runeIds) != len(amounts) {
return entity.RunesUTXO{}, errors.New("rune ids and amounts have different lengths")
}
runesBalances := make([]entity.RunesUTXOBalance, 0, len(runeIds))
for i := range runeIds {
runeId, err := runes.NewRuneIdFromString(runeIds[i])
if err != nil {
return entity.RunesUTXO{}, errors.Wrap(err, "failed to parse rune id")
}
amount, err := uint128FromNumeric(amounts[i])
if err != nil {
return entity.RunesUTXO{}, errors.Wrap(err, "failed to parse amount")
}
runesBalances = append(runesBalances, entity.RunesUTXOBalance{
RuneId: runeId,
Amount: lo.FromPtr(amount),
})
}
return entity.RunesUTXO{
PkScript: pkScript,
OutPoint: wire.OutPoint{
Hash: *txHash,
Index: uint32(src.TxIdx),
},
RuneBalances: runesBalances,
}, nil
}
func mapOutPointBalanceModelToType(src gen.RunesOutpointBalance) (entity.OutPointBalance, error) {
runeId, err := runes.NewRuneIdFromString(src.RuneID)
if err != nil {

View File

@@ -4,6 +4,7 @@ import (
"context"
"encoding/hex"
"fmt"
"math"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
@@ -62,7 +63,18 @@ func (r *Repository) GetIndexedBlockByHeight(ctx context.Context, height int64)
return indexedBlock, nil
}
func (r *Repository) GetRuneTransactions(ctx context.Context, pkScript []byte, runeId runes.RuneId, fromBlock, toBlock uint64) ([]*entity.RuneTransaction, error) {
const maxRuneTransactionsLimit = 10000 // temporary limit to prevent large queries from overwhelming the database
func (r *Repository) GetRuneTransactions(ctx context.Context, pkScript []byte, runeId runes.RuneId, fromBlock, toBlock uint64, limit int32, offset int32) ([]*entity.RuneTransaction, error) {
if limit == -1 {
limit = maxRuneTransactionsLimit
}
if limit < 0 {
return nil, errors.Wrap(errs.InvalidArgument, "limit must be -1 or non-negative")
}
if limit > maxRuneTransactionsLimit {
return nil, errors.Wrapf(errs.InvalidArgument, "limit cannot exceed %d", maxRuneTransactionsLimit)
}
pkScriptParam := []byte(fmt.Sprintf(`[{"pkScript":"%s"}]`, hex.EncodeToString(pkScript)))
runeIdParam := []byte(fmt.Sprintf(`[{"runeId":"%s"}]`, runeId.String()))
rows, err := r.queries.GetRuneTransactions(ctx, gen.GetRuneTransactionsParams{
@@ -77,6 +89,9 @@ func (r *Repository) GetRuneTransactions(ctx context.Context, pkScript []byte, r
FromBlock: int32(fromBlock),
ToBlock: int32(toBlock),
Limit: limit,
Offset: offset,
})
if err != nil {
return nil, errors.Wrap(err, "error during query")
@@ -105,6 +120,33 @@ func (r *Repository) GetRuneTransactions(ctx context.Context, pkScript []byte, r
return runeTxs, nil
}
func (r *Repository) GetRuneTransaction(ctx context.Context, txHash chainhash.Hash) (*entity.RuneTransaction, error) {
row, err := r.queries.GetRuneTransaction(ctx, txHash.String())
if errors.Is(err, pgx.ErrNoRows) {
return nil, errors.WithStack(errs.NotFound)
}
if err != nil {
return nil, errors.Wrap(err, "error during query")
}
runeTxModel, runestoneModel, err := extractModelRuneTxAndRunestone(gen.GetRuneTransactionsRow(row))
if err != nil {
return nil, errors.Wrap(err, "failed to extract rune transaction and runestone from row")
}
runeTx, err := mapRuneTransactionModelToType(runeTxModel)
if err != nil {
return nil, errors.Wrap(err, "failed to parse rune transaction model")
}
if runestoneModel != nil {
runestone, err := mapRunestoneModelToType(*runestoneModel)
if err != nil {
return nil, errors.Wrap(err, "failed to parse runestone model")
}
runeTx.Runestone = &runestone
}
return &runeTx, nil
}
func (r *Repository) GetRunesBalancesAtOutPoint(ctx context.Context, outPoint wire.OutPoint) (map[runes.RuneId]*entity.OutPointBalance, error) {
balances, err := r.queries.GetOutPointBalancesAtOutPoint(ctx, gen.GetOutPointBalancesAtOutPointParams{
TxHash: outPoint.Hash.String(),
@@ -125,22 +167,59 @@ func (r *Repository) GetRunesBalancesAtOutPoint(ctx context.Context, outPoint wi
return result, nil
}
func (r *Repository) GetUnspentOutPointBalancesByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64) ([]*entity.OutPointBalance, error) {
balances, err := r.queries.GetUnspentOutPointBalancesByPkScript(ctx, gen.GetUnspentOutPointBalancesByPkScriptParams{
func (r *Repository) GetRunesUTXOsByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64, limit int32, offset int32) ([]*entity.RunesUTXO, error) {
if limit == -1 {
limit = math.MaxInt32
}
if limit < 0 {
return nil, errors.Wrap(errs.InvalidArgument, "limit must be -1 or non-negative")
}
rows, err := r.queries.GetRunesUTXOsByPkScript(ctx, gen.GetRunesUTXOsByPkScriptParams{
Pkscript: hex.EncodeToString(pkScript),
BlockHeight: int32(blockHeight),
Limit: limit,
Offset: offset,
})
if err != nil {
return nil, errors.Wrap(err, "error during query")
}
result := make([]*entity.OutPointBalance, 0, len(balances))
for _, balanceModel := range balances {
balance, err := mapOutPointBalanceModelToType(balanceModel)
result := make([]*entity.RunesUTXO, 0, len(rows))
for _, row := range rows {
utxo, err := mapRunesUTXOModelToType(row)
if err != nil {
return nil, errors.Wrap(err, "failed to parse balance model")
return nil, errors.Wrap(err, "failed to parse row model")
}
result = append(result, &balance)
result = append(result, &utxo)
}
return result, nil
}
func (r *Repository) GetRunesUTXOsByRuneIdAndPkScript(ctx context.Context, runeId runes.RuneId, pkScript []byte, blockHeight uint64, limit int32, offset int32) ([]*entity.RunesUTXO, error) {
if limit == -1 {
limit = math.MaxInt32
}
if limit < 0 {
return nil, errors.Wrap(errs.InvalidArgument, "limit must be -1 or non-negative")
}
rows, err := r.queries.GetRunesUTXOsByRuneIdAndPkScript(ctx, gen.GetRunesUTXOsByRuneIdAndPkScriptParams{
Pkscript: hex.EncodeToString(pkScript),
BlockHeight: int32(blockHeight),
RuneIds: []string{runeId.String()},
Limit: limit,
Offset: offset,
})
if err != nil {
return nil, errors.Wrap(err, "error during query")
}
result := make([]*entity.RunesUTXO, 0, len(rows))
for _, row := range rows {
utxo, err := mapRunesUTXOModelToType(gen.GetRunesUTXOsByPkScriptRow(row))
if err != nil {
return nil, errors.Wrap(err, "failed to parse row")
}
result = append(result, &utxo)
}
return result, nil
}
@@ -245,30 +324,46 @@ func (r *Repository) CountRuneEntries(ctx context.Context) (uint64, error) {
return uint64(count), nil
}
func (r *Repository) GetBalancesByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64) (map[runes.RuneId]*entity.Balance, error) {
func (r *Repository) GetBalancesByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64, limit int32, offset int32) ([]*entity.Balance, error) {
if limit == -1 {
limit = math.MaxInt32
}
if limit < 0 {
return nil, errors.Wrap(errs.InvalidArgument, "limit must be -1 or non-negative")
}
balances, err := r.queries.GetBalancesByPkScript(ctx, gen.GetBalancesByPkScriptParams{
Pkscript: hex.EncodeToString(pkScript),
BlockHeight: int32(blockHeight),
Limit: limit,
Offset: offset,
})
if err != nil {
return nil, errors.Wrap(err, "error during query")
}
result := make(map[runes.RuneId]*entity.Balance, len(balances))
result := make([]*entity.Balance, 0, len(balances))
for _, balanceModel := range balances {
balance, err := mapBalanceModelToType(gen.RunesBalance(balanceModel))
if err != nil {
return nil, errors.Wrap(err, "failed to parse balance model")
}
result[balance.RuneId] = balance
result = append(result, balance)
}
return result, nil
}
func (r *Repository) GetBalancesByRuneId(ctx context.Context, runeId runes.RuneId, blockHeight uint64) ([]*entity.Balance, error) {
func (r *Repository) GetBalancesByRuneId(ctx context.Context, runeId runes.RuneId, blockHeight uint64, limit int32, offset int32) ([]*entity.Balance, error) {
if limit == -1 {
limit = math.MaxInt32
}
if limit < 0 {
return nil, errors.Wrap(errs.InvalidArgument, "limit must be -1 or non-negative")
}
balances, err := r.queries.GetBalancesByRuneId(ctx, gen.GetBalancesByRuneIdParams{
RuneID: runeId.String(),
BlockHeight: int32(blockHeight),
Limit: limit,
Offset: offset,
})
if err != nil {
return nil, errors.Wrap(err, "error during query")

View File

@@ -5,6 +5,7 @@ import (
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/uint128"
)
@@ -58,7 +59,8 @@ func ParseFlags(input interface{}) (Flags, error) {
}
return Flags(u128), nil
default:
panic("invalid flags input type")
logger.Panic("invalid flags input type")
return Flags{}, nil
}
}

View File

@@ -1,11 +1,13 @@
package runes
import (
"fmt"
"slices"
"github.com/Cleverse/go-utilities/utils"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/uint128"
)
@@ -29,6 +31,10 @@ var ErrInvalidBase26 = errors.New("invalid base-26 character: must be in the ran
func NewRuneFromString(value string) (Rune, error) {
n := uint128.From64(0)
for i, char := range value {
// skip spacers
if char == '.' || char == '•' {
continue
}
if i > 0 {
n = n.Add(uint128.From64(1))
}
@@ -115,20 +121,25 @@ func (r Rune) Cmp(other Rune) int {
func FirstRuneHeight(network common.Network) uint64 {
switch network {
case common.NetworkMainnet:
return common.HalvingInterval * 4
return 840_000
case common.NetworkTestnet:
return common.HalvingInterval * 12
return 2_520_000
case common.NetworkFractalMainnet:
return 84_000
case common.NetworkFractalTestnet:
return 84_000
}
panic("invalid network")
logger.Panic(fmt.Sprintf("invalid network: %s", network))
return 0
}
func MinimumRuneAtHeight(network common.Network, height uint64) Rune {
offset := height + 1
interval := common.HalvingInterval / 12
interval := network.HalvingInterval() / 12
// runes are gradually unlocked from rune activation height until the next halving
start := FirstRuneHeight(network)
end := start + common.HalvingInterval
end := start + network.HalvingInterval()
if offset < start {
return (Rune)(unlockSteps[12])

View File
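Two behaviours from this file worth illustrating: NewRuneFromString now ignores spacer characters, and activation heights are spelled out per network instead of being derived from a single halving interval. A small sketch using the module's runes package (output comments are expectations, not captured output):

package main

import (
    "fmt"

    "github.com/gaze-network/indexer-network/common"
    "github.com/gaze-network/indexer-network/modules/runes/runes"
)

func main() {
    // Spacers ('.' and '•') are skipped, so the spaced and plain spellings
    // parse to the same rune value.
    spaced, err := runes.NewRuneFromString("UNCOMMON•GOODS")
    if err != nil {
        panic(err)
    }
    plain, err := runes.NewRuneFromString("UNCOMMONGOODS")
    if err != nil {
        panic(err)
    }
    fmt.Println(spaced.Cmp(plain) == 0) // expected: true

    // Activation heights per network, as hard-coded above.
    fmt.Println(runes.FirstRuneHeight(common.NetworkMainnet))        // 840000
    fmt.Println(runes.FirstRuneHeight(common.NetworkFractalMainnet)) // 84000
}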

@@ -92,8 +92,8 @@ func TestMinimumRuneAtHeightMainnet(t *testing.T) {
}
start := FirstRuneHeight(common.NetworkMainnet)
end := start + common.HalvingInterval
interval := uint64(common.HalvingInterval / 12)
end := start + common.NetworkMainnet.HalvingInterval()
interval := uint64(common.NetworkMainnet.HalvingInterval() / 12)
test(0, "AAAAAAAAAAAAA")
test(start/2, "AAAAAAAAAAAAA")

View File

@@ -5,6 +5,7 @@ import (
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/uint128"
)
@@ -102,6 +103,7 @@ func ParseTag(input interface{}) (Tag, error) {
}
return Tag(u128), nil
default:
panic("invalid tag input type")
logger.Panic("invalid tag input type")
return Tag{}, nil
}
}

View File

@@ -0,0 +1,5 @@
package usecase
import "github.com/cockroachdb/errors"
var ErrUTXONotFound = errors.New("utxo not found")

View File

@@ -8,16 +8,18 @@ import (
"github.com/gaze-network/indexer-network/modules/runes/runes"
)
func (u *Usecase) GetBalancesByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64) (map[runes.RuneId]*entity.Balance, error) {
balances, err := u.runesDg.GetBalancesByPkScript(ctx, pkScript, blockHeight)
// Use limit = -1 for no limit.
func (u *Usecase) GetBalancesByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64, limit int32, offset int32) ([]*entity.Balance, error) {
balances, err := u.runesDg.GetBalancesByPkScript(ctx, pkScript, blockHeight, limit, offset)
if err != nil {
return nil, errors.Wrap(err, "error during GetBalancesByPkScript")
}
return balances, nil
}
func (u *Usecase) GetBalancesByRuneId(ctx context.Context, runeId runes.RuneId, blockHeight uint64) ([]*entity.Balance, error) {
balances, err := u.runesDg.GetBalancesByRuneId(ctx, runeId, blockHeight)
// Use limit = -1 for no limit.
func (u *Usecase) GetBalancesByRuneId(ctx context.Context, runeId runes.RuneId, blockHeight uint64, limit int32, offset int32) ([]*entity.Balance, error) {
balances, err := u.runesDg.GetBalancesByRuneId(ctx, runeId, blockHeight, limit, offset)
if err != nil {
return nil, errors.Wrap(err, "failed to get rune holders by rune id")
}

View File

@@ -1,16 +0,0 @@
package usecase
import (
"context"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
)
func (u *Usecase) GetUnspentOutPointBalancesByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64) ([]*entity.OutPointBalance, error) {
balances, err := u.runesDg.GetUnspentOutPointBalancesByPkScript(ctx, pkScript, blockHeight)
if err != nil {
return nil, errors.Wrap(err, "error during GetBalancesByPkScript")
}
return balances, nil
}

View File

@@ -8,8 +8,9 @@ import (
"github.com/gaze-network/indexer-network/modules/runes/runes"
)
func (u *Usecase) GetRuneTransactions(ctx context.Context, pkScript []byte, runeId runes.RuneId, fromBlock, toBlock uint64) ([]*entity.RuneTransaction, error) {
txs, err := u.runesDg.GetRuneTransactions(ctx, pkScript, runeId, fromBlock, toBlock)
// Use limit = -1 for no limit.
func (u *Usecase) GetRuneTransactions(ctx context.Context, pkScript []byte, runeId runes.RuneId, fromBlock, toBlock uint64, limit int32, offset int32) ([]*entity.RuneTransaction, error) {
txs, err := u.runesDg.GetRuneTransactions(ctx, pkScript, runeId, fromBlock, toBlock, limit, offset)
if err != nil {
return nil, errors.Wrap(err, "error during GetRuneTransactions")
}

View File

@@ -0,0 +1,119 @@
package usecase
import (
"context"
"strings"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
"github.com/gaze-network/indexer-network/modules/runes/runes"
)
func (u *Usecase) GetRunesUTXOsByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64, limit int32, offset int32) ([]*entity.RunesUTXOWithSats, error) {
balances, err := u.runesDg.GetRunesUTXOsByPkScript(ctx, pkScript, blockHeight, limit, offset)
if err != nil {
return nil, errors.Wrap(err, "error during GetRunesUTXOsByPkScript")
}
result := make([]*entity.RunesUTXOWithSats, 0, len(balances))
for _, balance := range balances {
tx, err := u.bitcoinClient.GetRawTransactionByTxHash(ctx, balance.OutPoint.Hash)
if err != nil {
if strings.Contains(err.Error(), "No such mempool or blockchain transaction.") {
return nil, errors.WithStack(ErrUTXONotFound)
}
return nil, errors.WithStack(err)
}
result = append(result, &entity.RunesUTXOWithSats{
RunesUTXO: entity.RunesUTXO{
PkScript: balance.PkScript,
OutPoint: balance.OutPoint,
RuneBalances: balance.RuneBalances,
},
Sats: tx.TxOut[balance.OutPoint.Index].Value,
})
}
return result, nil
}
func (u *Usecase) GetRunesUTXOsByRuneIdAndPkScript(ctx context.Context, runeId runes.RuneId, pkScript []byte, blockHeight uint64, limit int32, offset int32) ([]*entity.RunesUTXOWithSats, error) {
balances, err := u.runesDg.GetRunesUTXOsByRuneIdAndPkScript(ctx, runeId, pkScript, blockHeight, limit, offset)
if err != nil {
return nil, errors.Wrap(err, "error during GetRunesUTXOsByRuneIdAndPkScript")
}
result := make([]*entity.RunesUTXOWithSats, 0, len(balances))
for _, balance := range balances {
tx, err := u.bitcoinClient.GetRawTransactionByTxHash(ctx, balance.OutPoint.Hash)
if err != nil {
if strings.Contains(err.Error(), "No such mempool or blockchain transaction.") {
return nil, errors.WithStack(ErrUTXONotFound)
}
return nil, errors.WithStack(err)
}
result = append(result, &entity.RunesUTXOWithSats{
RunesUTXO: entity.RunesUTXO{
PkScript: balance.PkScript,
OutPoint: balance.OutPoint,
RuneBalances: balance.RuneBalances,
},
Sats: tx.TxOut[balance.OutPoint.Index].Value,
})
}
return result, nil
}
func (u *Usecase) GetUTXOsOutputByLocation(ctx context.Context, txHash chainhash.Hash, outputIdx uint32) (*entity.RunesUTXOWithSats, error) {
tx, err := u.bitcoinClient.GetRawTransactionByTxHash(ctx, txHash)
if err != nil {
if strings.Contains(err.Error(), "No such mempool or blockchain transaction.") {
return nil, errors.WithStack(ErrUTXONotFound)
}
return nil, errors.WithStack(err)
}
// If the output index is out of range, return an error
if len(tx.TxOut) <= int(outputIdx) {
return nil, errors.WithStack(ErrUTXONotFound)
}
rune := &entity.RunesUTXOWithSats{
RunesUTXO: entity.RunesUTXO{
PkScript: tx.TxOut[outputIdx].PkScript,
OutPoint: wire.OutPoint{
Hash: txHash,
Index: outputIdx,
},
},
Sats: tx.TxOut[outputIdx].Value,
}
transaction, err := u.runesDg.GetRuneTransaction(ctx, txHash)
// If the transaction has no rune data indexed in the database, return the UTXO with only PkScript, OutPoint, and Sats
if errors.Is(err, errs.NotFound) {
return rune, nil
}
if err != nil {
return nil, errors.WithStack(err)
}
runeBalance := make([]entity.RunesUTXOBalance, 0, len(transaction.Outputs))
for _, output := range transaction.Outputs {
if output.Index == outputIdx {
runeBalance = append(runeBalance, entity.RunesUTXOBalance{
RuneId: output.RuneId,
Amount: output.Amount,
})
}
}
rune.RuneBalances = runeBalance
return rune, nil
}

View File

@@ -9,4 +9,6 @@ import (
type Contract interface {
GetRawTransactionAndHeightByTxHash(ctx context.Context, txHash chainhash.Hash) (*wire.MsgTx, int64, error)
GetRawTransactionByTxHash(ctx context.Context, txHash chainhash.Hash) (*wire.MsgTx, error)
}

View File

@@ -150,6 +150,18 @@ func (a Address) Equal(b Address) bool {
return a.encoded == b.encoded
}
// DustLimit returns the output dust limit (lowest possible satoshis in a UTXO) for the address type.
func (a Address) DustLimit() int64 {
switch a.encodedType {
case AddressP2TR:
return 330
case AddressP2WPKH:
return 294
default:
return 546
}
}
// MarshalText implements the encoding.TextMarshaler interface.
func (a Address) MarshalText() ([]byte, error) {
return []byte(a.encoded), nil

View File

@@ -447,3 +447,72 @@ func TestAddressPkScript(t *testing.T) {
})
}
}
func TestAddressDustLimit(t *testing.T) {
type Spec struct {
Address string
DefaultNet *chaincfg.Params
ExpectedDustLimit int64
}
specs := []Spec{
{
Address: "bc1qfpgdxtpl7kz5qdus2pmexyjaza99c28q8uyczh",
DefaultNet: &chaincfg.MainNetParams,
ExpectedDustLimit: 294,
},
{
Address: "tb1qfpgdxtpl7kz5qdus2pmexyjaza99c28qd6ltey",
DefaultNet: &chaincfg.MainNetParams,
ExpectedDustLimit: 294,
},
{
Address: "bc1p7h87kqsmpzatddzhdhuy9gmxdpvn5kvar6hhqlgau8d2ffa0pa3qvz5d38",
DefaultNet: &chaincfg.MainNetParams,
ExpectedDustLimit: 330,
},
{
Address: "tb1p7h87kqsmpzatddzhdhuy9gmxdpvn5kvar6hhqlgau8d2ffa0pa3qm2zztg",
DefaultNet: &chaincfg.MainNetParams,
ExpectedDustLimit: 330,
},
{
Address: "3Ccte7SJz71tcssLPZy3TdWz5DTPeNRbPw",
DefaultNet: &chaincfg.MainNetParams,
ExpectedDustLimit: 546,
},
{
Address: "1KrRZSShVkdc8J71CtY4wdw46Rx3BRLKyH",
DefaultNet: &chaincfg.MainNetParams,
ExpectedDustLimit: 546,
},
{
Address: "bc1qeklep85ntjz4605drds6aww9u0qr46qzrv5xswd35uhjuj8ahfcqgf6hak",
DefaultNet: &chaincfg.MainNetParams,
ExpectedDustLimit: 546,
},
{
Address: "migbBPcDajPfffrhoLpYFTQNXQFbWbhpz3",
DefaultNet: &chaincfg.TestNet3Params,
ExpectedDustLimit: 546,
},
{
Address: "tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7",
DefaultNet: &chaincfg.MainNetParams,
ExpectedDustLimit: 546,
},
{
Address: "2NCxMvHPTduZcCuUeAiWUpuwHga7Y66y9XJ",
DefaultNet: &chaincfg.TestNet3Params,
ExpectedDustLimit: 546,
},
}
for _, spec := range specs {
t.Run(spec.Address, func(t *testing.T) {
addr, err := btcutils.SafeNewAddress(spec.Address, spec.DefaultNet)
require.NoError(t, err)
assert.Equal(t, spec.ExpectedDustLimit, addr.DustLimit())
})
}
}

pkg/btcutils/fee.go (new file, 56 lines added)
View File

@@ -0,0 +1,56 @@
package btcutils
import (
"github.com/btcsuite/btcd/blockchain"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
)
// EstimateSignedTxNetworkFee estimates the network fee for the given transaction. "prevTxOuts" must be the previous outputs spent by the transaction's inputs, in the same order as tx.TxIn.
// If the transaction has unsigned inputs, the fee will be calculated as if those inputs were signed.
func EstimateSignedTxNetworkFee(tx *wire.MsgTx, prevTxOuts []*wire.TxOut, feeRate int64) (int64, error) {
if len(tx.TxIn) != len(prevTxOuts) {
return 0, errors.Wrapf(errs.InvalidArgument, "tx.TxIn length (%d) must match prevTxOuts length (%d)", len(tx.TxIn), len(prevTxOuts))
}
tx = tx.Copy()
mockPrivateKey, _ := btcec.NewPrivateKey()
for i := range tx.TxIn {
if len(tx.TxIn[i].SignatureScript) > 0 || (len(tx.TxIn[i].Witness) > 0 && len(tx.TxIn[i].Witness[0]) > 0) {
// already signed, skip
continue
}
address, err := ExtractAddressFromPkScript(prevTxOuts[i].PkScript)
if err != nil {
return 0, errors.Wrapf(err, "failed to extract address from pkScript %d", i)
}
// if the input is a taproot script-path spend, we need to sign it with the tapscript
if address.Type() == AddressTaproot && len(tx.TxIn[i].Witness) == 3 {
tx, err = SignTxInputTapScript(tx, mockPrivateKey, prevTxOuts[i], i)
if err != nil {
return 0, errors.Wrapf(err, "failed to sign tx input %d (tapscript)", i)
}
} else {
tx, err = SignTxInput(tx, mockPrivateKey, prevTxOuts[i], i)
if err != nil {
return 0, errors.Wrapf(err, "failed to sign tx input %d", i)
}
}
}
txWeight := blockchain.GetTransactionWeight(btcutil.NewTx(tx))
txVBytes := calVBytes(txWeight)
fee := txVBytes * feeRate
return fee, nil
}
func calVBytes(txWeight int64) int64 {
// vbytes = ceil(txWeight / 4); any fractional vbyte counts as a full vbyte.
txVBytes := txWeight / 4
if txWeight%4 > 0 {
txVBytes += 1
}
return txVBytes
}
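To make the vbyte rounding concrete (a hedged example, not from the diff): a transaction with weight 561 WU takes ceil(561 / 4) = 141 vbytes, so at a fee rate of 20 sat/vB the estimate is 141 * 20 = 2,820 sats. A sketch of calling the estimator, assuming unsignedTx and prevTxOuts (one previous output per input, in input order) are already in scope:
    // feeRate is in sat/vB; unsigned inputs are signed internally with a throwaway key before weighing.
    fee, err := btcutils.EstimateSignedTxNetworkFee(unsignedTx, prevTxOuts, 20)
    if err != nil {
        return errors.Wrap(err, "failed to estimate network fee")
    }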

View File

@@ -3,8 +3,12 @@ package btcutils
import (
"github.com/Cleverse/go-utilities/utils"
verifier "github.com/bitonicnl/verify-signed-message/pkg"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
)
func VerifySignature(address string, message string, sigBase64 string, defaultNet ...*chaincfg.Params) error {
@@ -19,3 +23,121 @@ func VerifySignature(address string, message string, sigBase64 string, defaultNe
}
return nil
}
func SignTxInput(tx *wire.MsgTx, privateKey *btcec.PrivateKey, prevTxOut *wire.TxOut, inputIndex int) (*wire.MsgTx, error) {
if privateKey == nil {
return nil, errors.Wrap(errs.InvalidArgument, "PrivateKey is required")
}
if tx == nil {
return nil, errors.Wrap(errs.InvalidArgument, "Tx is required")
}
if prevTxOut == nil {
return nil, errors.Wrap(errs.InvalidArgument, "PrevTxOut is required")
}
prevOutFetcher := txscript.NewCannedPrevOutputFetcher(prevTxOut.PkScript, prevTxOut.Value)
sigHashes := txscript.NewTxSigHashes(tx, prevOutFetcher)
if len(tx.TxIn) <= inputIndex {
return nil, errors.Errorf("input to sign (%d) is out of range", inputIndex)
}
address, err := ExtractAddressFromPkScript(prevTxOut.PkScript)
if err != nil {
return nil, errors.Wrap(err, "failed to extract address")
}
switch address.Type() {
case AddressP2TR:
witness, err := txscript.TaprootWitnessSignature(
tx,
sigHashes,
inputIndex,
prevTxOut.Value,
prevTxOut.PkScript,
txscript.SigHashAll|txscript.SigHashAnyOneCanPay,
privateKey)
if err != nil {
return nil, errors.Wrap(err, "failed to sign")
}
tx.TxIn[inputIndex].Witness = witness
case AddressP2WPKH:
witness, err := txscript.WitnessSignature(
tx,
sigHashes,
inputIndex,
prevTxOut.Value,
prevTxOut.PkScript,
txscript.SigHashAll|txscript.SigHashAnyOneCanPay,
privateKey,
true,
)
if err != nil {
return nil, errors.Wrap(err, "failed to sign")
}
tx.TxIn[inputIndex].Witness = witness
case AddressP2PKH:
sigScript, err := txscript.SignatureScript(
tx,
inputIndex,
prevTxOut.PkScript,
txscript.SigHashAll|txscript.SigHashAnyOneCanPay,
privateKey,
true,
)
if err != nil {
return nil, errors.Wrap(err, "failed to sign")
}
tx.TxIn[inputIndex].SignatureScript = sigScript
default:
return nil, errors.Wrapf(errs.NotSupported, "unsupported input address type %s", address.Type())
}
return tx, nil
}
func SignTxInputTapScript(tx *wire.MsgTx, privateKey *btcec.PrivateKey, prevTxOut *wire.TxOut, inputIndex int) (*wire.MsgTx, error) {
if privateKey == nil {
return nil, errors.Wrap(errs.InvalidArgument, "PrivateKey is required")
}
if tx == nil {
return nil, errors.Wrap(errs.InvalidArgument, "Tx is required")
}
if prevTxOut == nil {
return nil, errors.Wrap(errs.InvalidArgument, "PrevTxOut is required")
}
prevOutFetcher := txscript.NewCannedPrevOutputFetcher(prevTxOut.PkScript, prevTxOut.Value)
sigHashes := txscript.NewTxSigHashes(tx, prevOutFetcher)
if len(tx.TxIn) <= inputIndex {
return nil, errors.Errorf("input to sign (%d) is out of range", inputIndex)
}
address, err := ExtractAddressFromPkScript(prevTxOut.PkScript)
if err != nil {
return nil, errors.Wrap(err, "failed to extract address")
}
if address.Type() != AddressTaproot {
return nil, errors.Errorf("input type must be %s", AddressTaproot)
}
witness := tx.TxIn[inputIndex].Witness
if len(witness) != 3 {
return nil, errors.Wrapf(errs.InvalidArgument, "invalid witness length: expected 3, got %d", len(witness))
}
tapLeaf := txscript.NewBaseTapLeaf(witness[1])
signature, err := txscript.RawTxInTapscriptSignature(
tx,
sigHashes,
inputIndex,
prevTxOut.Value,
prevTxOut.PkScript,
tapLeaf,
txscript.SigHashAll|txscript.SigHashAnyOneCanPay,
privateKey)
if err != nil {
return nil, errors.Wrap(err, "failed to sign")
}
tx.TxIn[inputIndex].Witness[0] = signature
return tx, nil
}
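A hedged sketch of signing every input of a transaction with these helpers, assuming all inputs are key-path (non-tapscript) spends controlled by one key and prevTxOuts[i] is the output spent by input i:
    for i := range tx.TxIn {
        // SignTxInput chooses P2TR, P2WPKH or P2PKH signing from the previous output's script.
        signed, err := btcutils.SignTxInput(tx, privateKey, prevTxOuts[i], i)
        if err != nil {
            return errors.Wrapf(err, "failed to sign input %d", i)
        }
        tx = signed
    }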

View File

@@ -3,8 +3,14 @@ package btcutils
import (
"testing"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcec/v2/schnorr"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestVerifySignature(t *testing.T) {
@@ -67,3 +73,115 @@ func TestVerifySignature(t *testing.T) {
assert.Error(t, err)
}
}
func TestSignTxInput(t *testing.T) {
generateTxAndPrevTxOutFromPkScript := func(pkScript []byte) (*wire.MsgTx, *wire.TxOut) {
tx := wire.NewMsgTx(wire.TxVersion)
tx.AddTxIn(&wire.TxIn{
PreviousOutPoint: wire.OutPoint{
Index: 1,
},
})
txOut := &wire.TxOut{
Value: 1e8, PkScript: pkScript,
}
tx.AddTxOut(txOut)
// using same value and pkScript as input for simplicity
return tx, txOut
}
verifySignedTx := func(t *testing.T, signedTx *wire.MsgTx, prevTxOut *wire.TxOut) {
t.Helper()
prevOutFetcher := txscript.NewCannedPrevOutputFetcher(prevTxOut.PkScript, prevTxOut.Value)
sigHashes := txscript.NewTxSigHashes(signedTx, prevOutFetcher)
vm, err := txscript.NewEngine(
prevTxOut.PkScript, signedTx, 0, txscript.StandardVerifyFlags,
nil, sigHashes, prevTxOut.Value, prevOutFetcher,
)
require.NoError(t, err)
require.NoError(t, vm.Execute(), "error during signature verification") // no error means success
}
privKey, _ := btcec.NewPrivateKey()
t.Run("P2TR input", func(t *testing.T) {
taprootKey := txscript.ComputeTaprootKeyNoScript(privKey.PubKey())
pkScript, err := txscript.PayToTaprootScript(taprootKey)
require.NoError(t, err)
tx, prevTxOut := generateTxAndPrevTxOutFromPkScript(pkScript)
signedTx, err := SignTxInput(
tx, privKey, prevTxOut, 0,
)
require.NoError(t, err)
verifySignedTx(t, signedTx, prevTxOut)
})
t.Run("tapscript input", func(t *testing.T) {
internalKey := privKey.PubKey()
// Our script will be a simple OP_CHECKSIG as the sole leaf of a
// tapscript tree.
builder := txscript.NewScriptBuilder()
builder.AddData(schnorr.SerializePubKey(internalKey))
builder.AddOp(txscript.OP_CHECKSIG)
tapScript, err := builder.Script()
require.NoError(t, err)
tapLeaf := txscript.NewBaseTapLeaf(tapScript)
tapScriptTree := txscript.AssembleTaprootScriptTree(tapLeaf)
controlBlock := tapScriptTree.LeafMerkleProofs[0].ToControlBlock(
internalKey,
)
controlBlockBytes, err := controlBlock.ToBytes()
require.NoError(t, err)
tapScriptRootHash := tapScriptTree.RootNode.TapHash()
outputKey := txscript.ComputeTaprootOutputKey(
internalKey, tapScriptRootHash[:],
)
p2trScript, err := txscript.PayToTaprootScript(outputKey)
require.NoError(t, err)
tx, prevTxOut := generateTxAndPrevTxOutFromPkScript(p2trScript)
tx.TxIn[0].Witness = wire.TxWitness{
{},
tapScript,
controlBlockBytes,
}
signedTx, err := SignTxInputTapScript(
tx, privKey, prevTxOut, 0,
)
require.NoError(t, err)
verifySignedTx(t, signedTx, prevTxOut)
})
t.Run("P2WPKH input", func(t *testing.T) {
pubKey := privKey.PubKey()
pubKeyHash := btcutil.Hash160(pubKey.SerializeCompressed())
pkScript, err := txscript.NewScriptBuilder().
AddOp(txscript.OP_0).
AddData(pubKeyHash).
Script()
require.NoError(t, err)
tx, prevTxOut := generateTxAndPrevTxOutFromPkScript(pkScript)
signedTx, err := SignTxInput(
tx, privKey, prevTxOut, 0,
)
require.NoError(t, err)
verifySignedTx(t, signedTx, prevTxOut)
})
t.Run("P2PKH input", func(t *testing.T) {
pubKey := privKey.PubKey()
pubKeyHash := btcutil.Hash160(pubKey.SerializeCompressed())
address, err := btcutil.NewAddressPubKeyHash(pubKeyHash, &chaincfg.MainNetParams)
require.NoError(t, err)
pkScript, err := txscript.PayToAddrScript(address)
require.NoError(t, err)
tx, prevTxOut := generateTxAndPrevTxOutFromPkScript(pkScript)
signedTx, err := SignTxInput(
tx, privKey, prevTxOut, 0,
)
require.NoError(t, err)
verifySignedTx(t, signedTx, prevTxOut)
})
}

View File

@@ -64,13 +64,14 @@ func (r *HttpResponse) UnmarshalBody(out any) error {
if err != nil {
return errors.Wrapf(err, "can't uncompress body from %v", r.URL)
}
switch strings.ToLower(string(r.Header.ContentType())) {
case "application/json", "application/json; charset=utf-8":
contentType := strings.ToLower(string(r.Header.ContentType()))
switch {
case strings.Contains(contentType, "application/json"):
if err := json.Unmarshal(body, out); err != nil {
return errors.Wrapf(err, "can't unmarshal json body from %s, %q", r.URL, string(body))
}
return nil
case "text/plain", "text/plain; charset=utf-8":
case strings.Contains(contentType, "text/plain"):
return errors.Errorf("can't unmarshal plain text %q", string(body))
default:
return errors.Errorf("unsupported content type: %s, contents: %v", r.Header.ContentType(), string(r.Body()))
@@ -90,6 +91,10 @@ func (h *Client) request(ctx context.Context, reqOptions RequestOptions) (*HttpR
parsedUrl := h.BaseURL()
parsedUrl.Path = path.Join(parsedUrl.Path, reqOptions.path)
// path.Join cleans the joined path and drops any trailing slash; if the request path ends with "/", re-append it to parsedUrl.Path.
if strings.HasSuffix(reqOptions.path, "/") && !strings.HasSuffix(parsedUrl.Path, "/") {
parsedUrl.Path += "/"
}
baseQuery := parsedUrl.Query()
for k, v := range reqOptions.Query {
baseQuery[k] = v

View File

@@ -13,10 +13,13 @@ import (
)
type Config struct {
WithRequestHeader bool `env:"REQUEST_HEADER" envDefault:"false" mapstructure:"request_header"`
WithRequestQuery bool `env:"REQUEST_QUERY" envDefault:"false" mapstructure:"request_query"`
Disable bool `env:"DISABLE" envDefault:"false" mapstructure:"disable"` // Disable logger level `INFO`
HiddenRequestHeaders []string `env:"HIDDEN_REQUEST_HEADERS" mapstructure:"hidden_request_headers"`
AllRequestHeaders bool `env:"REQUEST_HEADER" envDefault:"false" mapstructure:"request_header"` // Log all request headers
AllResponseHeaders bool `env:"RESPONSE_HEADER" envDefault:"false" mapstructure:"response_header"` // Log all response headers
AllRequestQueries bool `env:"REQUEST_QUERY" envDefault:"false" mapstructure:"request_query"` // Log all request queries
Disable bool `env:"DISABLE" envDefault:"false" mapstructure:"disable"` // Disable logger level `INFO`
HiddenRequestHeaders []string `env:"HIDDEN_REQUEST_HEADERS" mapstructure:"hidden_request_headers"` // Hide specific headers from log
WithRequestHeaders []string `env:"WITH_REQUEST_HEADERS" mapstructure:"with_request_headers"` // Add specific headers to log (higher priority than `HiddenRequestHeaders`)
With map[string]interface{} `env:"WITH" mapstructure:"with"` // Additional fields to log
}
// New setup request context and information
@@ -25,6 +28,10 @@ func New(config Config) fiber.Handler {
for _, header := range config.HiddenRequestHeaders {
hiddenRequestHeaders[strings.TrimSpace(strings.ToLower(header))] = struct{}{}
}
withRequestHeaders := make(map[string]struct{}, len(config.WithRequestHeaders))
for _, header := range config.WithRequestHeaders {
withRequestHeaders[strings.TrimSpace(strings.ToLower(header))] = struct{}{}
}
return func(c *fiber.Ctx) error {
start := time.Now()
@@ -41,6 +48,11 @@ func New(config Config) fiber.Handler {
slog.String("latencyHuman", latency.String()),
}
// add `with` fields
for k, v := range config.With {
baseAttrs = append(baseAttrs, slog.Any(k, v))
}
// prep request attributes
requestAttributes := []slog.Attr{
slog.Time("time", start),
@@ -64,23 +76,64 @@ func New(config Config) fiber.Handler {
slog.Int("length", len(c.Response().Body())),
}
// request query
if config.WithRequestQuery {
requestAttributes = append(requestAttributes, slog.String("query", string(c.Request().URI().QueryString())))
// request queries
if config.AllRequestQueries {
args := c.Request().URI().QueryArgs()
logAttrs := make([]any, 0, args.Len())
args.VisitAll(func(k, v []byte) {
logAttrs = append(logAttrs, slog.Any(string(k), string(v)))
})
requestAttributes = append(requestAttributes, slog.Group("queries", logAttrs...))
}
// request headers
if config.WithRequestHeader {
if config.AllRequestHeaders || len(config.WithRequestHeaders) > 0 {
kv := []any{}
for k, v := range c.GetReqHeaders() {
h := strings.ToLower(k)
// add headers for WithRequestHeaders
if _, found := withRequestHeaders[h]; found {
goto add
}
// skip hidden headers
if _, found := hiddenRequestHeaders[h]; found {
continue
}
// skip if not AllRequestHeaders
if !config.AllRequestHeaders {
continue
}
add:
val := any(v)
if len(v) == 1 {
val = v[0]
}
kv = append(kv, slog.Any(k, val))
}
requestAttributes = append(requestAttributes, slog.Group("headers", kv...))
}
if config.AllResponseHeaders {
kv := []any{}
for k, v := range c.GetRespHeaders() {
// skip hidden headers
if _, found := hiddenRequestHeaders[strings.ToLower(k)]; found {
continue
}
kv = append(kv, slog.Any(k, v))
}
requestAttributes = append(requestAttributes, slog.Group("header", kv...))
val := any(v)
if len(v) == 1 {
val = v[0]
}
kv = append(kv, slog.Any(k, val))
}
responseAttributes = append(responseAttributes, slog.Group("headers", kv...))
}
level := slog.LevelInfo
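A hedged example of wiring the new options into the middleware (field values and the fiber app variable are illustrative only; Config and New are the type and constructor shown in this file):
    cfg := Config{
        AllResponseHeaders:   true,
        AllRequestQueries:    true,
        HiddenRequestHeaders: []string{"Authorization", "Cookie"},
        // logged even when AllRequestHeaders is false, and takes priority over HiddenRequestHeaders
        WithRequestHeaders: []string{"X-Request-Id"},
        With:               map[string]interface{}{"service": "runes-api"},
    }
    app.Use(New(cfg))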

View File

@@ -17,3 +17,11 @@ sql:
sql_package: "pgx/v5"
rename:
id: "Id"
- schema: "./modules/nodesale/database/postgresql/migrations"
queries: "./modules/nodesale/database/postgresql/queries"
engine: "postgresql"
gen:
go:
package: "gen"
out: "./modules/nodesale/repository/postgres/gen"
sql_package: "pgx/v5"
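Assuming sqlc.yaml stays at the repository root, the generated package under ./modules/nodesale/repository/postgres/gen would presumably be refreshed the same way as the existing entries, by running sqlc generate after editing the migrations or queries.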