mirror of
https://github.com/alexgo-io/gaze-indexer.git
synced 2026-01-12 22:43:22 +08:00
Compare commits
70 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
58f8497997 | ||
|
|
920f7fe07b | ||
|
|
0cb66232ef | ||
|
|
4074548b3e | ||
|
|
c5c9a7bdeb | ||
|
|
58334dd3e4 | ||
|
|
cffe378beb | ||
|
|
9a7ee49228 | ||
|
|
9739f61067 | ||
|
|
f1267b387e | ||
|
|
8883c24c77 | ||
|
|
e9ce8df01a | ||
|
|
3ff73a99f8 | ||
|
|
96afdfd255 | ||
|
|
c49e39be97 | ||
|
|
12985ae432 | ||
|
|
2d51e52b83 | ||
|
|
618220d0cb | ||
|
|
6004744721 | ||
|
|
90ed7bc350 | ||
|
|
7a0fe84e40 | ||
|
|
f1d4651042 | ||
|
|
5f4f50a9e5 | ||
|
|
32c3c5c1d4 | ||
|
|
2a572e6d1e | ||
|
|
aa25a6882b | ||
|
|
6182c63150 | ||
|
|
e1f8eaa3e1 | ||
|
|
107836ae39 | ||
|
|
1bd84b0154 | ||
|
|
de26a4c21d | ||
|
|
1dc57d74e0 | ||
|
|
7c0e28d8ea | ||
|
|
754fd1e997 | ||
|
|
66f03f7107 | ||
|
|
7a863987ec | ||
|
|
f9c6ef8dfd | ||
|
|
22a32468ef | ||
|
|
b1d9f4f574 | ||
|
|
6a5ba528a8 | ||
|
|
6484887710 | ||
|
|
9a1382fb9f | ||
|
|
3d5f3b414c | ||
|
|
6e8a846c27 | ||
|
|
8b690c4f7f | ||
|
|
cc37807ff9 | ||
|
|
9ab16d21e1 | ||
|
|
32fec89914 | ||
|
|
0131de6717 | ||
|
|
206eb65ee7 | ||
|
|
fa810b0aed | ||
|
|
dca63a49fe | ||
|
|
05ade4b9d5 | ||
|
|
074458584b | ||
|
|
db5dc75c41 | ||
|
|
0474627336 | ||
|
|
359436e6eb | ||
|
|
1967895d6d | ||
|
|
7dcbd082ee | ||
|
|
880f4b2e6a | ||
|
|
3f727dc11b | ||
|
|
60717ecc65 | ||
|
|
6998adedb0 | ||
|
|
add0a541b5 | ||
|
|
dad02bf61a | ||
|
|
694baef0aa | ||
|
|
47119c3220 | ||
|
|
6203b104db | ||
|
|
b24f27ec9a | ||
|
|
a663f909fa |
3
.github/workflows/code-analysis.yml
vendored
3
.github/workflows/code-analysis.yml
vendored
@@ -58,6 +58,9 @@ jobs:
|
||||
cache: true # caching and restoring go modules and build outputs.
|
||||
- run: echo "GOVERSION=$(go version)" >> $GITHUB_ENV
|
||||
|
||||
- name: Touch test result file
|
||||
run: echo "" > test_output.json
|
||||
|
||||
- name: Build
|
||||
run: go build -v ./...
|
||||
|
||||
|
||||
2
.github/workflows/sqlc-verify.yml
vendored
2
.github/workflows/sqlc-verify.yml
vendored
@@ -22,7 +22,7 @@ jobs:
|
||||
- name: Setup Sqlc
|
||||
uses: sqlc-dev/setup-sqlc@v4
|
||||
with:
|
||||
sqlc-version: "1.26.0"
|
||||
sqlc-version: "1.27.0"
|
||||
|
||||
- name: Check Diff
|
||||
run: sqlc diff
|
||||
|
||||
@@ -101,3 +101,6 @@ linters-settings:
|
||||
attr-only: true
|
||||
key-naming-case: snake
|
||||
args-on-sep-lines: true
|
||||
gosec:
|
||||
excludes:
|
||||
- G115
|
||||
|
||||
2
.vscode/settings.json
vendored
2
.vscode/settings.json
vendored
@@ -39,7 +39,7 @@
|
||||
"ui.completion.usePlaceholders": false,
|
||||
"ui.diagnostic.analyses": {
|
||||
// https://github.com/golang/tools/blob/master/gopls/doc/analyzers.md
|
||||
// "fieldalignment": false,
|
||||
"fieldalignment": false,
|
||||
"nilness": true,
|
||||
"shadow": false,
|
||||
"unusedparams": true,
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
<!-- omit from toc -->
|
||||
|
||||
- [Türkçe](https://github.com/Rumeyst/gaze-indexer/blob/turkish-translation/docs/README_tr.md)
|
||||
|
||||
# Gaze Indexer
|
||||
|
||||
Gaze Indexer is an open-source and modular indexing client for Bitcoin meta-protocols with **Unified Consistent APIs** across fungible token protocols.
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/core/indexer"
|
||||
"github.com/gaze-network/indexer-network/internal/config"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale"
|
||||
"github.com/gaze-network/indexer-network/modules/runes"
|
||||
"github.com/gaze-network/indexer-network/pkg/automaxprocs"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger"
|
||||
@@ -39,6 +40,7 @@ import (
|
||||
// Register Modules
|
||||
var Modules = do.Package(
|
||||
do.LazyNamed("runes", runes.New),
|
||||
do.LazyNamed("nodesale", nodesale.New),
|
||||
)
|
||||
|
||||
func NewRunCommand() *cobra.Command {
|
||||
|
||||
@@ -6,13 +6,15 @@ import (
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/core/constants"
|
||||
"github.com/gaze-network/indexer-network/modules/runes"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale"
|
||||
runesconstants "github.com/gaze-network/indexer-network/modules/runes/constants"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var versions = map[string]string{
|
||||
"": constants.Version,
|
||||
"runes": runes.Version,
|
||||
"": constants.Version,
|
||||
"runes": runesconstants.Version,
|
||||
"nodesale": nodesale.Version,
|
||||
}
|
||||
|
||||
type versionCmdOptions struct {
|
||||
|
||||
@@ -1,4 +0,0 @@
|
||||
package common
|
||||
|
||||
// HalvingInterval is the number of blocks between each halving event.
|
||||
const HalvingInterval = 210_000
|
||||
@@ -24,6 +24,9 @@ var (
|
||||
// Skippable is returned when got an error but it can be skipped or ignored and continue
|
||||
Skippable = errors.NewWithDepth(depth, "skippable")
|
||||
|
||||
// Retryable is returned when got an error but it can be retried
|
||||
Retryable = errors.NewWithDepth(depth, "retryable")
|
||||
|
||||
// Unsupported is returned when a feature or result is not supported
|
||||
Unsupported = errors.NewWithDepth(depth, "unsupported")
|
||||
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
type PublicError struct {
|
||||
err error
|
||||
message string
|
||||
code string // code is optional, it can be used to identify the error type
|
||||
}
|
||||
|
||||
func (p PublicError) Error() string {
|
||||
@@ -21,6 +22,10 @@ func (p PublicError) Message() string {
|
||||
return p.message
|
||||
}
|
||||
|
||||
func (p PublicError) Code() string {
|
||||
return p.code
|
||||
}
|
||||
|
||||
func (p PublicError) Unwrap() error {
|
||||
return p.err
|
||||
}
|
||||
@@ -29,6 +34,10 @@ func NewPublicError(message string) error {
|
||||
return withstack.WithStackDepth(&PublicError{err: errors.New(message), message: message}, 1)
|
||||
}
|
||||
|
||||
func NewPublicErrorWithCode(message string, code string) error {
|
||||
return withstack.WithStackDepth(&PublicError{err: errors.New(message), message: message, code: code}, 1)
|
||||
}
|
||||
|
||||
func WithPublicMessage(err error, prefix string) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
@@ -41,3 +50,16 @@ func WithPublicMessage(err error, prefix string) error {
|
||||
}
|
||||
return withstack.WithStackDepth(&PublicError{err: err, message: message}, 1)
|
||||
}
|
||||
|
||||
func WithPublicMessageCode(err error, prefix string, code string) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
var message string
|
||||
if prefix != "" {
|
||||
message = fmt.Sprintf("%s: %s", prefix, err.Error())
|
||||
} else {
|
||||
message = err.Error()
|
||||
}
|
||||
return withstack.WithStackDepth(&PublicError{err: err, message: message, code: code}, 1)
|
||||
}
|
||||
|
||||
@@ -1,22 +1,31 @@
|
||||
package common
|
||||
|
||||
import "github.com/btcsuite/btcd/chaincfg"
|
||||
import (
|
||||
"github.com/btcsuite/btcd/chaincfg"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger"
|
||||
)
|
||||
|
||||
type Network string
|
||||
|
||||
const (
|
||||
NetworkMainnet Network = "mainnet"
|
||||
NetworkTestnet Network = "testnet"
|
||||
NetworkMainnet Network = "mainnet"
|
||||
NetworkTestnet Network = "testnet"
|
||||
NetworkFractalMainnet Network = "fractal-mainnet"
|
||||
NetworkFractalTestnet Network = "fractal-testnet"
|
||||
)
|
||||
|
||||
var supportedNetworks = map[Network]struct{}{
|
||||
NetworkMainnet: {},
|
||||
NetworkTestnet: {},
|
||||
NetworkMainnet: {},
|
||||
NetworkTestnet: {},
|
||||
NetworkFractalMainnet: {},
|
||||
NetworkFractalTestnet: {},
|
||||
}
|
||||
|
||||
var chainParams = map[Network]*chaincfg.Params{
|
||||
NetworkMainnet: &chaincfg.MainNetParams,
|
||||
NetworkTestnet: &chaincfg.TestNet3Params,
|
||||
NetworkMainnet: &chaincfg.MainNetParams,
|
||||
NetworkTestnet: &chaincfg.TestNet3Params,
|
||||
NetworkFractalMainnet: &chaincfg.MainNetParams,
|
||||
NetworkFractalTestnet: &chaincfg.MainNetParams,
|
||||
}
|
||||
|
||||
func (n Network) IsSupported() bool {
|
||||
@@ -31,3 +40,15 @@ func (n Network) ChainParams() *chaincfg.Params {
|
||||
func (n Network) String() string {
|
||||
return string(n)
|
||||
}
|
||||
|
||||
func (n Network) HalvingInterval() uint64 {
|
||||
switch n {
|
||||
case NetworkMainnet, NetworkTestnet:
|
||||
return 210_000
|
||||
case NetworkFractalMainnet, NetworkFractalTestnet:
|
||||
return 2_100_000
|
||||
default:
|
||||
logger.Panic("invalid network")
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
@@ -47,3 +47,11 @@ modules:
|
||||
password: "password"
|
||||
db_name: "postgres"
|
||||
# url: "postgres://postgres:password@localhost:5432/postgres?sslmode=prefer" # [Optional] This will override other database credentials above.
|
||||
nodesale:
|
||||
postgres:
|
||||
host: "localhost"
|
||||
port: 5432
|
||||
user: "postgres"
|
||||
password: "P@ssw0rd"
|
||||
db_name: "postgres"
|
||||
last_block_default: 400
|
||||
@@ -243,39 +243,32 @@ func (d *BitcoinNodeDatasource) prepareRange(fromHeight, toHeight int64) (start,
|
||||
}
|
||||
|
||||
// GetTransaction fetch transaction from Bitcoin node
|
||||
func (d *BitcoinNodeDatasource) GetTransactionByHash(ctx context.Context, txHash chainhash.Hash) (*types.Transaction, error) {
|
||||
func (d *BitcoinNodeDatasource) GetRawTransactionAndHeightByTxHash(ctx context.Context, txHash chainhash.Hash) (*wire.MsgTx, int64, error) {
|
||||
rawTxVerbose, err := d.btcclient.GetRawTransactionVerbose(&txHash)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get raw transaction")
|
||||
return nil, 0, errors.Wrap(err, "failed to get raw transaction")
|
||||
}
|
||||
|
||||
blockHash, err := chainhash.NewHashFromStr(rawTxVerbose.BlockHash)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to parse block hash")
|
||||
return nil, 0, errors.Wrap(err, "failed to parse block hash")
|
||||
}
|
||||
block, err := d.btcclient.GetBlockVerboseTx(blockHash)
|
||||
block, err := d.btcclient.GetBlockVerbose(blockHash)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get block header")
|
||||
return nil, 0, errors.Wrap(err, "failed to get block header")
|
||||
}
|
||||
|
||||
// parse tx
|
||||
txBytes, err := hex.DecodeString(rawTxVerbose.Hex)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to decode transaction hex")
|
||||
return nil, 0, errors.Wrap(err, "failed to decode transaction hex")
|
||||
}
|
||||
var msgTx wire.MsgTx
|
||||
if err := msgTx.Deserialize(bytes.NewReader(txBytes)); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to deserialize transaction")
|
||||
}
|
||||
var txIndex uint32
|
||||
for i, tx := range block.Tx {
|
||||
if tx.Hex == rawTxVerbose.Hex {
|
||||
txIndex = uint32(i)
|
||||
break
|
||||
}
|
||||
return nil, 0, errors.Wrap(err, "failed to deserialize transaction")
|
||||
}
|
||||
|
||||
return types.ParseMsgTx(&msgTx, block.Height, *blockHash, txIndex), nil
|
||||
return &msgTx, block.Height, nil
|
||||
}
|
||||
|
||||
// GetBlockHeader fetch block header from Bitcoin node
|
||||
@@ -292,3 +285,12 @@ func (d *BitcoinNodeDatasource) GetBlockHeader(ctx context.Context, height int64
|
||||
|
||||
return types.ParseMsgBlockHeader(*block, height), nil
|
||||
}
|
||||
|
||||
func (d *BitcoinNodeDatasource) GetRawTransactionByTxHash(ctx context.Context, txHash chainhash.Hash) (*wire.MsgTx, error) {
|
||||
transaction, err := d.btcclient.GetRawTransaction(&txHash)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get raw transaction")
|
||||
}
|
||||
|
||||
return transaction.MsgTx(), nil
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/core/datasources"
|
||||
@@ -142,7 +143,7 @@ func (i *Indexer[T]) process(ctx context.Context) (err error) {
|
||||
// validate reorg from first input
|
||||
{
|
||||
remoteBlockHeader := firstInputHeader
|
||||
if !remoteBlockHeader.PrevBlock.IsEqual(&i.currentBlock.Hash) {
|
||||
if i.currentBlock.Hash != (chainhash.Hash{}) && !remoteBlockHeader.PrevBlock.IsEqual(&i.currentBlock.Hash) {
|
||||
logger.WarnContext(ctx, "Detected chain reorganization. Searching for fork point...",
|
||||
slogx.String("event", "reorg_detected"),
|
||||
slogx.Stringer("current_hash", i.currentBlock.Hash),
|
||||
@@ -215,7 +216,7 @@ func (i *Indexer[T]) process(ctx context.Context) (err error) {
|
||||
return errors.Wrapf(errs.InternalError, "input is not continuous, input[%d] height: %d, input[%d] height: %d", i-1, prevHeader.Height, i, header.Height)
|
||||
}
|
||||
|
||||
if !header.PrevBlock.IsEqual(&prevHeader.Hash) {
|
||||
if prevHeader.Hash != (chainhash.Hash{}) && !header.PrevBlock.IsEqual(&prevHeader.Hash) {
|
||||
logger.WarnContext(ctx, "Chain Reorganization occurred in the middle of batch fetching inputs, need to try to fetch again")
|
||||
|
||||
// end current round
|
||||
|
||||
165
docs/README_tr.md
Normal file
165
docs/README_tr.md
Normal file
@@ -0,0 +1,165 @@
|
||||
## Çeviriler
|
||||
- [English (İngilizce)](../README.md)
|
||||
|
||||
**Son Güncelleme:** 21 Ağustos 2024
|
||||
> **Not:** Bu belge, topluluk tarafından yapılmış bir çeviridir. Ana README.md dosyasındaki güncellemeler buraya otomatik olarak yansıtılmayabilir. En güncel bilgiler için [İngilizce sürümü](../README.md) inceleyin.
|
||||
|
||||
|
||||
# Gaze Indexer
|
||||
|
||||
Gaze Indexer, değiştirilebilir token protokolleri arasında **Birleştirilmiş Tutarlı API'lere** sahip Bitcoin meta-protokolleri için açık kaynaklı ve modüler bir indeksleme istemcisidir.
|
||||
|
||||
Gaze Indexer, kullanıcıların tüm modülleri tek bir komutla tek bir monolitik örnekte veya dağıtılmış bir mikro hizmet kümesi olarak çalıştırmasına olanak tanıyan **modülerlik** göz önünde bulundurularak oluşturulmuştur.
|
||||
|
||||
Gaze Indexer, verimli veri getirme, yeniden düzenleme algılama ve veritabanı taşıma aracı ile HERHANGİ bir meta-protokol indeksleyici oluşturmak için bir temel görevi görür.
|
||||
Bu, geliştiricilerin **gerçekten** önemli olana odaklanmasını sağlar: Meta-protokol indeksleme mantığı. Yeni meta-protokoller, yeni modüller uygulanarak kolayca eklenebilir.
|
||||
|
||||
- [Modüller](#modules)
|
||||
- [1. Runes](#1-runes)
|
||||
- [Kurulum](#installation)
|
||||
- [Önkoşullar](#prerequisites)
|
||||
- [1. Donanım Gereksinimleri](#1-hardware-requirements)
|
||||
- [2. Bitcoin Core RPC sunucusunu hazırlayın.](#2-prepare-bitcoin-core-rpc-server)
|
||||
- [3. Veritabanı hazırlayın.](#3-prepare-database)
|
||||
- [4. `config.yaml` dosyasını hazırlayın.](#4-prepare-configyaml-file)
|
||||
- [Docker ile yükle (önerilir)](#install-with-docker-recommended)
|
||||
- [Kaynaktan yükle](#install-from-source)
|
||||
|
||||
## Modüller
|
||||
|
||||
### 1. Runes
|
||||
|
||||
Runes Dizinleyici ilk meta-protokol dizinleyicimizdir. Bitcoin işlemlerini kullanarak Runes durumlarını, işlemlerini, rün taşlarını ve bakiyelerini indeksler.
|
||||
Geçmiş Runes verilerini sorgulamak için bir dizi API ile birlikte gelir. Tüm ayrıntılar için [API Referansı] (https://api-docs.gaze.network) adresimize bakın.
|
||||
|
||||
|
||||
## Kurulum
|
||||
|
||||
### Önkoşullar
|
||||
|
||||
#### 1. Donanım Gereksinimleri
|
||||
|
||||
Her modül farklı donanım gereksinimleri gerektirir.
|
||||
| Modül | CPU | RAM |
|
||||
| ------ | --------- | ---- |
|
||||
| Runes | 0,5 çekirdek | 1 GB |
|
||||
|
||||
#### 2. Bitcoin Core RPC sunucusunu hazırlayın.
|
||||
|
||||
Gaze Indexer'ın işlem verilerini kendi barındırdığı ya da QuickNode gibi yönetilen sağlayıcıları kullanan bir Bitcoin Core RPC'den alması gerekir.
|
||||
Bir Bitcoin Core'u kendiniz barındırmak için bkz. https://bitcoin.org/en/full-node.
|
||||
|
||||
#### 3. Veritabanını hazırlayın.
|
||||
|
||||
Gaze Indexer PostgreSQL için birinci sınıf desteğe sahiptir. Diğer veritabanlarını kullanmak isterseniz, her modülün Veri Ağ Geçidi arayüzünü karşılayan kendi veritabanı havuzunuzu uygulayabilirsiniz.
|
||||
İşte her modül için minimum veritabanı disk alanı gereksinimimiz.
|
||||
| Modül | Veritabanı Depolama Alanı (mevcut) | Veritabanı Depolama Alanı (1 yıl içinde) |
|
||||
| ------ | -------------------------- | ---------------------------- |
|
||||
| Runes | 10 GB | 150 GB |
|
||||
|
||||
#### 4. config.yaml` dosyasını hazırlayın.
|
||||
|
||||
```yaml
|
||||
# config.yaml
|
||||
logger:
|
||||
output: TEXT # Output format for logs. current supported formats: "TEXT" | "JSON" | "GCP"
|
||||
debug: false
|
||||
|
||||
# Network to run the indexer on. Current supported networks: "mainnet" | "testnet"
|
||||
network: mainnet
|
||||
|
||||
# Bitcoin Core RPC configuration options.
|
||||
bitcoin_node:
|
||||
host: "" # [Required] Host of Bitcoin Core RPC (without https://)
|
||||
user: "" # Username to authenticate with Bitcoin Core RPC
|
||||
pass: "" # Password to authenticate with Bitcoin Core RPC
|
||||
disable_tls: false # Set to true to disable tls
|
||||
|
||||
# Block reporting configuration options. See Block Reporting section for more details.
|
||||
reporting:
|
||||
disabled: false # Set to true to disable block reporting to Gaze Network. Default is false.
|
||||
base_url: "https://indexer.api.gaze.network" # Defaults to "https://indexer.api.gaze.network" if left empty
|
||||
name: "" # [Required if not disabled] Name of this indexer to show on the Gaze Network dashboard
|
||||
website_url: "" # Public website URL to show on the dashboard. Can be left empty.
|
||||
indexer_api_url: "" # Public url to access this indexer's API. Can be left empty if you want to keep your indexer private.
|
||||
|
||||
# HTTP server configuration options.
|
||||
http_server:
|
||||
port: 8080 # Port to run the HTTP server on for modules with HTTP API handlers.
|
||||
|
||||
# Meta-protocol modules configuration options.
|
||||
modules:
|
||||
# Configuration options for Runes module. Can be removed if not used.
|
||||
runes:
|
||||
database: "postgres" # Database to store Runes data. current supported databases: "postgres"
|
||||
datasource: "bitcoin-node" # Data source to be used for Bitcoin data. current supported data sources: "bitcoin-node".
|
||||
api_handlers: # API handlers to enable. current supported handlers: "http"
|
||||
- http
|
||||
postgres:
|
||||
host: "localhost"
|
||||
port: 5432
|
||||
user: "postgres"
|
||||
password: "password"
|
||||
db_name: "postgres"
|
||||
# url: "postgres://postgres:password@localhost:5432/postgres?sslmode=prefer" # [Optional] This will override other database credentials above.
|
||||
```
|
||||
|
||||
### Docker ile yükleyin (önerilir)
|
||||
|
||||
Kurulum kılavuzumuz için `docker-compose` kullanacağız. Docker-compose.yaml` dosyasının `config.yaml` dosyası ile aynı dizinde olduğundan emin olun.
|
||||
|
||||
```yaml
|
||||
# docker-compose.yaml
|
||||
services:
|
||||
gaze-indexer:
|
||||
image: ghcr.io/gaze-network/gaze-indexer:v0.2.1
|
||||
container_name: gaze-indexer
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- 8080:8080 # Expose HTTP server port to host
|
||||
volumes:
|
||||
- "./config.yaml:/app/config.yaml" # mount config.yaml file to the container as "/app/config.yaml"
|
||||
command: ["/app/main", "run", "--modules", "runes"] # Put module flags after "run" commands to select which modules to run.
|
||||
```
|
||||
|
||||
### Kaynaktan yükleyin
|
||||
|
||||
1. Go` sürüm 1.22 veya daha üstünü yükleyin. Go kurulum kılavuzuna bakın [burada](https://go.dev/doc/install).
|
||||
2. Bu depoyu klonlayın.
|
||||
|
||||
```bash
|
||||
git clone https://github.com/gaze-network/gaze-indexer.git
|
||||
cd gaze-indexer
|
||||
```
|
||||
|
||||
3. Ana ikili dosyayı oluşturun.
|
||||
|
||||
```bash
|
||||
# Bağımlılıkları al
|
||||
go mod indir
|
||||
|
||||
# Ana ikili dosyayı oluşturun
|
||||
go build -o gaze main.go
|
||||
```
|
||||
|
||||
4. Veritabanı geçişlerini `migrate` komutu ve modül bayrakları ile çalıştırın.
|
||||
|
||||
```bash
|
||||
./gaze migrate up --runes --database postgres://postgres:password@localhost:5432/postgres
|
||||
```
|
||||
|
||||
5. Dizinleyiciyi `run` komutu ve modül bayrakları ile başlatın.
|
||||
|
||||
```bash
|
||||
./gaze run --modules runes
|
||||
```
|
||||
|
||||
Eğer `config.yaml` dosyası `./app/config.yaml` adresinde bulunmuyorsa, `config.yaml` dosyasının yolunu belirtmek için `--config` bayrağını kullanın.
|
||||
|
||||
```bash
|
||||
./gaze run --modules runes --config /path/to/config.yaml
|
||||
```
|
||||
|
||||
|
||||
## Çeviriler
|
||||
- [English (İngilizce)](../README.md)
|
||||
7
go.mod
7
go.mod
@@ -25,12 +25,15 @@ require (
|
||||
github.com/valyala/fasthttp v1.51.0
|
||||
go.uber.org/automaxprocs v1.5.3
|
||||
golang.org/x/sync v0.7.0
|
||||
google.golang.org/protobuf v1.33.0
|
||||
)
|
||||
|
||||
require github.com/stretchr/objx v0.5.2 // indirect
|
||||
|
||||
require (
|
||||
github.com/andybalholm/brotli v1.0.5 // indirect
|
||||
github.com/bitonicnl/verify-signed-message v0.7.1
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.3 // indirect
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.3
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect
|
||||
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd // indirect
|
||||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 // indirect
|
||||
@@ -38,7 +41,7 @@ require (
|
||||
github.com/cockroachdb/redact v1.1.5 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.1 // indirect
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/getsentry/sentry-go v0.18.0 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
|
||||
6
go.sum
6
go.sum
@@ -99,6 +99,7 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
|
||||
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
@@ -221,6 +222,8 @@ github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMV
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
@@ -230,6 +233,7 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
|
||||
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
|
||||
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
@@ -310,6 +314,8 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
|
||||
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common"
|
||||
nodesaleconfig "github.com/gaze-network/indexer-network/modules/nodesale/config"
|
||||
runesconfig "github.com/gaze-network/indexer-network/modules/runes/config"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
|
||||
@@ -61,7 +62,8 @@ type BitcoinNodeClient struct {
|
||||
}
|
||||
|
||||
type Modules struct {
|
||||
Runes runesconfig.Config `mapstructure:"runes"`
|
||||
Runes runesconfig.Config `mapstructure:"runes"`
|
||||
NodeSale nodesaleconfig.Config `mapstructure:"nodesale"`
|
||||
}
|
||||
|
||||
type HTTPServerConfig struct {
|
||||
|
||||
99
modules/nodesale/api/httphandler/deploy.go
Normal file
99
modules/nodesale/api/httphandler/deploy.go
Normal file
@@ -0,0 +1,99 @@
|
||||
package httphandler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/protobuf"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"google.golang.org/protobuf/encoding/protojson"
|
||||
)
|
||||
|
||||
type deployRequest struct {
|
||||
DeployID string `params:"deployId"`
|
||||
}
|
||||
|
||||
type tierResponse struct {
|
||||
PriceSat uint32 `json:"priceSat"`
|
||||
Limit uint32 `json:"limit"`
|
||||
MaxPerAddress uint32 `json:"maxPerAddress"`
|
||||
Sold int64 `json:"sold"`
|
||||
}
|
||||
|
||||
type deployResponse struct {
|
||||
Id string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
StartsAt int64 `json:"startsAt"`
|
||||
EndsAt int64 `json:"endsAt"`
|
||||
Tiers []tierResponse `json:"tiers"`
|
||||
SellerPublicKey string `json:"sellerPublicKey"`
|
||||
MaxPerAddress uint32 `json:"maxPerAddress"`
|
||||
DeployTxHash string `json:"deployTxHash"`
|
||||
}
|
||||
|
||||
func (h *handler) deployHandler(ctx *fiber.Ctx) error {
|
||||
var request deployRequest
|
||||
err := ctx.ParamsParser(&request)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "cannot parse param")
|
||||
}
|
||||
var blockHeight uint64
|
||||
var txIndex uint32
|
||||
count, err := fmt.Sscanf(request.DeployID, "%d-%d", &blockHeight, &txIndex)
|
||||
if count != 2 || err != nil {
|
||||
return errs.NewPublicError("Invalid deploy ID")
|
||||
}
|
||||
deploys, err := h.nodeSaleDg.GetNodeSale(ctx.UserContext(), datagateway.GetNodeSaleParams{
|
||||
BlockHeight: blockHeight,
|
||||
TxIndex: txIndex,
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Cannot get NodeSale from db")
|
||||
}
|
||||
if len(deploys) < 1 {
|
||||
return errs.NewPublicError("NodeSale not found")
|
||||
}
|
||||
|
||||
deploy := deploys[0]
|
||||
|
||||
nodeCount, err := h.nodeSaleDg.GetNodeCountByTierIndex(ctx.UserContext(), datagateway.GetNodeCountByTierIndexParams{
|
||||
SaleBlock: deploy.BlockHeight,
|
||||
SaleTxIndex: deploy.TxIndex,
|
||||
FromTier: 0,
|
||||
ToTier: uint32(len(deploy.Tiers) - 1),
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Cannot get node count from db")
|
||||
}
|
||||
|
||||
tiers := make([]protobuf.Tier, len(deploy.Tiers))
|
||||
tierResponses := make([]tierResponse, len(deploy.Tiers))
|
||||
for i, tierJson := range deploy.Tiers {
|
||||
tier := &tiers[i]
|
||||
err := protojson.Unmarshal(tierJson, tier)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed to decode tiers json")
|
||||
}
|
||||
tierResponses[i].Limit = tiers[i].Limit
|
||||
tierResponses[i].MaxPerAddress = tiers[i].MaxPerAddress
|
||||
tierResponses[i].PriceSat = tiers[i].PriceSat
|
||||
tierResponses[i].Sold = nodeCount[i].Count
|
||||
}
|
||||
|
||||
err = ctx.JSON(&deployResponse{
|
||||
Id: request.DeployID,
|
||||
Name: deploy.Name,
|
||||
StartsAt: deploy.StartsAt.UTC().Unix(),
|
||||
EndsAt: deploy.EndsAt.UTC().Unix(),
|
||||
Tiers: tierResponses,
|
||||
SellerPublicKey: deploy.SellerPublicKey,
|
||||
MaxPerAddress: deploy.MaxPerAddress,
|
||||
DeployTxHash: deploy.DeployTxHash,
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Go fiber cannot parse JSON")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
56
modules/nodesale/api/httphandler/events.go
Normal file
56
modules/nodesale/api/httphandler/events.go
Normal file
@@ -0,0 +1,56 @@
|
||||
package httphandler
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/protobuf"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
type eventRequest struct {
|
||||
WalletAddress string `query:"walletAddress"`
|
||||
}
|
||||
|
||||
type eventResposne struct {
|
||||
TxHash string `json:"txHash"`
|
||||
BlockHeight int64 `json:"blockHeight"`
|
||||
TxIndex int32 `json:"txIndex"`
|
||||
WalletAddress string `json:"walletAddress"`
|
||||
Action string `json:"action"`
|
||||
ParsedMessage json.RawMessage `json:"parsedMessage"`
|
||||
BlockTimestamp time.Time `json:"blockTimestamp"`
|
||||
BlockHash string `json:"blockHash"`
|
||||
}
|
||||
|
||||
func (h *handler) eventsHandler(ctx *fiber.Ctx) error {
|
||||
var request eventRequest
|
||||
err := ctx.QueryParser(&request)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "cannot parse query")
|
||||
}
|
||||
|
||||
events, err := h.nodeSaleDg.GetEventsByWallet(ctx.UserContext(), request.WalletAddress)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Can't get events from db")
|
||||
}
|
||||
|
||||
responses := make([]eventResposne, len(events))
|
||||
for i, event := range events {
|
||||
responses[i].TxHash = event.TxHash
|
||||
responses[i].BlockHeight = event.BlockHeight
|
||||
responses[i].TxIndex = event.TxIndex
|
||||
responses[i].WalletAddress = event.WalletAddress
|
||||
responses[i].Action = protobuf.Action_name[event.Action]
|
||||
responses[i].ParsedMessage = event.ParsedMessage
|
||||
responses[i].BlockTimestamp = event.BlockTimestamp
|
||||
responses[i].BlockHash = event.BlockHash
|
||||
}
|
||||
|
||||
err = ctx.JSON(responses)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Go fiber cannot parse JSON")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
15
modules/nodesale/api/httphandler/handler.go
Normal file
15
modules/nodesale/api/httphandler/handler.go
Normal file
@@ -0,0 +1,15 @@
|
||||
package httphandler
|
||||
|
||||
import (
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
|
||||
)
|
||||
|
||||
type handler struct {
|
||||
nodeSaleDg datagateway.NodeSaleDataGateway
|
||||
}
|
||||
|
||||
func New(datagateway datagateway.NodeSaleDataGateway) *handler {
|
||||
h := handler{}
|
||||
h.nodeSaleDg = datagateway
|
||||
return &h
|
||||
}
|
||||
26
modules/nodesale/api/httphandler/info.go
Normal file
26
modules/nodesale/api/httphandler/info.go
Normal file
@@ -0,0 +1,26 @@
|
||||
package httphandler
|
||||
|
||||
import (
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
type infoResponse struct {
|
||||
IndexedBlockHeight int64 `json:"indexedBlockHeight"`
|
||||
IndexedBlockHash string `json:"indexedBlockHash"`
|
||||
}
|
||||
|
||||
func (h *handler) infoHandler(ctx *fiber.Ctx) error {
|
||||
block, err := h.nodeSaleDg.GetLastProcessedBlock(ctx.UserContext())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Cannot get last processed block")
|
||||
}
|
||||
err = ctx.JSON(infoResponse{
|
||||
IndexedBlockHeight: block.BlockHeight,
|
||||
IndexedBlockHash: block.BlockHash,
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Go fiber cannot parse JSON")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
82
modules/nodesale/api/httphandler/nodes.go
Normal file
82
modules/nodesale/api/httphandler/nodes.go
Normal file
@@ -0,0 +1,82 @@
|
||||
package httphandler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
// nodeRequest holds the query parameters accepted by the /nodes endpoint.
type nodeRequest struct {
	// DeployId identifies the deployment as "<blockHeight>-<txIndex>".
	DeployId           string `query:"deployId"`
	OwnerPublicKey     string `query:"ownerPublicKey"`
	DelegateePublicKey string `query:"delegateePublicKey"`
}

// nodeResponse is the JSON representation of a single purchased node.
type nodeResponse struct {
	DeployId            string `json:"deployId"`
	NodeId              uint32 `json:"nodeId"`
	TierIndex           int32  `json:"tierIndex"`
	DelegatedTo         string `json:"delegatedTo"`
	OwnerPublicKey      string `json:"ownerPublicKey"`
	PurchaseTxHash      string `json:"purchaseTxHash"`
	DelegateTxHash      string `json:"delegateTxHash"`
	PurchaseBlockHeight int32  `json:"purchaseBlockHeight"`
}

// nodesHandler lists the nodes of a deployment, optionally filtered by owner
// and delegatee public key. The deployment is identified by the "deployId"
// query parameter ("<blockHeight>-<txIndex>").
func (h *handler) nodesHandler(ctx *fiber.Ctx) error {
	var request nodeRequest
	err := ctx.QueryParser(&request)
	if err != nil {
		return errors.Wrap(err, "cannot parse query")
	}

	ownerPublicKey := request.OwnerPublicKey
	delegateePublicKey := request.DelegateePublicKey

	// Parse "<blockHeight>-<txIndex>" out of the deploy ID; both numbers
	// are required, so a missing or malformed ID is a public error.
	var blockHeight int64
	var txIndex int32
	count, err := fmt.Sscanf(request.DeployId, "%d-%d", &blockHeight, &txIndex)
	if count != 2 || err != nil {
		return errs.NewPublicError("Invalid deploy ID")
	}

	var nodes []entity.Node
	if ownerPublicKey == "" {
		// No owner filter: return every node of the deployment.
		nodes, err = h.nodeSaleDg.GetNodesByDeployment(ctx.UserContext(), blockHeight, txIndex)
		if err != nil {
			return errors.Wrap(err, "Can't get nodes from db")
		}
	} else {
		// Filter by owner and delegatee. NOTE(review): an empty
		// delegateePublicKey here matches only non-delegated nodes —
		// confirm that is the intended semantics.
		nodes, err = h.nodeSaleDg.GetNodesByPubkey(ctx.UserContext(), datagateway.GetNodesByPubkeyParams{
			SaleBlock:      blockHeight,
			SaleTxIndex:    txIndex,
			OwnerPublicKey: ownerPublicKey,
			DelegatedTo:    delegateePublicKey,
		})
		if err != nil {
			return errors.Wrap(err, "Can't get nodes from db")
		}
	}

	responses := make([]nodeResponse, len(nodes))
	for i, node := range nodes {
		responses[i].DeployId = request.DeployId
		responses[i].NodeId = node.NodeID
		responses[i].TierIndex = node.TierIndex
		responses[i].DelegatedTo = node.DelegatedTo
		responses[i].OwnerPublicKey = node.OwnerPublicKey
		responses[i].PurchaseTxHash = node.PurchaseTxHash
		responses[i].DelegateTxHash = node.DelegateTxHash
		// NOTE(review): this assigns the deployment's txIndex to
		// PurchaseBlockHeight, which looks like a bug — the field name
		// suggests the block height of the purchase, a value not present
		// on entity.Node. Confirm the intended value.
		responses[i].PurchaseBlockHeight = txIndex
	}

	err = ctx.JSON(responses)
	if err != nil {
		return errors.Wrap(err, "Go fiber cannot parse JSON")
	}
	return nil
}
|
||||
16
modules/nodesale/api/httphandler/routes.go
Normal file
16
modules/nodesale/api/httphandler/routes.go
Normal file
@@ -0,0 +1,16 @@
|
||||
package httphandler
|
||||
|
||||
import (
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
func (h *handler) Mount(router fiber.Router) error {
|
||||
r := router.Group("/nodesale/v1")
|
||||
|
||||
r.Get("/info", h.infoHandler)
|
||||
r.Get("/deploy/:deployId", h.deployHandler)
|
||||
r.Get("/nodes", h.nodesHandler)
|
||||
r.Get("/events", h.eventsHandler)
|
||||
|
||||
return nil
|
||||
}
|
||||
8
modules/nodesale/config/config.go
Normal file
8
modules/nodesale/config/config.go
Normal file
@@ -0,0 +1,8 @@
|
||||
package config
|
||||
|
||||
import "github.com/gaze-network/indexer-network/internal/postgres"
|
||||
|
||||
// Config holds the configuration for the nodesale module.
type Config struct {
	// Postgres holds the connection settings for the module's database.
	Postgres postgres.Config `mapstructure:"postgres"`
	// LastBlockDefault — presumably the block height indexing starts from
	// when no block has been processed yet; confirm against the processor.
	LastBlockDefault int64 `mapstructure:"last_block_default"`
}
|
||||
@@ -0,0 +1,9 @@
|
||||
-- Down migration: drop all nodesale tables.
-- Child tables (nodes, node_sales) are dropped before the events table they
-- reference, so foreign-key constraints never block the drops.
BEGIN;

DROP TABLE IF EXISTS nodes;
DROP TABLE IF EXISTS node_sales;
DROP TABLE IF EXISTS events;
DROP TABLE IF EXISTS blocks;


COMMIT;
|
||||
@@ -0,0 +1,64 @@
|
||||
-- Up migration: create the nodesale schema (blocks, events, node_sales, nodes).
BEGIN;

-- blocks records which block heights/hashes this module has processed.
CREATE TABLE IF NOT EXISTS blocks (
    "block_height" BIGINT NOT NULL,
    "block_hash" TEXT NOT NULL,
    "module" TEXT NOT NULL,
    PRIMARY KEY("block_height", "block_hash")
);

-- events stores every node-sale protocol message seen on-chain — valid or
-- not — keyed by its transaction hash. "reason" explains why an event was
-- rejected; "parsed_message" is the decoded protobuf payload as JSON.
CREATE TABLE IF NOT EXISTS events (
    "tx_hash" TEXT NOT NULL PRIMARY KEY,
    "block_height" BIGINT NOT NULL,
    "tx_index" INTEGER NOT NULL,
    "wallet_address" TEXT NOT NULL,
    "valid" BOOLEAN NOT NULL,
    "action" INTEGER NOT NULL,
    "raw_message" BYTEA NOT NULL,
    "parsed_message" JSONB NOT NULL DEFAULT '{}',
    "block_timestamp" TIMESTAMP NOT NULL,
    "block_hash" TEXT NOT NULL,
    "metadata" JSONB NOT NULL DEFAULT '{}',
    "reason" TEXT NOT NULL DEFAULT ''
);

-- Sentinel row with an empty tx_hash. Columns defaulting to '' that
-- reference events(tx_hash) — e.g. nodes.delegate_tx_hash below — always
-- have a valid foreign-key target because of this row.
INSERT INTO events("tx_hash", "block_height", "tx_index",
                   "wallet_address", "valid", "action",
                   "raw_message", "parsed_message", "block_timestamp",
                   "block_hash", "metadata")
VALUES ('', -1, -1,
        '', false, -1,
        '', '{}', NOW(),
        '', '{}');

-- node_sales stores deployments, keyed by the block/tx where they were
-- deployed. "tiers" is an array of per-tier JSON blobs.
CREATE TABLE IF NOT EXISTS node_sales (
    "block_height" BIGINT NOT NULL,
    "tx_index" INTEGER NOT NULL,
    "name" TEXT NOT NULL,
    "starts_at" TIMESTAMP NOT NULL,
    "ends_at" TIMESTAMP NOT NULL,
    "tiers" JSONB[] NOT NULL,
    "seller_public_key" TEXT NOT NULL,
    "max_per_address" INTEGER NOT NULL,
    "deploy_tx_hash" TEXT NOT NULL REFERENCES events(tx_hash) ON DELETE CASCADE,
    "max_discount_percentage" INTEGER NOT NULL,
    "seller_wallet" TEXT NOT NULL,
    PRIMARY KEY ("block_height", "tx_index")
);

-- nodes stores purchased nodes per deployment. delegate_tx_hash falls back
-- to '' (the sentinel event above) when the delegating event is deleted.
CREATE TABLE IF NOT EXISTS nodes (
    "sale_block" BIGINT NOT NULL,
    "sale_tx_index" INTEGER NOT NULL,
    "node_id" INTEGER NOT NULL,
    "tier_index" INTEGER NOT NULL,
    "delegated_to" TEXT NOT NULL DEFAULT '',
    "owner_public_key" TEXT NOT NULL,
    "purchase_tx_hash" TEXT NOT NULL REFERENCES events(tx_hash) ON DELETE CASCADE,
    "delegate_tx_hash" TEXT NOT NULL DEFAULT '' REFERENCES events(tx_hash) ON DELETE SET DEFAULT,
    PRIMARY KEY("sale_block", "sale_tx_index", "node_id"),
    FOREIGN KEY("sale_block", "sale_tx_index") REFERENCES node_sales("block_height", "tx_index")
);




COMMIT;
|
||||
15
modules/nodesale/database/postgresql/queries/blocks.sql
Normal file
15
modules/nodesale/database/postgresql/queries/blocks.sql
Normal file
@@ -0,0 +1,15 @@
|
||||
-- name: GetLastProcessedBlock :one
-- Returns the highest-height block recorded by this module.
SELECT * FROM blocks ORDER BY block_height DESC LIMIT 1;


-- name: GetBlock :one
-- Returns the processed block at the given height.
SELECT * FROM blocks
WHERE "block_height" = $1;

-- name: RemoveBlockFrom :execrows
-- Deletes all processed blocks at or above from_block (used on reorg).
DELETE FROM blocks
WHERE "block_height" >= @from_block;

-- name: CreateBlock :exec
INSERT INTO blocks ("block_height", "block_hash", "module")
VALUES ($1, $2, $3);
|
||||
14
modules/nodesale/database/postgresql/queries/events.sql
Normal file
14
modules/nodesale/database/postgresql/queries/events.sql
Normal file
@@ -0,0 +1,14 @@
|
||||
-- name: RemoveEventsFromBlock :execrows
-- Deletes all events at or above from_block (used on reorg).
DELETE FROM events
WHERE "block_height" >= @from_block;

-- name: CreateEvent :exec
INSERT INTO events ("tx_hash", "block_height", "tx_index", "wallet_address", "valid", "action",
                    "raw_message", "parsed_message", "block_timestamp", "block_hash", "metadata",
                    "reason")
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12);

-- name: GetEventsByWallet :many
-- Returns every event (valid or not) submitted by the given wallet address.
SELECT *
FROM events
WHERE wallet_address = $1;
|
||||
57
modules/nodesale/database/postgresql/queries/nodes.sql
Normal file
57
modules/nodesale/database/postgresql/queries/nodes.sql
Normal file
@@ -0,0 +1,57 @@
|
||||
-- name: ClearDelegate :execrows
-- Resets delegated_to on nodes whose delegating event was removed
-- (delegate_tx_hash fell back to its '' default).
UPDATE nodes
SET "delegated_to" = ''
WHERE "delegate_tx_hash" = '';

-- name: SetDelegates :execrows
-- Delegates the listed nodes of a deployment to @delegatee.
-- NOTE(review): this mixes named (@delegatee, @node_ids) and positional
-- ($1..$3) parameters — verify the generated sqlc argument order.
UPDATE nodes
SET delegated_to = @delegatee, delegate_tx_hash = $3
WHERE sale_block = $1 AND
      sale_tx_index = $2 AND
      node_id = ANY (@node_ids::int[]);

-- name: GetNodesByIds :many
SELECT *
FROM nodes
WHERE sale_block = $1 AND
      sale_tx_index = $2 AND
      node_id = ANY (@node_ids::int[]);


-- name: GetNodesByOwner :many
SELECT *
FROM nodes
WHERE sale_block = $1 AND
      sale_tx_index = $2 AND
      owner_public_key = $3
ORDER BY tier_index;

-- name: GetNodesByPubkey :many
-- Nodes of a deployment filtered by owner and current delegatee.
SELECT nodes.*
FROM nodes JOIN events ON nodes.purchase_tx_hash = events.tx_hash
WHERE sale_block = $1 AND
      sale_tx_index = $2 AND
      owner_public_key = $3 AND
      delegated_to = $4;

-- name: CreateNode :exec
INSERT INTO nodes (sale_block, sale_tx_index, node_id, tier_index, delegated_to, owner_public_key, purchase_tx_hash, delegate_tx_hash)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8);

-- name: GetNodeCountByTierIndex :many
-- For each tier index in [from_tier, to_tier], counts the nodes sold in that
-- tier for the deployment; the LEFT JOIN over generate_series yields a zero
-- count for tiers with no sales.
SELECT (tiers.tier_index)::int AS tier_index, count(nodes.tier_index)
FROM generate_series(@from_tier::int,@to_tier::int) AS tiers(tier_index)
    LEFT JOIN
    (SELECT *
     FROM nodes
     WHERE sale_block = $1 AND
           sale_tx_index= $2)
    AS nodes ON tiers.tier_index = nodes.tier_index
GROUP BY tiers.tier_index
ORDER BY tiers.tier_index;

-- name: GetNodesByDeployment :many
SELECT *
FROM nodes
WHERE sale_block = $1 AND
      sale_tx_index = $2;
|
||||
@@ -0,0 +1,9 @@
|
||||
-- name: CreateNodeSale :exec
INSERT INTO node_sales ("block_height", "tx_index", "name", "starts_at", "ends_at", "tiers", "seller_public_key", "max_per_address", "deploy_tx_hash", "max_discount_percentage", "seller_wallet")
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11);

-- name: GetNodeSale :many
-- Returns the deployment identified by (block_height, tx_index); at most one
-- row can match since that pair is the table's primary key.
SELECT *
FROM node_sales
WHERE block_height = $1 AND
      tx_index = $2;
|
||||
3
modules/nodesale/database/postgresql/queries/test.sql
Normal file
3
modules/nodesale/database/postgresql/queries/test.sql
Normal file
@@ -0,0 +1,3 @@
|
||||
-- name: ClearEvents :exec
-- Test helper: deletes every event except the '' sentinel row that
-- empty-string foreign keys depend on.
DELETE FROM events
WHERE tx_hash <> '';
|
||||
1135
modules/nodesale/datagateway/mocks/NodeSaleDataGatewayWithTx.go
Normal file
1135
modules/nodesale/datagateway/mocks/NodeSaleDataGatewayWithTx.go
Normal file
File diff suppressed because it is too large
Load Diff
77
modules/nodesale/datagateway/nodesale.go
Normal file
77
modules/nodesale/datagateway/nodesale.go
Normal file
@@ -0,0 +1,77 @@
|
||||
package datagateway
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
|
||||
)
|
||||
|
||||
// NodeSaleDataGateway is the persistence interface for the nodesale module.
// It mirrors the sqlc-generated queries over blocks, events, node_sales and
// nodes, and can open a transactional variant of itself via BeginNodeSaleTx.
type NodeSaleDataGateway interface {
	// BeginNodeSaleTx starts a DB transaction and returns a gateway bound to it.
	BeginNodeSaleTx(ctx context.Context) (NodeSaleDataGatewayWithTx, error)
	CreateBlock(ctx context.Context, arg entity.Block) error
	GetBlock(ctx context.Context, blockHeight int64) (*entity.Block, error)
	GetLastProcessedBlock(ctx context.Context) (*entity.Block, error)
	// RemoveBlockFrom deletes processed blocks at or above fromBlock and
	// returns the number of rows removed (reorg handling).
	RemoveBlockFrom(ctx context.Context, fromBlock int64) (int64, error)
	RemoveEventsFromBlock(ctx context.Context, fromBlock int64) (int64, error)
	ClearDelegate(ctx context.Context) (int64, error)
	GetNodesByIds(ctx context.Context, arg GetNodesByIdsParams) ([]entity.Node, error)
	CreateEvent(ctx context.Context, arg entity.NodeSaleEvent) error
	SetDelegates(ctx context.Context, arg SetDelegatesParams) (int64, error)
	CreateNodeSale(ctx context.Context, arg entity.NodeSale) error
	GetNodeSale(ctx context.Context, arg GetNodeSaleParams) ([]entity.NodeSale, error)
	GetNodesByOwner(ctx context.Context, arg GetNodesByOwnerParams) ([]entity.Node, error)
	CreateNode(ctx context.Context, arg entity.Node) error
	GetNodeCountByTierIndex(ctx context.Context, arg GetNodeCountByTierIndexParams) ([]GetNodeCountByTierIndexRow, error)
	GetNodesByPubkey(ctx context.Context, arg GetNodesByPubkeyParams) ([]entity.Node, error)
	GetNodesByDeployment(ctx context.Context, saleBlock int64, saleTxIndex int32) ([]entity.Node, error)
	GetEventsByWallet(ctx context.Context, walletAddress string) ([]entity.NodeSaleEvent, error)
}

// NodeSaleDataGatewayWithTx is a NodeSaleDataGateway bound to an open
// transaction, adding Commit/Rollback via the embedded Tx interface.
type NodeSaleDataGatewayWithTx interface {
	NodeSaleDataGateway
	Tx
}

// GetNodesByIdsParams identifies a set of nodes within one deployment.
type GetNodesByIdsParams struct {
	SaleBlock   uint64
	SaleTxIndex uint32
	NodeIds     []uint32
}

// SetDelegatesParams delegates the listed nodes of a deployment to Delegatee,
// recording DelegateTxHash as the delegating transaction.
type SetDelegatesParams struct {
	SaleBlock      uint64
	SaleTxIndex    int32
	Delegatee      string
	DelegateTxHash string
	NodeIds        []uint32
}

type GetNodeSaleParams struct {
	BlockHeight uint64
	TxIndex     uint32
}

type GetNodesByOwnerParams struct {
	SaleBlock      uint64
	SaleTxIndex    uint32
	OwnerPublicKey string
}

// GetNodeCountByTierIndexParams selects the inclusive tier range
// [FromTier, ToTier] to count within one deployment.
type GetNodeCountByTierIndexParams struct {
	SaleBlock   uint64
	SaleTxIndex uint32
	FromTier    uint32
	ToTier      uint32
}

// GetNodeCountByTierIndexRow pairs a tier index with its sold-node count.
type GetNodeCountByTierIndexRow struct {
	TierIndex int32
	Count     int64
}

// GetNodesByPubkeyParams filters nodes by deployment, owner and delegatee.
// NOTE(review): SaleBlock/SaleTxIndex are signed here but unsigned in the
// sibling param structs above — consider unifying the types.
type GetNodesByPubkeyParams struct {
	SaleBlock      int64
	SaleTxIndex    int32
	OwnerPublicKey string
	DelegatedTo    string
}
|
||||
12
modules/nodesale/datagateway/tx.go
Normal file
12
modules/nodesale/datagateway/tx.go
Normal file
@@ -0,0 +1,12 @@
|
||||
package datagateway
|
||||
|
||||
import "context"
|
||||
|
||||
type Tx interface {
|
||||
// Commit commits the DB transaction. All changes made after Begin() will be persisted. Calling Commit() will close the current transaction.
|
||||
// If Commit() is called without a prior Begin(), it must be a no-op.
|
||||
Commit(ctx context.Context) error
|
||||
// Rollback rolls back the DB transaction. All changes made after Begin() will be discarded.
|
||||
// Rollback() must be safe to call even if no transaction is active. Hence, a defer Rollback() is safe, even if Commit() was called prior with non-error conditions.
|
||||
Rollback(ctx context.Context) error
|
||||
}
|
||||
61
modules/nodesale/delegate.go
Normal file
61
modules/nodesale/delegate.go
Normal file
@@ -0,0 +1,61 @@
|
||||
package nodesale
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/core/types"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
|
||||
delegatevalidator "github.com/gaze-network/indexer-network/modules/nodesale/internal/validator/delegate"
|
||||
)
|
||||
|
||||
// ProcessDelegate handles an ACTION_DELEGATE event: it checks that every
// referenced node exists and belongs to the transaction's signing key,
// records the event (valid or invalid, with the validator's reason), and on
// success re-points the nodes at the new delegatee.
func (p *Processor) ProcessDelegate(ctx context.Context, qtx datagateway.NodeSaleDataGatewayWithTx, block *types.Block, event NodeSaleEvent) error {
	validator := delegatevalidator.New()
	delegate := event.EventMessage.Delegate

	// All node IDs must refer to existing nodes of the deployment; the
	// verdict is accumulated in validator.Valid, so the boolean return is
	// intentionally discarded here.
	_, nodes, err := validator.NodesExist(ctx, qtx, delegate.DeployID, delegate.NodeIDs)
	if err != nil {
		return errors.Wrap(err, "Cannot query")
	}

	// Every node must be owned by the event's signer. The validator records
	// the failure state; we stop checking at the first mismatch.
	for _, node := range nodes {
		valid := validator.EqualXonlyPublicKey(node.OwnerPublicKey, event.TxPubkey)
		if !valid {
			break
		}
	}

	// Persist the event regardless of validity so rejected delegations are
	// auditable (Valid/Reason carry the validator outcome).
	err = qtx.CreateEvent(ctx, entity.NodeSaleEvent{
		TxHash:         event.Transaction.TxHash.String(),
		TxIndex:        int32(event.Transaction.Index),
		Action:         int32(event.EventMessage.Action),
		RawMessage:     event.RawData,
		ParsedMessage:  event.EventJson,
		BlockTimestamp: block.Header.Timestamp,
		BlockHash:      event.Transaction.BlockHash.String(),
		BlockHeight:    event.Transaction.BlockHeight,
		Valid:          validator.Valid,
		WalletAddress:  p.PubkeyToPkHashAddress(event.TxPubkey).EncodeAddress(),
		Metadata:       nil,
		Reason:         validator.Reason,
	})
	if err != nil {
		return errors.Wrap(err, "Failed to insert event")
	}

	// Only a fully valid delegation mutates node state.
	if validator.Valid {
		_, err = qtx.SetDelegates(ctx, datagateway.SetDelegatesParams{
			SaleBlock:      delegate.DeployID.Block,
			SaleTxIndex:    int32(delegate.DeployID.TxIndex),
			Delegatee:      delegate.DelegateePublicKey,
			DelegateTxHash: event.Transaction.TxHash.String(),
			NodeIds:        delegate.NodeIDs,
		})
		if err != nil {
			return errors.Wrap(err, "Failed to set delegate")
		}
	}

	return nil
}
|
||||
84
modules/nodesale/delegate_test.go
Normal file
84
modules/nodesale/delegate_test.go
Normal file
@@ -0,0 +1,84 @@
|
||||
package nodesale
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcd/btcec/v2"
|
||||
"github.com/gaze-network/indexer-network/common"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway/mocks"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/protobuf"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestDelegate verifies the happy path of ProcessDelegate: a delegate event
// signed by the nodes' owner is recorded as valid and both referenced nodes
// are re-pointed to the delegatee.
func TestDelegate(t *testing.T) {
	ctx := context.Background()
	mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
	p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)

	// The buyer signs the delegate event and owns the delegated nodes.
	buyerPrivateKey, _ := btcec.NewPrivateKey()
	buyerPubkeyHex := hex.EncodeToString(buyerPrivateKey.PubKey().SerializeCompressed())

	delegateePrivateKey, _ := btcec.NewPrivateKey()
	delegateePubkeyHex := hex.EncodeToString(delegateePrivateKey.PubKey().SerializeCompressed())

	// Delegate nodes 9 and 10 of an earlier deployment to the delegatee.
	delegateMessage := &protobuf.NodeSaleEvent{
		Action: protobuf.Action_ACTION_DELEGATE,
		Delegate: &protobuf.ActionDelegate{
			DelegateePublicKey: delegateePubkeyHex,
			NodeIDs:            []uint32{9, 10},
			DeployID: &protobuf.ActionID{
				Block:   uint64(testBlockHeight) - 2,
				TxIndex: uint32(testTxIndex) - 2,
			},
		},
	}

	event, block := assembleTestEvent(buyerPrivateKey, "131313131313", "131313131313", 0, 0, delegateMessage)

	// The recorded event must be marked valid.
	mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
		return event.Valid == true
	})).Return(nil)

	// Both referenced nodes exist and are owned by the buyer's key.
	mockDgTx.EXPECT().GetNodesByIds(mock.Anything, datagateway.GetNodesByIdsParams{
		SaleBlock:   delegateMessage.Delegate.DeployID.Block,
		SaleTxIndex: delegateMessage.Delegate.DeployID.TxIndex,
		NodeIds:     []uint32{9, 10},
	}).Return([]entity.Node{
		{
			SaleBlock:      delegateMessage.Delegate.DeployID.Block,
			SaleTxIndex:    delegateMessage.Delegate.DeployID.TxIndex,
			NodeID:         9,
			TierIndex:      1,
			DelegatedTo:    "",
			OwnerPublicKey: buyerPubkeyHex,
			PurchaseTxHash: mock.Anything,
			DelegateTxHash: "",
		},
		{
			SaleBlock:      delegateMessage.Delegate.DeployID.Block,
			SaleTxIndex:    delegateMessage.Delegate.DeployID.TxIndex,
			NodeID:         10,
			TierIndex:      2,
			DelegatedTo:    "",
			OwnerPublicKey: buyerPubkeyHex,
			PurchaseTxHash: mock.Anything,
			DelegateTxHash: "",
		},
	}, nil)

	// On success the processor must delegate both nodes in one call.
	mockDgTx.EXPECT().SetDelegates(mock.Anything, datagateway.SetDelegatesParams{
		SaleBlock:      delegateMessage.Delegate.DeployID.Block,
		SaleTxIndex:    int32(delegateMessage.Delegate.DeployID.TxIndex),
		Delegatee:      delegateMessage.Delegate.DelegateePublicKey,
		DelegateTxHash: event.Transaction.TxHash.String(),
		NodeIds:        delegateMessage.Delegate.NodeIDs,
	}).Return(2, nil)

	err := p.ProcessDelegate(ctx, mockDgTx, block, event)
	require.NoError(t, err)
}
|
||||
67
modules/nodesale/deploy.go
Normal file
67
modules/nodesale/deploy.go
Normal file
@@ -0,0 +1,67 @@
|
||||
package nodesale
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/core/types"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/internal/validator"
|
||||
"google.golang.org/protobuf/encoding/protojson"
|
||||
)
|
||||
|
||||
// ProcessDeploy handles an ACTION_DEPLOY event: it validates that the
// declared seller public key matches the transaction's signing key, records
// the event (valid or not), and on success persists the deployment with its
// tiers serialized to JSON.
func (p *Processor) ProcessDeploy(ctx context.Context, qtx datagateway.NodeSaleDataGatewayWithTx, block *types.Block, event NodeSaleEvent) error {
	deploy := event.EventMessage.Deploy

	validator := validator.New()

	// The deploy is only valid when signed by the declared seller key; the
	// validator records the verdict in validator.Valid/Reason.
	validator.EqualXonlyPublicKey(deploy.SellerPublicKey, event.TxPubkey)

	// Persist the event regardless of validity so rejected deploys remain
	// auditable.
	err := qtx.CreateEvent(ctx, entity.NodeSaleEvent{
		TxHash:         event.Transaction.TxHash.String(),
		TxIndex:        int32(event.Transaction.Index),
		Action:         int32(event.EventMessage.Action),
		RawMessage:     event.RawData,
		ParsedMessage:  event.EventJson,
		BlockTimestamp: block.Header.Timestamp,
		BlockHash:      event.Transaction.BlockHash.String(),
		BlockHeight:    event.Transaction.BlockHeight,
		Valid:          validator.Valid,
		WalletAddress:  p.PubkeyToPkHashAddress(event.TxPubkey).EncodeAddress(),
		Metadata:       nil,
		Reason:         validator.Reason,
	})
	if err != nil {
		return errors.Wrap(err, "Failed to insert event")
	}
	if validator.Valid {
		// Serialize each tier with protojson for the JSONB[] column.
		tiers := make([][]byte, len(deploy.Tiers))
		for i, tier := range deploy.Tiers {
			tierJson, err := protojson.Marshal(tier)
			if err != nil {
				return errors.Wrap(err, "Failed to parse tiers to json")
			}
			tiers[i] = tierJson
		}
		// StartsAt/EndsAt are unix seconds in the protobuf message.
		err = qtx.CreateNodeSale(ctx, entity.NodeSale{
			BlockHeight:           uint64(event.Transaction.BlockHeight),
			TxIndex:               event.Transaction.Index,
			Name:                  deploy.Name,
			StartsAt:              time.Unix(int64(deploy.StartsAt), 0),
			EndsAt:                time.Unix(int64(deploy.EndsAt), 0),
			Tiers:                 tiers,
			SellerPublicKey:       deploy.SellerPublicKey,
			MaxPerAddress:         deploy.MaxPerAddress,
			DeployTxHash:          event.Transaction.TxHash.String(),
			MaxDiscountPercentage: int32(deploy.MaxDiscountPercentage),
			SellerWallet:          deploy.SellerWallet,
		})
		if err != nil {
			return errors.Wrap(err, "Failed to insert NodeSale")
		}
	}

	return nil
}
|
||||
139
modules/nodesale/deploy_test.go
Normal file
139
modules/nodesale/deploy_test.go
Normal file
@@ -0,0 +1,139 @@
|
||||
package nodesale
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/btcsuite/btcd/btcec/v2"
|
||||
"github.com/gaze-network/indexer-network/common"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway/mocks"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/protobuf"
|
||||
"github.com/samber/lo"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/protobuf/encoding/protojson"
|
||||
)
|
||||
|
||||
// TestDeployInvalid verifies that a deploy whose declared seller public key
// does not match the transaction's signing key is recorded as an invalid
// event and does NOT create a node sale.
func TestDeployInvalid(t *testing.T) {
	ctx := context.Background()
	mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
	p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)

	// prvKey signs the transaction...
	prvKey, err := btcec.NewPrivateKey()
	require.NoError(t, err)

	// ...but the message claims a different (stranger's) seller key.
	strangerKey, err := btcec.NewPrivateKey()
	require.NoError(t, err)

	strangerPubkeyHex := hex.EncodeToString(strangerKey.PubKey().SerializeCompressed())

	sellerWallet := p.PubkeyToPkHashAddress(prvKey.PubKey())

	message := &protobuf.NodeSaleEvent{
		Action: protobuf.Action_ACTION_DEPLOY,
		Deploy: &protobuf.ActionDeploy{
			Name:     t.Name(),
			StartsAt: 100,
			EndsAt:   200,
			Tiers: []*protobuf.Tier{
				{
					PriceSat:      100,
					Limit:         5,
					MaxPerAddress: 100,
				},
				{
					PriceSat:      200,
					Limit:         5,
					MaxPerAddress: 100,
				},
			},
			SellerPublicKey:       strangerPubkeyHex,
			MaxPerAddress:         100,
			MaxDiscountPercentage: 50,
			SellerWallet:          sellerWallet.EncodeAddress(),
		},
	}

	event, block := assembleTestEvent(prvKey, "0101010101", "0101010101", 0, 0, message)

	// The event must be persisted as invalid.
	mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
		return event.Valid == false
	})).Return(nil)

	err = p.ProcessDeploy(ctx, mockDgTx, block, event)
	require.NoError(t, err)

	// An invalid deploy must not reach the node_sales table.
	mockDgTx.AssertNotCalled(t, "CreateNodeSale")
}
|
||||
|
||||
func TestDeployValid(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
|
||||
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
|
||||
|
||||
privateKey, err := btcec.NewPrivateKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
pubkeyHex := hex.EncodeToString(privateKey.PubKey().SerializeCompressed())
|
||||
|
||||
sellerWallet := p.PubkeyToPkHashAddress(privateKey.PubKey())
|
||||
|
||||
startAt := time.Now().Add(time.Hour * -1)
|
||||
endAt := time.Now().Add(time.Hour * 1)
|
||||
|
||||
message := &protobuf.NodeSaleEvent{
|
||||
Action: protobuf.Action_ACTION_DEPLOY,
|
||||
Deploy: &protobuf.ActionDeploy{
|
||||
Name: t.Name(),
|
||||
StartsAt: uint32(startAt.UTC().Unix()),
|
||||
EndsAt: uint32(endAt.UTC().Unix()),
|
||||
Tiers: []*protobuf.Tier{
|
||||
{
|
||||
PriceSat: 100,
|
||||
Limit: 5,
|
||||
MaxPerAddress: 100,
|
||||
},
|
||||
{
|
||||
PriceSat: 200,
|
||||
Limit: 5,
|
||||
MaxPerAddress: 100,
|
||||
},
|
||||
},
|
||||
SellerPublicKey: pubkeyHex,
|
||||
MaxPerAddress: 100,
|
||||
MaxDiscountPercentage: 50,
|
||||
SellerWallet: sellerWallet.EncodeAddress(),
|
||||
},
|
||||
}
|
||||
|
||||
event, block := assembleTestEvent(privateKey, "0202020202", "0202020202", 0, 0, message)
|
||||
|
||||
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
|
||||
return event.Valid == true
|
||||
})).Return(nil)
|
||||
|
||||
tiers := lo.Map(message.Deploy.Tiers, func(tier *protobuf.Tier, _ int) []byte {
|
||||
tierJson, err := protojson.Marshal(tier)
|
||||
require.NoError(t, err)
|
||||
return tierJson
|
||||
})
|
||||
|
||||
mockDgTx.EXPECT().CreateNodeSale(mock.Anything, entity.NodeSale{
|
||||
BlockHeight: uint64(event.Transaction.BlockHeight),
|
||||
TxIndex: uint32(event.Transaction.Index),
|
||||
Name: message.Deploy.Name,
|
||||
StartsAt: time.Unix(int64(message.Deploy.StartsAt), 0),
|
||||
EndsAt: time.Unix(int64(message.Deploy.EndsAt), 0),
|
||||
Tiers: tiers,
|
||||
SellerPublicKey: message.Deploy.SellerPublicKey,
|
||||
MaxPerAddress: message.Deploy.MaxPerAddress,
|
||||
DeployTxHash: event.Transaction.TxHash.String(),
|
||||
MaxDiscountPercentage: int32(message.Deploy.MaxDiscountPercentage),
|
||||
SellerWallet: message.Deploy.SellerWallet,
|
||||
}).Return(nil)
|
||||
|
||||
p.ProcessDeploy(ctx, mockDgTx, block, event)
|
||||
}
|
||||
55
modules/nodesale/internal/entity/entity.go
Normal file
55
modules/nodesale/internal/entity/entity.go
Normal file
@@ -0,0 +1,55 @@
|
||||
package entity
|
||||
|
||||
import "time"
|
||||
|
||||
// Block is a block processed by the nodesale module.
type Block struct {
	BlockHeight int64
	BlockHash   string
	Module      string
}

// Node is a purchased node, keyed by its deployment (SaleBlock, SaleTxIndex)
// and NodeID within that deployment.
type Node struct {
	SaleBlock      uint64
	SaleTxIndex    uint32
	NodeID         uint32
	TierIndex      int32
	// DelegatedTo is empty when the node is not delegated.
	DelegatedTo    string
	OwnerPublicKey string
	PurchaseTxHash string
	// DelegateTxHash is empty when no delegate event applies.
	DelegateTxHash string
}

// NodeSale is a deployment: a sale of nodes announced by a deploy event.
type NodeSale struct {
	BlockHeight     uint64
	TxIndex         uint32
	Name            string
	StartsAt        time.Time
	EndsAt          time.Time
	// Tiers holds one JSON-serialized protobuf Tier per entry.
	Tiers           [][]byte
	SellerPublicKey string
	MaxPerAddress   uint32
	DeployTxHash    string
	MaxDiscountPercentage int32
	SellerWallet          string
}

// NodeSaleEvent is a node-sale protocol message observed on-chain, stored
// whether or not it passed validation (Valid/Reason record the verdict).
type NodeSaleEvent struct {
	TxHash        string
	BlockHeight   int64
	TxIndex       int32
	WalletAddress string
	Valid         bool
	// Action is the numeric protobuf Action enum value.
	Action int32
	// RawMessage is the undecoded on-chain payload; ParsedMessage is its
	// JSON rendering.
	RawMessage     []byte
	ParsedMessage  []byte
	BlockTimestamp time.Time
	BlockHash      string
	// Metadata is only populated for purchase events; nil otherwise.
	Metadata *MetadataEventPurchase
	Reason   string
}

// MetadataEventPurchase captures the amounts involved in a purchase event,
// all in satoshis (presumably — confirm against the purchase processor).
type MetadataEventPurchase struct {
	ExpectedTotalAmountDiscounted uint64
	ReportedTotalAmount           uint64
	PaidTotalAmount               uint64
}
|
||||
51
modules/nodesale/internal/validator/delegate/validator.go
Normal file
51
modules/nodesale/internal/validator/delegate/validator.go
Normal file
@@ -0,0 +1,51 @@
|
||||
package delegate
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/internal/validator"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/protobuf"
|
||||
)
|
||||
|
||||
// DelegateValidator extends the base Validator with checks specific to
// ACTION_DELEGATE events.
type DelegateValidator struct {
	validator.Validator
}

// New returns a DelegateValidator wrapping a fresh base validator.
func New() *DelegateValidator {
	v := validator.New()
	return &DelegateValidator{
		Validator: *v,
	}
}

// NodesExist checks that every ID in nodeIds refers to an existing node of
// the deployment identified by deployId. It updates v.Valid and returns the
// verdict together with the nodes found. Like other validator checks, it
// short-circuits to false if a previous check already failed.
func (v *DelegateValidator) NodesExist(
	ctx context.Context,
	qtx datagateway.NodeSaleDataGatewayWithTx,
	deployId *protobuf.ActionID,
	nodeIds []uint32,
) (bool, []entity.Node, error) {
	// Chain-style short-circuit: a prior failed check makes this a no-op.
	if !v.Valid {
		return false, nil, nil
	}

	nodes, err := qtx.GetNodesByIds(ctx, datagateway.GetNodesByIdsParams{
		SaleBlock:   deployId.Block,
		SaleTxIndex: deployId.TxIndex,
		NodeIds:     nodeIds,
	})
	if err != nil {
		v.Valid = false
		return v.Valid, nil, errors.Wrap(err, "Failed to get nodes")
	}

	// Fewer rows than requested IDs means at least one node is missing.
	// NOTE(review): v.Reason is not set on this failure path, so the stored
	// event would have Valid=false with an empty reason — confirm whether
	// that is intended.
	if len(nodeIds) != len(nodes) {
		v.Valid = false
		return v.Valid, nil, nil
	}

	v.Valid = true
	return v.Valid, nodes, nil
}
|
||||
6
modules/nodesale/internal/validator/errors.go
Normal file
6
modules/nodesale/internal/validator/errors.go
Normal file
@@ -0,0 +1,6 @@
|
||||
package validator
|
||||
|
||||
// Shared failure reasons set on Validator.Reason by validation helpers.
// NOTE(review): ALL_CAPS names are unconventional in Go (MixedCaps is
// idiomatic), but renaming would break callers in other files.
const (
	INVALID_PUBKEY_FORMAT = "Cannot parse public key"
	INVALID_PUBKEY        = "Invalid public key"
)
|
||||
17
modules/nodesale/internal/validator/purchase/errors.go
Normal file
17
modules/nodesale/internal/validator/purchase/errors.go
Normal file
@@ -0,0 +1,17 @@
|
||||
package purchase
|
||||
|
||||
// Human-readable failure reasons reported by PurchaseValidator via
// Validator.Reason.
// NOTE(review): ALL_CAPS names are unconventional in Go, but renaming
// would break callers elsewhere, so only the values are touched here.
const (
	DEPLOYID_NOT_FOUND         = "Deploy ID not found." // fixed typo: was "Depoloy"
	PURCHASE_TIMEOUT           = "Purchase timeout."
	BLOCK_HEIGHT_TIMEOUT       = "Block height over timeout block"
	INVALID_SIGNATURE_FORMAT   = "Cannot parse signature."
	INVALID_SIGNATURE          = "Invalid Signature."
	INVALID_TIER_JSON          = "Invalid Tier format"
	INVALID_NODE_ID            = "Invalid NodeId."
	NODE_ALREADY_PURCHASED     = "Some node has been purchased."
	INVALID_SELLER_ADDR_FORMAT = "Invalid seller address."
	INVALID_PAYMENT            = "Total amount paid less than reported price"
	INSUFFICIENT_FUND          = "Insufficient fund"
	OVER_LIMIT_PER_ADDR        = "Purchase over limit per address."
	OVER_LIMIT_PER_TIER        = "Purchase over limit per tier."
)
|
||||
283
modules/nodesale/internal/validator/purchase/validator.go
Normal file
283
modules/nodesale/internal/validator/purchase/validator.go
Normal file
@@ -0,0 +1,283 @@
|
||||
package purchase
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
"github.com/btcsuite/btcd/btcec/v2"
|
||||
"github.com/btcsuite/btcd/btcec/v2/ecdsa"
|
||||
"github.com/btcsuite/btcd/chaincfg"
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/internal/validator"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/protobuf"
|
||||
"google.golang.org/protobuf/encoding/protojson"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// PurchaseValidator validates purchase actions step by step. It embeds the
// shared Validator; once any step fails, Valid stays false and later steps
// short-circuit.
type PurchaseValidator struct {
	validator.Validator
}
|
||||
|
||||
func New() *PurchaseValidator {
|
||||
v := validator.New()
|
||||
return &PurchaseValidator{
|
||||
Validator: *v,
|
||||
}
|
||||
}
|
||||
|
||||
// NodeSaleExists looks up the NodeSale deployment referenced by
// payload.DeployID. It returns the deploy on success; when no matching
// deploy exists the validator is marked invalid with DEPLOYID_NOT_FOUND.
func (v *PurchaseValidator) NodeSaleExists(ctx context.Context, qtx datagateway.NodeSaleDataGatewayWithTx, payload *protobuf.PurchasePayload) (bool, *entity.NodeSale, error) {
	if !v.Valid {
		return false, nil, nil
	}
	// check node existed
	deploys, err := qtx.GetNodeSale(ctx, datagateway.GetNodeSaleParams{
		BlockHeight: payload.DeployID.Block,
		TxIndex:     payload.DeployID.TxIndex,
	})
	if err != nil {
		v.Valid = false
		return v.Valid, nil, errors.Wrap(err, "Failed to Get NodeSale")
	}
	if len(deploys) < 1 {
		v.Valid = false
		v.Reason = DEPLOYID_NOT_FOUND
		return v.Valid, nil, nil
	}
	v.Valid = true
	// Only the first matching deploy is used.
	return v.Valid, &deploys[0], nil
}
|
||||
|
||||
func (v *PurchaseValidator) ValidTimestamp(deploy *entity.NodeSale, timestamp time.Time) bool {
|
||||
if !v.Valid {
|
||||
return false
|
||||
}
|
||||
if timestamp.Before(deploy.StartsAt) ||
|
||||
timestamp.After(deploy.EndsAt) {
|
||||
v.Valid = false
|
||||
v.Reason = PURCHASE_TIMEOUT
|
||||
return v.Valid
|
||||
}
|
||||
v.Valid = true
|
||||
return v.Valid
|
||||
}
|
||||
|
||||
func (v *PurchaseValidator) WithinTimeoutBlock(timeOutBlock uint64, blockHeight uint64) bool {
|
||||
if !v.Valid {
|
||||
return false
|
||||
}
|
||||
if timeOutBlock == 0 {
|
||||
// No timeout
|
||||
v.Valid = true
|
||||
return v.Valid
|
||||
}
|
||||
if timeOutBlock < blockHeight {
|
||||
v.Valid = false
|
||||
v.Reason = BLOCK_HEIGHT_TIMEOUT
|
||||
return v.Valid
|
||||
}
|
||||
v.Valid = true
|
||||
return v.Valid
|
||||
}
|
||||
|
||||
func (v *PurchaseValidator) VerifySignature(purchase *protobuf.ActionPurchase, deploy *entity.NodeSale) bool {
|
||||
if !v.Valid {
|
||||
return false
|
||||
}
|
||||
payload := purchase.Payload
|
||||
payloadBytes, _ := proto.Marshal(payload)
|
||||
signatureBytes, _ := hex.DecodeString(purchase.SellerSignature)
|
||||
signature, err := ecdsa.ParseSignature(signatureBytes)
|
||||
if err != nil {
|
||||
v.Valid = false
|
||||
v.Reason = INVALID_SIGNATURE_FORMAT
|
||||
return v.Valid
|
||||
}
|
||||
hash := chainhash.DoubleHashB(payloadBytes)
|
||||
pubkeyBytes, _ := hex.DecodeString(deploy.SellerPublicKey)
|
||||
pubKey, _ := btcec.ParsePubKey(pubkeyBytes)
|
||||
verified := signature.Verify(hash[:], pubKey)
|
||||
if !verified {
|
||||
v.Valid = false
|
||||
v.Reason = INVALID_SIGNATURE
|
||||
return v.Valid
|
||||
}
|
||||
v.Valid = true
|
||||
return v.Valid
|
||||
}
|
||||
|
||||
// TierMap is the result of ValidTiers: the decoded tier definitions, how
// many nodes of each tier this purchase buys, and a lookup from node id to
// the index of its tier.
type TierMap struct {
	// Tiers are the deploy's tier definitions decoded from JSON.
	Tiers []protobuf.Tier
	// BuyingTiersCount[i] is how many purchased node ids fall in tier i.
	BuyingTiersCount []uint32
	// NodeIdToTier maps each purchased node id to its tier index.
	NodeIdToTier map[uint32]int32
}
|
||||
|
||||
// ValidTiers decodes the deploy's tier JSON blobs and maps every purchased
// node id onto its tier. Node ids are assigned to tiers by cumulative tier
// limits: ids [0, limit0) belong to tier 0, [limit0, limit0+limit1) to
// tier 1, and so on. A node id beyond the last tier's range invalidates
// the purchase with INVALID_NODE_ID.
func (v *PurchaseValidator) ValidTiers(
	payload *protobuf.PurchasePayload,
	deploy *entity.NodeSale,
) (bool, TierMap) {
	if !v.Valid {
		return false, TierMap{}
	}
	tiers := make([]protobuf.Tier, len(deploy.Tiers))
	buyingTiersCount := make([]uint32, len(tiers))
	nodeIdToTier := make(map[uint32]int32)

	// Decode each stored tier; any malformed entry fails the whole check.
	for i, tierJson := range deploy.Tiers {
		tier := &tiers[i]
		err := protojson.Unmarshal(tierJson, tier)
		if err != nil {
			v.Valid = false
			v.Reason = INVALID_TIER_JSON
			return v.Valid, TierMap{}
		}
	}

	// NOTE(review): this sorts the caller's payload.NodeIDs slice in place.
	slices.Sort(payload.NodeIDs)

	// Walk sorted node ids and tier ranges in lockstep; tierSum is the
	// exclusive upper bound of the current tier's id range.
	var currentTier int32 = -1
	var tierSum uint32 = 0
	for _, nodeId := range payload.NodeIDs {
		for nodeId >= tierSum && currentTier < int32(len(tiers)-1) {
			currentTier++
			tierSum += tiers[currentTier].Limit
		}
		if nodeId < tierSum {
			buyingTiersCount[currentTier]++
			nodeIdToTier[nodeId] = currentTier
		} else {
			// nodeId exceeds the total capacity of all tiers.
			v.Valid = false
			v.Reason = INVALID_NODE_ID
			return false, TierMap{}
		}
	}
	v.Valid = true
	return v.Valid, TierMap{
		Tiers:            tiers,
		BuyingTiersCount: buyingTiersCount,
		NodeIdToTier:     nodeIdToTier,
	}
}
|
||||
|
||||
// ValidUnpurchasedNodes checks that none of the node ids in the payload
// have already been sold for this deploy. Any hit in the nodes table means
// a prior purchase, which invalidates this one with NODE_ALREADY_PURCHASED.
func (v *PurchaseValidator) ValidUnpurchasedNodes(
	ctx context.Context,
	qtx datagateway.NodeSaleDataGatewayWithTx,
	payload *protobuf.PurchasePayload,
) (bool, error) {
	if !v.Valid {
		return false, nil
	}

	// valid unpurchased node ID
	nodes, err := qtx.GetNodesByIds(ctx, datagateway.GetNodesByIdsParams{
		SaleBlock:   payload.DeployID.Block,
		SaleTxIndex: payload.DeployID.TxIndex,
		NodeIds:     payload.NodeIDs,
	})
	if err != nil {
		v.Valid = false
		return v.Valid, errors.Wrap(err, "Failed to Get nodes")
	}
	// Existing rows mean those ids were already purchased.
	if len(nodes) > 0 {
		v.Valid = false
		v.Reason = NODE_ALREADY_PURCHASED
		return false, nil
	}
	v.Valid = true
	return true, nil
}
|
||||
|
||||
func (v *PurchaseValidator) ValidPaidAmount(
|
||||
payload *protobuf.PurchasePayload,
|
||||
deploy *entity.NodeSale,
|
||||
txPaid uint64,
|
||||
tiers []protobuf.Tier,
|
||||
buyingTiersCount []uint32,
|
||||
network *chaincfg.Params,
|
||||
) (bool, *entity.MetadataEventPurchase) {
|
||||
if !v.Valid {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
meta := entity.MetadataEventPurchase{}
|
||||
|
||||
meta.PaidTotalAmount = txPaid
|
||||
meta.ReportedTotalAmount = uint64(payload.TotalAmountSat)
|
||||
// total amount paid is greater than report paid
|
||||
if txPaid < uint64(payload.TotalAmountSat) {
|
||||
v.Valid = false
|
||||
v.Reason = INVALID_PAYMENT
|
||||
return v.Valid, nil
|
||||
}
|
||||
// calculate total price
|
||||
var totalPrice uint64 = 0
|
||||
for i := 0; i < len(tiers); i++ {
|
||||
totalPrice += uint64(buyingTiersCount[i] * tiers[i].PriceSat)
|
||||
}
|
||||
// report paid is greater than max discounted total price
|
||||
maxDiscounted := totalPrice * (100 - uint64(deploy.MaxDiscountPercentage))
|
||||
decimal := maxDiscounted % 100
|
||||
maxDiscounted /= 100
|
||||
if decimal%100 >= 50 {
|
||||
maxDiscounted++
|
||||
}
|
||||
meta.ExpectedTotalAmountDiscounted = maxDiscounted
|
||||
if uint64(payload.TotalAmountSat) < maxDiscounted {
|
||||
v.Valid = false
|
||||
v.Reason = INSUFFICIENT_FUND
|
||||
return v.Valid, nil
|
||||
}
|
||||
v.Valid = true
|
||||
return v.Valid, &meta
|
||||
}
|
||||
|
||||
func (v *PurchaseValidator) WithinLimit(
|
||||
ctx context.Context,
|
||||
qtx datagateway.NodeSaleDataGatewayWithTx,
|
||||
payload *protobuf.PurchasePayload,
|
||||
deploy *entity.NodeSale,
|
||||
tiers []protobuf.Tier,
|
||||
buyingTiersCount []uint32,
|
||||
) (bool, error) {
|
||||
if !v.Valid {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// check node limit
|
||||
// get all selled by seller and owned by buyer
|
||||
buyerOwnedNodes, err := qtx.GetNodesByOwner(ctx, datagateway.GetNodesByOwnerParams{
|
||||
SaleBlock: deploy.BlockHeight,
|
||||
SaleTxIndex: deploy.TxIndex,
|
||||
OwnerPublicKey: payload.BuyerPublicKey,
|
||||
})
|
||||
if err != nil {
|
||||
v.Valid = false
|
||||
return v.Valid, errors.Wrap(err, "Failed to GetNodesByOwner")
|
||||
}
|
||||
if len(buyerOwnedNodes)+len(payload.NodeIDs) > int(deploy.MaxPerAddress) {
|
||||
v.Valid = false
|
||||
v.Reason = "Purchase over limit per address."
|
||||
return v.Valid, nil
|
||||
}
|
||||
|
||||
// check limit
|
||||
// count each tiers
|
||||
// check limited for each tier
|
||||
ownedTiersCount := make([]uint32, len(tiers))
|
||||
for _, node := range buyerOwnedNodes {
|
||||
ownedTiersCount[node.TierIndex]++
|
||||
}
|
||||
for i := 0; i < len(tiers); i++ {
|
||||
if ownedTiersCount[i]+buyingTiersCount[i] > tiers[i].MaxPerAddress {
|
||||
v.Valid = false
|
||||
v.Reason = "Purchase over limit per tier."
|
||||
return v.Valid, nil
|
||||
}
|
||||
}
|
||||
v.Valid = true
|
||||
return v.Valid, nil
|
||||
}
|
||||
44
modules/nodesale/internal/validator/validator.go
Normal file
44
modules/nodesale/internal/validator/validator.go
Normal file
@@ -0,0 +1,44 @@
|
||||
package validator
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
|
||||
"github.com/btcsuite/btcd/btcec/v2"
|
||||
)
|
||||
|
||||
// Validator carries the running state of a chain of validation steps:
// Valid stays true until a step fails, after which later steps
// short-circuit; Reason holds the first failure's human-readable cause.
type Validator struct {
	Valid  bool
	Reason string
}
|
||||
|
||||
func New() *Validator {
|
||||
return &Validator{
|
||||
Valid: true,
|
||||
}
|
||||
}
|
||||
|
||||
func (v *Validator) EqualXonlyPublicKey(target string, expected *btcec.PublicKey) bool {
|
||||
if !v.Valid {
|
||||
return false
|
||||
}
|
||||
targetBytes, err := hex.DecodeString(target)
|
||||
if err != nil {
|
||||
v.Valid = false
|
||||
v.Reason = INVALID_PUBKEY_FORMAT
|
||||
}
|
||||
|
||||
targetPubKey, err := btcec.ParsePubKey(targetBytes)
|
||||
if err != nil {
|
||||
v.Valid = false
|
||||
v.Reason = INVALID_PUBKEY_FORMAT
|
||||
}
|
||||
xOnlyTargetPubKey := btcec.ToSerialized(targetPubKey).SchnorrSerialized()
|
||||
xOnlyExpectedPubKey := btcec.ToSerialized(expected).SchnorrSerialized()
|
||||
|
||||
v.Valid = bytes.Equal(xOnlyTargetPubKey[:], xOnlyExpectedPubKey[:])
|
||||
if !v.Valid {
|
||||
v.Reason = INVALID_PUBKEY
|
||||
}
|
||||
return v.Valid
|
||||
}
|
||||
61
modules/nodesale/nodesale.go
Normal file
61
modules/nodesale/nodesale.go
Normal file
@@ -0,0 +1,61 @@
|
||||
package nodesale
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/btcsuite/btcd/rpcclient"
|
||||
"github.com/gaze-network/indexer-network/core/datasources"
|
||||
"github.com/gaze-network/indexer-network/core/indexer"
|
||||
"github.com/gaze-network/indexer-network/internal/config"
|
||||
"github.com/gaze-network/indexer-network/internal/postgres"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/api/httphandler"
|
||||
repository "github.com/gaze-network/indexer-network/modules/nodesale/repository/postgres"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/samber/do/v2"
|
||||
)
|
||||
|
||||
// NODESALE_MAGIC is the 4-byte marker ("nsop" in ASCII) that tags a
// tapscript envelope as a nodesale event.
var NODESALE_MAGIC = []byte{0x6e, 0x73, 0x6f, 0x70}

// Version is the nodesale module version string.
const (
	Version = "v0.0.1-alpha"
)
|
||||
|
||||
func New(injector do.Injector) (indexer.IndexerWorker, error) {
|
||||
ctx := do.MustInvoke[context.Context](injector)
|
||||
conf := do.MustInvoke[config.Config](injector)
|
||||
|
||||
btcClient := do.MustInvoke[*rpcclient.Client](injector)
|
||||
datasource := datasources.NewBitcoinNode(btcClient)
|
||||
|
||||
pg, err := postgres.NewPool(ctx, conf.Modules.NodeSale.Postgres)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Can't create postgres connection : %w", err)
|
||||
}
|
||||
var cleanupFuncs []func(context.Context) error
|
||||
cleanupFuncs = append(cleanupFuncs, func(ctx context.Context) error {
|
||||
pg.Close()
|
||||
return nil
|
||||
})
|
||||
repository := repository.NewRepository(pg)
|
||||
|
||||
processor := &Processor{
|
||||
NodeSaleDg: repository,
|
||||
BtcClient: datasource,
|
||||
Network: conf.Network,
|
||||
cleanupFuncs: cleanupFuncs,
|
||||
lastBlockDefault: conf.Modules.NodeSale.LastBlockDefault,
|
||||
}
|
||||
|
||||
httpServer := do.MustInvoke[*fiber.App](injector)
|
||||
nodeSaleHandler := httphandler.New(repository)
|
||||
if err := nodeSaleHandler.Mount(httpServer); err != nil {
|
||||
return nil, fmt.Errorf("Can't mount nodesale API : %w", err)
|
||||
}
|
||||
logger.InfoContext(ctx, "Mounted nodesale HTTP handler")
|
||||
|
||||
indexer := indexer.New(processor, datasource)
|
||||
logger.InfoContext(ctx, "NodeSale module started.")
|
||||
return indexer, nil
|
||||
}
|
||||
61
modules/nodesale/nodesale_test.go
Normal file
61
modules/nodesale/nodesale_test.go
Normal file
@@ -0,0 +1,61 @@
|
||||
package nodesale
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/txscript"
|
||||
"github.com/decred/dcrd/dcrec/secp256k1/v4"
|
||||
"github.com/gaze-network/indexer-network/core/types"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/protobuf"
|
||||
"google.golang.org/protobuf/encoding/protojson"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
var (
|
||||
testBlockHeight uint64 = 101
|
||||
testTxIndex uint32 = 1
|
||||
)
|
||||
|
||||
// assembleTestEvent builds a NodeSaleEvent and a minimal block for tests:
// it serializes message, wraps it in an OP_FALSE OP_IF ... OP_ENDIF
// envelope script, and fills in transaction identifiers.
//
// When blockHeight or txIndex is 0 it consumes (and increments) the
// package-level counters testBlockHeight / testTxIndex, so tests get
// unique, monotonically increasing positions. NOTE(review): this mutates
// shared package state — not safe for parallel tests.
func assembleTestEvent(privateKey *secp256k1.PrivateKey, blockHashHex, txHashHex string, blockHeight uint64, txIndex uint32, message *protobuf.NodeSaleEvent) (NodeSaleEvent, *types.Block) {
	// Hash parse errors are ignored: inputs are test fixtures.
	blockHash, _ := chainhash.NewHashFromStr(blockHashHex)
	txHash, _ := chainhash.NewHashFromStr(txHashHex)

	rawData, _ := proto.Marshal(message)

	// Envelope script carrying the serialized event (built for
	// completeness; only rawData is stored on the event).
	builder := txscript.NewScriptBuilder()
	builder.AddOp(txscript.OP_FALSE)
	builder.AddOp(txscript.OP_IF)
	builder.AddData(rawData)
	builder.AddOp(txscript.OP_ENDIF)

	messageJson, _ := protojson.Marshal(message)

	if blockHeight == 0 {
		blockHeight = testBlockHeight
		testBlockHeight++
	}
	if txIndex == 0 {
		txIndex = testTxIndex
		testTxIndex++
	}

	event := NodeSaleEvent{
		Transaction: &types.Transaction{
			BlockHeight: int64(blockHeight),
			BlockHash:   *blockHash,
			Index:       uint32(txIndex),
			TxHash:      *txHash,
		},
		RawData:      rawData,
		EventMessage: message,
		EventJson:    messageJson,
		TxPubkey:     privateKey.PubKey(),
	}
	block := &types.Block{
		Header: types.BlockHeader{
			Timestamp: time.Now().UTC(),
		},
	}
	return event, block
}
|
||||
303
modules/nodesale/processor.go
Normal file
303
modules/nodesale/processor.go
Normal file
@@ -0,0 +1,303 @@
|
||||
package nodesale
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
|
||||
"github.com/btcsuite/btcd/btcec/v2"
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/txscript"
|
||||
"github.com/gaze-network/indexer-network/common"
|
||||
"github.com/gaze-network/indexer-network/core/indexer"
|
||||
"github.com/gaze-network/indexer-network/core/types"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
|
||||
"google.golang.org/protobuf/encoding/protojson"
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/core/datasources"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/protobuf"
|
||||
)
|
||||
|
||||
// NodeSaleEvent is one nodesale action extracted from a transaction input's
// tapscript envelope.
type NodeSaleEvent struct {
	// Transaction is the transaction the event was found in.
	Transaction *types.Transaction
	// EventMessage is the decoded protobuf payload.
	EventMessage *protobuf.NodeSaleEvent
	// EventJson is the protojson rendering of EventMessage.
	EventJson []byte
	// TxPubkey is the taproot internal key from the envelope's control block.
	TxPubkey *btcec.PublicKey
	// RawData is the raw serialized payload pulled from the witness.
	RawData []byte
	// InputValue is the value (sats) of the spent previous output.
	InputValue uint64
}
|
||||
|
||||
func NewProcessor(repository datagateway.NodeSaleDataGateway,
|
||||
datasource *datasources.BitcoinNodeDatasource,
|
||||
network common.Network,
|
||||
cleanupFuncs []func(context.Context) error,
|
||||
lastBlockDefault int64,
|
||||
) *Processor {
|
||||
return &Processor{
|
||||
NodeSaleDg: repository,
|
||||
BtcClient: datasource,
|
||||
Network: network,
|
||||
cleanupFuncs: cleanupFuncs,
|
||||
lastBlockDefault: lastBlockDefault,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Processor) Shutdown(ctx context.Context) error {
|
||||
for _, cleanupFunc := range p.cleanupFuncs {
|
||||
err := cleanupFunc(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "cleanup function error")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Processor is the nodesale implementation of indexer.Processor: it parses
// nodesale events out of bitcoin blocks and persists them via the data
// gateway.
type Processor struct {
	// NodeSaleDg persists blocks, events, and node state.
	NodeSaleDg datagateway.NodeSaleDataGateway
	// BtcClient fetches block headers and previous transactions.
	BtcClient *datasources.BitcoinNodeDatasource
	// Network selects the bitcoin network parameters.
	Network common.Network
	// cleanupFuncs are run by Shutdown, in order.
	cleanupFuncs []func(context.Context) error
	// lastBlockDefault is the starting height when no progress exists yet.
	lastBlockDefault int64
}
|
||||
|
||||
// CurrentBlock implements indexer.Processor.
|
||||
// CurrentBlock implements indexer.Processor. It returns the last block
// this module processed; when none is recorded (e.g. first run) it falls
// back to lastBlockDefault, fetching that block's header from the bitcoin
// node. Panics if the stored hash cannot be parsed.
func (p *Processor) CurrentBlock(ctx context.Context) (types.BlockHeader, error) {
	block, err := p.NodeSaleDg.GetLastProcessedBlock(ctx)
	if err != nil {
		// No recorded progress: start from the configured default height.
		logger.InfoContext(ctx, "Couldn't get last processed block. Start from NODESALE_LAST_BLOCK_DEFAULT.",
			slogx.Int64("currentBlock", p.lastBlockDefault))
		header, err := p.BtcClient.GetBlockHeader(ctx, p.lastBlockDefault)
		if err != nil {
			return types.BlockHeader{}, errors.Wrap(err, "Cannot get default block from bitcoin node")
		}
		return types.BlockHeader{
			Hash:   header.Hash,
			Height: p.lastBlockDefault,
		}, nil
	}

	hash, err := chainhash.NewHashFromStr(block.BlockHash)
	if err != nil {
		// A corrupt stored hash is unrecoverable.
		logger.PanicContext(ctx, "Invalid hash format found in Database.")
	}
	return types.BlockHeader{
		Hash:   *hash,
		Height: block.BlockHeight,
	}, nil
}
|
||||
|
||||
// GetIndexedBlock implements indexer.Processor.
|
||||
// GetIndexedBlock implements indexer.Processor. It returns the header of
// the block this module indexed at the given height, erroring when the
// height was never indexed and panicking on a corrupt stored hash.
func (p *Processor) GetIndexedBlock(ctx context.Context, height int64) (types.BlockHeader, error) {
	block, err := p.NodeSaleDg.GetBlock(ctx, height)
	if err != nil {
		return types.BlockHeader{}, errors.Wrapf(err, "Block %d not found", height)
	}
	hash, err := chainhash.NewHashFromStr(block.BlockHash)
	if err != nil {
		logger.PanicContext(ctx, "Invalid hash format found in Database.")
	}
	return types.BlockHeader{
		Hash:   *hash,
		Height: block.BlockHeight,
	}, nil
}
|
||||
|
||||
// Name implements indexer.Processor.
|
||||
// Name implements indexer.Processor. The returned identifier is also
// stored as the Module field of every block this processor records.
func (p *Processor) Name() string {
	return "nodesale"
}
|
||||
|
||||
// extractNodeSaleData scans a tapscript witness for the nodesale envelope
// pattern OP_0 OP_IF <"nsop"> <payload> and, when found, returns the
// payload bytes and the taproot internal key from the control block.
// Returns isNodeSale=false when the witness is not a tapscript spend or
// the pattern never matches.
func extractNodeSaleData(witness [][]byte) (data []byte, internalPubkey *btcec.PublicKey, isNodeSale bool) {
	tokenizer, controlBlock, isTapScript := extractTapScript(witness)
	if !isTapScript {
		return []byte{}, nil, false
	}
	// Small state machine over the script opcodes:
	// 0 = expect OP_0, 1 = expect OP_IF, 2 = expect the 4-byte magic,
	// 3 = expect the payload push. Any mismatch resets to state 0.
	state := 0
	for tokenizer.Next() {
		switch state {
		case 0:
			if tokenizer.Opcode() == txscript.OP_0 {
				state++
			} else {
				state = 0
			}
		case 1:
			if tokenizer.Opcode() == txscript.OP_IF {
				state++
			} else {
				state = 0
			}
		case 2:
			if tokenizer.Opcode() == txscript.OP_DATA_4 &&
				bytes.Equal(tokenizer.Data(), NODESALE_MAGIC) {
				state++
			} else {
				state = 0
			}
		case 3:
			// Any instruction > txscript.OP_16 is not push data. Note: txscript.OP_PUSHDATAX < txscript.OP_16
			if tokenizer.Opcode() <= txscript.OP_16 {
				data := tokenizer.Data()
				return data, controlBlock.InternalKey, true
			}
			state = 0
		}
	}
	return []byte{}, nil, false
}
|
||||
|
||||
func (p *Processor) parseTransactions(ctx context.Context, transactions []*types.Transaction) ([]NodeSaleEvent, error) {
|
||||
var events []NodeSaleEvent
|
||||
for _, t := range transactions {
|
||||
for _, txIn := range t.TxIn {
|
||||
data, txPubkey, isNodeSale := extractNodeSaleData(txIn.Witness)
|
||||
if !isNodeSale {
|
||||
continue
|
||||
}
|
||||
|
||||
event := &protobuf.NodeSaleEvent{}
|
||||
err := proto.Unmarshal(data, event)
|
||||
if err != nil {
|
||||
logger.WarnContext(ctx, "Invalid Protobuf",
|
||||
slogx.String("block_hash", t.BlockHash.String()),
|
||||
slogx.Int("txIndex", int(t.Index)))
|
||||
continue
|
||||
}
|
||||
eventJson, err := protojson.Marshal(event)
|
||||
if err != nil {
|
||||
return []NodeSaleEvent{}, errors.Wrap(err, "Failed to parse protobuf to json")
|
||||
}
|
||||
|
||||
prevTx, _, err := p.BtcClient.GetRawTransactionAndHeightByTxHash(ctx, txIn.PreviousOutTxHash)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Failed to get Previous transaction data")
|
||||
}
|
||||
|
||||
if txIn.PreviousOutIndex >= uint32(len(prevTx.TxOut)) {
|
||||
return nil, errors.Wrap(err, "Invalid previous transaction from bitcoin")
|
||||
}
|
||||
|
||||
events = append(events, NodeSaleEvent{
|
||||
Transaction: t,
|
||||
EventMessage: event,
|
||||
EventJson: eventJson,
|
||||
RawData: data,
|
||||
TxPubkey: txPubkey,
|
||||
InputValue: uint64(prevTx.TxOut[txIn.PreviousOutIndex].Value),
|
||||
})
|
||||
}
|
||||
}
|
||||
return events, nil
|
||||
}
|
||||
|
||||
// Process implements indexer.Processor.
|
||||
func (p *Processor) Process(ctx context.Context, inputs []*types.Block) error {
|
||||
for _, block := range inputs {
|
||||
logger.InfoContext(ctx, "NodeSale processing a block",
|
||||
slogx.Int64("block", block.Header.Height),
|
||||
slogx.Stringer("hash", block.Header.Hash))
|
||||
// parse all event from each transaction including reading tx wallet
|
||||
events, err := p.parseTransactions(ctx, block.Transactions)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Invalid data from bitcoin client")
|
||||
}
|
||||
// open transaction
|
||||
qtx, err := p.NodeSaleDg.BeginNodeSaleTx(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed to create transaction")
|
||||
}
|
||||
defer func() {
|
||||
err = qtx.Rollback(ctx)
|
||||
if err != nil {
|
||||
logger.PanicContext(ctx, "Failed to rollback db")
|
||||
}
|
||||
}()
|
||||
|
||||
// write block
|
||||
err = qtx.CreateBlock(ctx, entity.Block{
|
||||
BlockHeight: block.Header.Height,
|
||||
BlockHash: block.Header.Hash.String(),
|
||||
Module: p.Name(),
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Failed to add block %d", block.Header.Height)
|
||||
}
|
||||
// for each events
|
||||
for _, event := range events {
|
||||
logger.InfoContext(ctx, "NodeSale processing event",
|
||||
slogx.Uint32("txIndex", event.Transaction.Index),
|
||||
slogx.Int64("blockHeight", block.Header.Height),
|
||||
slogx.Stringer("blockhash", block.Header.Hash),
|
||||
)
|
||||
eventMessage := event.EventMessage
|
||||
switch eventMessage.Action {
|
||||
case protobuf.Action_ACTION_DEPLOY:
|
||||
err = p.ProcessDeploy(ctx, qtx, block, event)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Failed to deploy at block %d", block.Header.Height)
|
||||
}
|
||||
case protobuf.Action_ACTION_DELEGATE:
|
||||
err = p.ProcessDelegate(ctx, qtx, block, event)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Failed to delegate at block %d", block.Header.Height)
|
||||
}
|
||||
case protobuf.Action_ACTION_PURCHASE:
|
||||
err = p.ProcessPurchase(ctx, qtx, block, event)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Failed to purchase at block %d", block.Header.Height)
|
||||
}
|
||||
default:
|
||||
logger.DebugContext(ctx, "Invalid event ACTION", slogx.Stringer("txHash", (event.Transaction.TxHash)))
|
||||
}
|
||||
}
|
||||
// close transaction
|
||||
err = qtx.Commit(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed to commit transaction")
|
||||
}
|
||||
logger.InfoContext(ctx, "NodeSale finished processing block",
|
||||
slogx.Int64("block", block.Header.Height),
|
||||
slogx.Stringer("hash", block.Header.Hash))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RevertData implements indexer.Processor.
|
||||
// RevertData implements indexer.Processor. It removes, in one database
// transaction, all stored blocks and events from height `from` upward and
// clears delegate assignments, then logs how many events were removed.
func (p *Processor) RevertData(ctx context.Context, from int64) error {
	qtx, err := p.NodeSaleDg.BeginNodeSaleTx(ctx)
	if err != nil {
		return errors.Wrap(err, "Failed to create transaction")
	}
	// NOTE(review): the rollback error is assigned to the local err and
	// discarded (returns are explicit, so it never reaches the caller);
	// after a successful Commit this rollback must be a tolerated no-op.
	defer func() { err = qtx.Rollback(ctx) }()
	_, err = qtx.RemoveBlockFrom(ctx, from)
	if err != nil {
		return errors.Wrap(err, "Failed to remove blocks.")
	}

	affected, err := qtx.RemoveEventsFromBlock(ctx, from)
	if err != nil {
		return errors.Wrap(err, "Failed to remove events.")
	}
	// NOTE(review): ClearDelegate takes no height — it appears to clear
	// delegates for all nodes, not just those set at height >= from;
	// confirm this is intended.
	_, err = qtx.ClearDelegate(ctx)
	if err != nil {
		return errors.Wrap(err, "Failed to clear delegate from nodes")
	}
	err = qtx.Commit(ctx)
	if err != nil {
		return errors.Wrap(err, "Failed to commit transaction")
	}
	logger.InfoContext(ctx, "Events removed",
		slogx.Int64("Total removed", affected))
	return nil
}
|
||||
|
||||
// VerifyStates implements indexer.Processor.
|
||||
// VerifyStates implements indexer.Processor.
// NOTE(review): not yet implemented — this panics if the indexer ever
// invokes state verification for the nodesale module.
func (p *Processor) VerifyStates(ctx context.Context) error {
	panic("unimplemented")
}
|
||||
|
||||
var _ indexer.Processor[*types.Block] = (*Processor)(nil)
|
||||
806
modules/nodesale/protobuf/nodesale.pb.go
Normal file
806
modules/nodesale/protobuf/nodesale.pb.go
Normal file
@@ -0,0 +1,806 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.34.1
|
||||
// protoc v5.26.1
|
||||
// source: modules/nodesale/protobuf/nodesale.proto
|
||||
|
||||
// protoc modules/nodesale/protobuf/nodesale.proto --go_out=. --go_opt=module=github.com/gaze-network/indexer-network
|
||||
|
||||
package protobuf
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type Action int32
|
||||
|
||||
const (
|
||||
Action_ACTION_DEPLOY Action = 0
|
||||
Action_ACTION_PURCHASE Action = 1
|
||||
Action_ACTION_DELEGATE Action = 2
|
||||
)
|
||||
|
||||
// Enum value maps for Action.
|
||||
var (
|
||||
Action_name = map[int32]string{
|
||||
0: "ACTION_DEPLOY",
|
||||
1: "ACTION_PURCHASE",
|
||||
2: "ACTION_DELEGATE",
|
||||
}
|
||||
Action_value = map[string]int32{
|
||||
"ACTION_DEPLOY": 0,
|
||||
"ACTION_PURCHASE": 1,
|
||||
"ACTION_DELEGATE": 2,
|
||||
}
|
||||
)
|
||||
|
||||
func (x Action) Enum() *Action {
|
||||
p := new(Action)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
|
||||
func (x Action) String() string {
|
||||
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
|
||||
}
|
||||
|
||||
func (Action) Descriptor() protoreflect.EnumDescriptor {
|
||||
return file_modules_nodesale_protobuf_nodesale_proto_enumTypes[0].Descriptor()
|
||||
}
|
||||
|
||||
func (Action) Type() protoreflect.EnumType {
|
||||
return &file_modules_nodesale_protobuf_nodesale_proto_enumTypes[0]
|
||||
}
|
||||
|
||||
func (x Action) Number() protoreflect.EnumNumber {
|
||||
return protoreflect.EnumNumber(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Action.Descriptor instead.
|
||||
func (Action) EnumDescriptor() ([]byte, []int) {
|
||||
return file_modules_nodesale_protobuf_nodesale_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
type NodeSaleEvent struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Action Action `protobuf:"varint,1,opt,name=action,proto3,enum=nodesale.Action" json:"action,omitempty"`
|
||||
Deploy *ActionDeploy `protobuf:"bytes,2,opt,name=deploy,proto3,oneof" json:"deploy,omitempty"`
|
||||
Purchase *ActionPurchase `protobuf:"bytes,3,opt,name=purchase,proto3,oneof" json:"purchase,omitempty"`
|
||||
Delegate *ActionDelegate `protobuf:"bytes,4,opt,name=delegate,proto3,oneof" json:"delegate,omitempty"`
|
||||
}
|
||||
|
||||
func (x *NodeSaleEvent) Reset() {
|
||||
*x = NodeSaleEvent{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *NodeSaleEvent) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*NodeSaleEvent) ProtoMessage() {}
|
||||
|
||||
func (x *NodeSaleEvent) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use NodeSaleEvent.ProtoReflect.Descriptor instead.
|
||||
func (*NodeSaleEvent) Descriptor() ([]byte, []int) {
|
||||
return file_modules_nodesale_protobuf_nodesale_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *NodeSaleEvent) GetAction() Action {
|
||||
if x != nil {
|
||||
return x.Action
|
||||
}
|
||||
return Action_ACTION_DEPLOY
|
||||
}
|
||||
|
||||
func (x *NodeSaleEvent) GetDeploy() *ActionDeploy {
|
||||
if x != nil {
|
||||
return x.Deploy
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *NodeSaleEvent) GetPurchase() *ActionPurchase {
|
||||
if x != nil {
|
||||
return x.Purchase
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *NodeSaleEvent) GetDelegate() *ActionDelegate {
|
||||
if x != nil {
|
||||
return x.Delegate
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ActionDeploy struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
StartsAt uint32 `protobuf:"varint,2,opt,name=startsAt,proto3" json:"startsAt,omitempty"`
|
||||
EndsAt uint32 `protobuf:"varint,3,opt,name=endsAt,proto3" json:"endsAt,omitempty"`
|
||||
Tiers []*Tier `protobuf:"bytes,4,rep,name=tiers,proto3" json:"tiers,omitempty"`
|
||||
SellerPublicKey string `protobuf:"bytes,5,opt,name=sellerPublicKey,proto3" json:"sellerPublicKey,omitempty"`
|
||||
MaxPerAddress uint32 `protobuf:"varint,6,opt,name=maxPerAddress,proto3" json:"maxPerAddress,omitempty"`
|
||||
MaxDiscountPercentage uint32 `protobuf:"varint,7,opt,name=maxDiscountPercentage,proto3" json:"maxDiscountPercentage,omitempty"`
|
||||
SellerWallet string `protobuf:"bytes,8,opt,name=sellerWallet,proto3" json:"sellerWallet,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ActionDeploy) Reset() {
|
||||
*x = ActionDeploy{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ActionDeploy) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ActionDeploy) ProtoMessage() {}
|
||||
|
||||
func (x *ActionDeploy) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ActionDeploy.ProtoReflect.Descriptor instead.
|
||||
func (*ActionDeploy) Descriptor() ([]byte, []int) {
|
||||
return file_modules_nodesale_protobuf_nodesale_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *ActionDeploy) GetName() string {
|
||||
if x != nil {
|
||||
return x.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *ActionDeploy) GetStartsAt() uint32 {
|
||||
if x != nil {
|
||||
return x.StartsAt
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *ActionDeploy) GetEndsAt() uint32 {
|
||||
if x != nil {
|
||||
return x.EndsAt
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *ActionDeploy) GetTiers() []*Tier {
|
||||
if x != nil {
|
||||
return x.Tiers
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ActionDeploy) GetSellerPublicKey() string {
|
||||
if x != nil {
|
||||
return x.SellerPublicKey
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *ActionDeploy) GetMaxPerAddress() uint32 {
|
||||
if x != nil {
|
||||
return x.MaxPerAddress
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *ActionDeploy) GetMaxDiscountPercentage() uint32 {
|
||||
if x != nil {
|
||||
return x.MaxDiscountPercentage
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *ActionDeploy) GetSellerWallet() string {
|
||||
if x != nil {
|
||||
return x.SellerWallet
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type Tier struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
PriceSat uint32 `protobuf:"varint,1,opt,name=priceSat,proto3" json:"priceSat,omitempty"`
|
||||
Limit uint32 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"`
|
||||
MaxPerAddress uint32 `protobuf:"varint,3,opt,name=maxPerAddress,proto3" json:"maxPerAddress,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Tier) Reset() {
|
||||
*x = Tier{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Tier) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Tier) ProtoMessage() {}
|
||||
|
||||
func (x *Tier) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Tier.ProtoReflect.Descriptor instead.
|
||||
func (*Tier) Descriptor() ([]byte, []int) {
|
||||
return file_modules_nodesale_protobuf_nodesale_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *Tier) GetPriceSat() uint32 {
|
||||
if x != nil {
|
||||
return x.PriceSat
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *Tier) GetLimit() uint32 {
|
||||
if x != nil {
|
||||
return x.Limit
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *Tier) GetMaxPerAddress() uint32 {
|
||||
if x != nil {
|
||||
return x.MaxPerAddress
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type ActionPurchase struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Payload *PurchasePayload `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"`
|
||||
SellerSignature string `protobuf:"bytes,2,opt,name=sellerSignature,proto3" json:"sellerSignature,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ActionPurchase) Reset() {
|
||||
*x = ActionPurchase{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ActionPurchase) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ActionPurchase) ProtoMessage() {}
|
||||
|
||||
func (x *ActionPurchase) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[3]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ActionPurchase.ProtoReflect.Descriptor instead.
|
||||
func (*ActionPurchase) Descriptor() ([]byte, []int) {
|
||||
return file_modules_nodesale_protobuf_nodesale_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (x *ActionPurchase) GetPayload() *PurchasePayload {
|
||||
if x != nil {
|
||||
return x.Payload
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ActionPurchase) GetSellerSignature() string {
|
||||
if x != nil {
|
||||
return x.SellerSignature
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type PurchasePayload struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
DeployID *ActionID `protobuf:"bytes,1,opt,name=deployID,proto3" json:"deployID,omitempty"`
|
||||
BuyerPublicKey string `protobuf:"bytes,2,opt,name=buyerPublicKey,proto3" json:"buyerPublicKey,omitempty"`
|
||||
NodeIDs []uint32 `protobuf:"varint,3,rep,packed,name=nodeIDs,proto3" json:"nodeIDs,omitempty"`
|
||||
TotalAmountSat int64 `protobuf:"varint,4,opt,name=totalAmountSat,proto3" json:"totalAmountSat,omitempty"`
|
||||
TimeOutBlock uint64 `protobuf:"varint,5,opt,name=timeOutBlock,proto3" json:"timeOutBlock,omitempty"`
|
||||
}
|
||||
|
||||
func (x *PurchasePayload) Reset() {
|
||||
*x = PurchasePayload{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *PurchasePayload) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*PurchasePayload) ProtoMessage() {}
|
||||
|
||||
func (x *PurchasePayload) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[4]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use PurchasePayload.ProtoReflect.Descriptor instead.
|
||||
func (*PurchasePayload) Descriptor() ([]byte, []int) {
|
||||
return file_modules_nodesale_protobuf_nodesale_proto_rawDescGZIP(), []int{4}
|
||||
}
|
||||
|
||||
func (x *PurchasePayload) GetDeployID() *ActionID {
|
||||
if x != nil {
|
||||
return x.DeployID
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *PurchasePayload) GetBuyerPublicKey() string {
|
||||
if x != nil {
|
||||
return x.BuyerPublicKey
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *PurchasePayload) GetNodeIDs() []uint32 {
|
||||
if x != nil {
|
||||
return x.NodeIDs
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *PurchasePayload) GetTotalAmountSat() int64 {
|
||||
if x != nil {
|
||||
return x.TotalAmountSat
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *PurchasePayload) GetTimeOutBlock() uint64 {
|
||||
if x != nil {
|
||||
return x.TimeOutBlock
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type ActionID struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Block uint64 `protobuf:"varint,1,opt,name=block,proto3" json:"block,omitempty"`
|
||||
TxIndex uint32 `protobuf:"varint,2,opt,name=txIndex,proto3" json:"txIndex,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ActionID) Reset() {
|
||||
*x = ActionID{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[5]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ActionID) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ActionID) ProtoMessage() {}
|
||||
|
||||
func (x *ActionID) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[5]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ActionID.ProtoReflect.Descriptor instead.
|
||||
func (*ActionID) Descriptor() ([]byte, []int) {
|
||||
return file_modules_nodesale_protobuf_nodesale_proto_rawDescGZIP(), []int{5}
|
||||
}
|
||||
|
||||
func (x *ActionID) GetBlock() uint64 {
|
||||
if x != nil {
|
||||
return x.Block
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *ActionID) GetTxIndex() uint32 {
|
||||
if x != nil {
|
||||
return x.TxIndex
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type ActionDelegate struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
DelegateePublicKey string `protobuf:"bytes,1,opt,name=delegateePublicKey,proto3" json:"delegateePublicKey,omitempty"`
|
||||
NodeIDs []uint32 `protobuf:"varint,2,rep,packed,name=nodeIDs,proto3" json:"nodeIDs,omitempty"`
|
||||
DeployID *ActionID `protobuf:"bytes,3,opt,name=deployID,proto3" json:"deployID,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ActionDelegate) Reset() {
|
||||
*x = ActionDelegate{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[6]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ActionDelegate) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ActionDelegate) ProtoMessage() {}
|
||||
|
||||
func (x *ActionDelegate) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_modules_nodesale_protobuf_nodesale_proto_msgTypes[6]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ActionDelegate.ProtoReflect.Descriptor instead.
|
||||
func (*ActionDelegate) Descriptor() ([]byte, []int) {
|
||||
return file_modules_nodesale_protobuf_nodesale_proto_rawDescGZIP(), []int{6}
|
||||
}
|
||||
|
||||
func (x *ActionDelegate) GetDelegateePublicKey() string {
|
||||
if x != nil {
|
||||
return x.DelegateePublicKey
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *ActionDelegate) GetNodeIDs() []uint32 {
|
||||
if x != nil {
|
||||
return x.NodeIDs
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ActionDelegate) GetDeployID() *ActionID {
|
||||
if x != nil {
|
||||
return x.DeployID
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_modules_nodesale_protobuf_nodesale_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_modules_nodesale_protobuf_nodesale_proto_rawDesc = []byte{
|
||||
0x0a, 0x28, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x61,
|
||||
0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x6e, 0x6f, 0x64, 0x65,
|
||||
0x73, 0x61, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x6e, 0x6f, 0x64, 0x65,
|
||||
0x73, 0x61, 0x6c, 0x65, 0x22, 0x89, 0x02, 0x0a, 0x0d, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x61, 0x6c,
|
||||
0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x61, 0x6c,
|
||||
0x65, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x12, 0x33, 0x0a, 0x06, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
|
||||
0x32, 0x16, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x61, 0x6c, 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x48, 0x00, 0x52, 0x06, 0x64, 0x65, 0x70, 0x6c,
|
||||
0x6f, 0x79, 0x88, 0x01, 0x01, 0x12, 0x39, 0x0a, 0x08, 0x70, 0x75, 0x72, 0x63, 0x68, 0x61, 0x73,
|
||||
0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x61,
|
||||
0x6c, 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x75, 0x72, 0x63, 0x68, 0x61, 0x73,
|
||||
0x65, 0x48, 0x01, 0x52, 0x08, 0x70, 0x75, 0x72, 0x63, 0x68, 0x61, 0x73, 0x65, 0x88, 0x01, 0x01,
|
||||
0x12, 0x39, 0x0a, 0x08, 0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01,
|
||||
0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x61, 0x6c, 0x65, 0x2e, 0x41, 0x63,
|
||||
0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x48, 0x02, 0x52, 0x08,
|
||||
0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f,
|
||||
0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x70, 0x75, 0x72, 0x63, 0x68,
|
||||
0x61, 0x73, 0x65, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65,
|
||||
0x22, 0xa6, 0x02, 0x0a, 0x0c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x6c, 0x6f,
|
||||
0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
|
||||
0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, 0x73, 0x41,
|
||||
0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, 0x73, 0x41,
|
||||
0x74, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x64, 0x73, 0x41, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28,
|
||||
0x0d, 0x52, 0x06, 0x65, 0x6e, 0x64, 0x73, 0x41, 0x74, 0x12, 0x24, 0x0a, 0x05, 0x74, 0x69, 0x65,
|
||||
0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x73,
|
||||
0x61, 0x6c, 0x65, 0x2e, 0x54, 0x69, 0x65, 0x72, 0x52, 0x05, 0x74, 0x69, 0x65, 0x72, 0x73, 0x12,
|
||||
0x28, 0x0a, 0x0f, 0x73, 0x65, 0x6c, 0x6c, 0x65, 0x72, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b,
|
||||
0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x65, 0x6c, 0x6c, 0x65, 0x72,
|
||||
0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x0d, 0x6d, 0x61, 0x78,
|
||||
0x50, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d,
|
||||
0x52, 0x0d, 0x6d, 0x61, 0x78, 0x50, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12,
|
||||
0x34, 0x0a, 0x15, 0x6d, 0x61, 0x78, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x65,
|
||||
0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x15,
|
||||
0x6d, 0x61, 0x78, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x65, 0x72, 0x63, 0x65,
|
||||
0x6e, 0x74, 0x61, 0x67, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x73, 0x65, 0x6c, 0x6c, 0x65, 0x72, 0x57,
|
||||
0x61, 0x6c, 0x6c, 0x65, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x65, 0x6c,
|
||||
0x6c, 0x65, 0x72, 0x57, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x22, 0x5e, 0x0a, 0x04, 0x54, 0x69, 0x65,
|
||||
0x72, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x63, 0x65, 0x53, 0x61, 0x74, 0x18, 0x01, 0x20,
|
||||
0x01, 0x28, 0x0d, 0x52, 0x08, 0x70, 0x72, 0x69, 0x63, 0x65, 0x53, 0x61, 0x74, 0x12, 0x14, 0x0a,
|
||||
0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6c, 0x69,
|
||||
0x6d, 0x69, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x6d, 0x61, 0x78, 0x50, 0x65, 0x72, 0x41, 0x64, 0x64,
|
||||
0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x50,
|
||||
0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x6f, 0x0a, 0x0e, 0x41, 0x63, 0x74,
|
||||
0x69, 0x6f, 0x6e, 0x50, 0x75, 0x72, 0x63, 0x68, 0x61, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x70,
|
||||
0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6e,
|
||||
0x6f, 0x64, 0x65, 0x73, 0x61, 0x6c, 0x65, 0x2e, 0x50, 0x75, 0x72, 0x63, 0x68, 0x61, 0x73, 0x65,
|
||||
0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64,
|
||||
0x12, 0x28, 0x0a, 0x0f, 0x73, 0x65, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74,
|
||||
0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x65, 0x6c, 0x6c, 0x65,
|
||||
0x72, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0xcf, 0x01, 0x0a, 0x0f, 0x50,
|
||||
0x75, 0x72, 0x63, 0x68, 0x61, 0x73, 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x2e,
|
||||
0x0a, 0x08, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
|
||||
0x32, 0x12, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x61, 0x6c, 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x49, 0x44, 0x52, 0x08, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x49, 0x44, 0x12, 0x26,
|
||||
0x0a, 0x0e, 0x62, 0x75, 0x79, 0x65, 0x72, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79,
|
||||
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x62, 0x75, 0x79, 0x65, 0x72, 0x50, 0x75, 0x62,
|
||||
0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x44,
|
||||
0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x73,
|
||||
0x12, 0x26, 0x0a, 0x0e, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x41, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x53,
|
||||
0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x41,
|
||||
0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x61, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65,
|
||||
0x4f, 0x75, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c,
|
||||
0x74, 0x69, 0x6d, 0x65, 0x4f, 0x75, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x3a, 0x0a, 0x08,
|
||||
0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63,
|
||||
0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x18,
|
||||
0x0a, 0x07, 0x74, 0x78, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52,
|
||||
0x07, 0x74, 0x78, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x22, 0x8a, 0x01, 0x0a, 0x0e, 0x41, 0x63, 0x74,
|
||||
0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x64,
|
||||
0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65,
|
||||
0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74,
|
||||
0x65, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x6e,
|
||||
0x6f, 0x64, 0x65, 0x49, 0x44, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x07, 0x6e, 0x6f,
|
||||
0x64, 0x65, 0x49, 0x44, 0x73, 0x12, 0x2e, 0x0a, 0x08, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x49,
|
||||
0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x61,
|
||||
0x6c, 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x52, 0x08, 0x64, 0x65, 0x70,
|
||||
0x6c, 0x6f, 0x79, 0x49, 0x44, 0x2a, 0x45, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12,
|
||||
0x11, 0x0a, 0x0d, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x50, 0x4c, 0x4f, 0x59,
|
||||
0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x55, 0x52,
|
||||
0x43, 0x48, 0x41, 0x53, 0x45, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x41, 0x43, 0x54, 0x49, 0x4f,
|
||||
0x4e, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x47, 0x41, 0x54, 0x45, 0x10, 0x02, 0x42, 0x43, 0x5a, 0x41,
|
||||
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x61, 0x7a, 0x65, 0x2d,
|
||||
0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x72, 0x2d,
|
||||
0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x2f,
|
||||
0x6e, 0x6f, 0x64, 0x65, 0x73, 0x61, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
|
||||
0x66, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_modules_nodesale_protobuf_nodesale_proto_rawDescOnce sync.Once
|
||||
file_modules_nodesale_protobuf_nodesale_proto_rawDescData = file_modules_nodesale_protobuf_nodesale_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_modules_nodesale_protobuf_nodesale_proto_rawDescGZIP() []byte {
|
||||
file_modules_nodesale_protobuf_nodesale_proto_rawDescOnce.Do(func() {
|
||||
file_modules_nodesale_protobuf_nodesale_proto_rawDescData = protoimpl.X.CompressGZIP(file_modules_nodesale_protobuf_nodesale_proto_rawDescData)
|
||||
})
|
||||
return file_modules_nodesale_protobuf_nodesale_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_modules_nodesale_protobuf_nodesale_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
|
||||
var file_modules_nodesale_protobuf_nodesale_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
|
||||
var file_modules_nodesale_protobuf_nodesale_proto_goTypes = []interface{}{
|
||||
(Action)(0), // 0: nodesale.Action
|
||||
(*NodeSaleEvent)(nil), // 1: nodesale.NodeSaleEvent
|
||||
(*ActionDeploy)(nil), // 2: nodesale.ActionDeploy
|
||||
(*Tier)(nil), // 3: nodesale.Tier
|
||||
(*ActionPurchase)(nil), // 4: nodesale.ActionPurchase
|
||||
(*PurchasePayload)(nil), // 5: nodesale.PurchasePayload
|
||||
(*ActionID)(nil), // 6: nodesale.ActionID
|
||||
(*ActionDelegate)(nil), // 7: nodesale.ActionDelegate
|
||||
}
|
||||
var file_modules_nodesale_protobuf_nodesale_proto_depIdxs = []int32{
|
||||
0, // 0: nodesale.NodeSaleEvent.action:type_name -> nodesale.Action
|
||||
2, // 1: nodesale.NodeSaleEvent.deploy:type_name -> nodesale.ActionDeploy
|
||||
4, // 2: nodesale.NodeSaleEvent.purchase:type_name -> nodesale.ActionPurchase
|
||||
7, // 3: nodesale.NodeSaleEvent.delegate:type_name -> nodesale.ActionDelegate
|
||||
3, // 4: nodesale.ActionDeploy.tiers:type_name -> nodesale.Tier
|
||||
5, // 5: nodesale.ActionPurchase.payload:type_name -> nodesale.PurchasePayload
|
||||
6, // 6: nodesale.PurchasePayload.deployID:type_name -> nodesale.ActionID
|
||||
6, // 7: nodesale.ActionDelegate.deployID:type_name -> nodesale.ActionID
|
||||
8, // [8:8] is the sub-list for method output_type
|
||||
8, // [8:8] is the sub-list for method input_type
|
||||
8, // [8:8] is the sub-list for extension type_name
|
||||
8, // [8:8] is the sub-list for extension extendee
|
||||
0, // [0:8] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_modules_nodesale_protobuf_nodesale_proto_init() }
|
||||
func file_modules_nodesale_protobuf_nodesale_proto_init() {
|
||||
if File_modules_nodesale_protobuf_nodesale_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_modules_nodesale_protobuf_nodesale_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*NodeSaleEvent); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_modules_nodesale_protobuf_nodesale_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ActionDeploy); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_modules_nodesale_protobuf_nodesale_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Tier); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_modules_nodesale_protobuf_nodesale_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ActionPurchase); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_modules_nodesale_protobuf_nodesale_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*PurchasePayload); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_modules_nodesale_protobuf_nodesale_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ActionID); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_modules_nodesale_protobuf_nodesale_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ActionDelegate); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
file_modules_nodesale_protobuf_nodesale_proto_msgTypes[0].OneofWrappers = []interface{}{}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_modules_nodesale_protobuf_nodesale_proto_rawDesc,
|
||||
NumEnums: 1,
|
||||
NumMessages: 7,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_modules_nodesale_protobuf_nodesale_proto_goTypes,
|
||||
DependencyIndexes: file_modules_nodesale_protobuf_nodesale_proto_depIdxs,
|
||||
EnumInfos: file_modules_nodesale_protobuf_nodesale_proto_enumTypes,
|
||||
MessageInfos: file_modules_nodesale_protobuf_nodesale_proto_msgTypes,
|
||||
}.Build()
|
||||
File_modules_nodesale_protobuf_nodesale_proto = out.File
|
||||
file_modules_nodesale_protobuf_nodesale_proto_rawDesc = nil
|
||||
file_modules_nodesale_protobuf_nodesale_proto_goTypes = nil
|
||||
file_modules_nodesale_protobuf_nodesale_proto_depIdxs = nil
|
||||
}
|
||||
60
modules/nodesale/protobuf/nodesale.proto
Normal file
60
modules/nodesale/protobuf/nodesale.proto
Normal file
@@ -0,0 +1,60 @@
|
||||
syntax = "proto3";

// Regenerate the Go bindings with:
// protoc modules/nodesale/protobuf/nodesale.proto --go_out=. --go_opt=module=github.com/gaze-network/indexer-network

package nodesale;
option go_package = "github.com/gaze-network/indexer-network/modules/nodesale/protobuf";

// Action identifies which kind of node-sale operation an event carries.
enum Action {
  ACTION_DEPLOY = 0;
  ACTION_PURCHASE = 1;
  ACTION_DELEGATE = 2;
}

// NodeSaleEvent is the top-level event envelope. Exactly one of the
// optional payload fields is expected to be set, matching `action`
// (presumably enforced by the consumer — not expressible in proto3).
message NodeSaleEvent {
  Action action = 1;
  optional ActionDeploy deploy = 2;
  optional ActionPurchase purchase = 3;
  optional ActionDelegate delegate = 4;
}

// ActionDeploy announces a node sale and its pricing tiers.
message ActionDeploy {
  string name = 1;
  uint32 startsAt = 2; // sale opening time — assumed Unix seconds, TODO confirm units
  uint32 endsAt = 3;   // sale closing time — assumed Unix seconds, TODO confirm units
  repeated Tier tiers = 4;
  string sellerPublicKey = 5;
  uint32 maxPerAddress = 6;        // cap across the whole sale per buyer address
  uint32 maxDiscountPercentage = 7;
  string sellerWallet = 8;         // address that receives the sale proceeds
}

// Tier is one price level of a deploy.
message Tier {
  uint32 priceSat = 1;      // price per node, in satoshis
  uint32 limit = 2;         // number of nodes available in this tier
  uint32 maxPerAddress = 3; // per-buyer cap within this tier
}

// ActionPurchase is a buyer's purchase: the payload plus the seller's
// signature authorizing it.
message ActionPurchase {
  PurchasePayload payload = 1;
  string sellerSignature = 2; // seller's signature over `payload`
}

// PurchasePayload describes which nodes are bought from which deploy.
message PurchasePayload {
  ActionID deployID = 1;       // the deploy event this purchase refers to
  string buyerPublicKey = 2;
  repeated uint32 nodeIDs = 3; // node IDs being purchased
  int64 totalAmountSat = 4;    // total paid, in satoshis
  uint64 timeOutBlock = 5;     // purchase is rejected at/after this block height — TODO confirm inclusivity
}

// ActionID uniquely identifies an on-chain event by block height and
// transaction position within that block.
message ActionID {
  uint64 block = 1;
  uint32 txIndex = 2;
}

// ActionDelegate delegates previously purchased nodes to another key.
message ActionDelegate {
  string delegateePublicKey = 1;
  repeated uint32 nodeIDs = 2;
  ActionID deployID = 3; // the deploy the nodes were purchased from
}
|
||||
12
modules/nodesale/pubkeyaddr.go
Normal file
12
modules/nodesale/pubkeyaddr.go
Normal file
@@ -0,0 +1,12 @@
|
||||
package nodesale
|
||||
|
||||
import (
|
||||
"github.com/btcsuite/btcd/btcec/v2"
|
||||
"github.com/btcsuite/btcd/btcutil"
|
||||
)
|
||||
|
||||
func (p *Processor) PubkeyToPkHashAddress(pubKey *btcec.PublicKey) btcutil.Address {
|
||||
addrPubKey, _ := btcutil.NewAddressPubKey(pubKey.SerializeCompressed(), p.Network.ChainParams())
|
||||
addrPubKeyHash := addrPubKey.AddressPubKeyHash()
|
||||
return addrPubKeyHash
|
||||
}
|
||||
87
modules/nodesale/purchase.go
Normal file
87
modules/nodesale/purchase.go
Normal file
@@ -0,0 +1,87 @@
|
||||
package nodesale
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/core/types"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
|
||||
purchasevalidator "github.com/gaze-network/indexer-network/modules/nodesale/internal/validator/purchase"
|
||||
)
|
||||
|
||||
func (p *Processor) ProcessPurchase(ctx context.Context, qtx datagateway.NodeSaleDataGatewayWithTx, block *types.Block, event NodeSaleEvent) error {
|
||||
purchase := event.EventMessage.Purchase
|
||||
payload := purchase.Payload
|
||||
|
||||
validator := purchasevalidator.New()
|
||||
|
||||
validator.EqualXonlyPublicKey(payload.BuyerPublicKey, event.TxPubkey)
|
||||
|
||||
_, deploy, err := validator.NodeSaleExists(ctx, qtx, payload)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "cannot query. Something wrong.")
|
||||
}
|
||||
|
||||
validator.ValidTimestamp(deploy, block.Header.Timestamp)
|
||||
validator.WithinTimeoutBlock(payload.TimeOutBlock, uint64(event.Transaction.BlockHeight))
|
||||
|
||||
validator.VerifySignature(purchase, deploy)
|
||||
|
||||
_, tierMap := validator.ValidTiers(payload, deploy)
|
||||
|
||||
tiers := tierMap.Tiers
|
||||
buyingTiersCount := tierMap.BuyingTiersCount
|
||||
nodeIdToTier := tierMap.NodeIdToTier
|
||||
|
||||
_, err = validator.ValidUnpurchasedNodes(ctx, qtx, payload)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "cannot query. Something wrong.")
|
||||
}
|
||||
|
||||
_, meta := validator.ValidPaidAmount(payload, deploy, event.InputValue, tiers, buyingTiersCount, p.Network.ChainParams())
|
||||
|
||||
_, err = validator.WithinLimit(ctx, qtx, payload, deploy, tiers, buyingTiersCount)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "cannot query. Something wrong.")
|
||||
}
|
||||
|
||||
err = qtx.CreateEvent(ctx, entity.NodeSaleEvent{
|
||||
TxHash: event.Transaction.TxHash.String(),
|
||||
TxIndex: int32(event.Transaction.Index),
|
||||
Action: int32(event.EventMessage.Action),
|
||||
RawMessage: event.RawData,
|
||||
ParsedMessage: event.EventJson,
|
||||
BlockTimestamp: block.Header.Timestamp,
|
||||
BlockHash: event.Transaction.BlockHash.String(),
|
||||
BlockHeight: event.Transaction.BlockHeight,
|
||||
Valid: validator.Valid,
|
||||
WalletAddress: p.PubkeyToPkHashAddress(event.TxPubkey).EncodeAddress(),
|
||||
Metadata: meta,
|
||||
Reason: validator.Reason,
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed to insert event")
|
||||
}
|
||||
|
||||
if validator.Valid {
|
||||
// add to node
|
||||
for _, nodeId := range payload.NodeIDs {
|
||||
err := qtx.CreateNode(ctx, entity.Node{
|
||||
SaleBlock: deploy.BlockHeight,
|
||||
SaleTxIndex: deploy.TxIndex,
|
||||
NodeID: nodeId,
|
||||
TierIndex: nodeIdToTier[nodeId],
|
||||
DelegatedTo: "",
|
||||
OwnerPublicKey: payload.BuyerPublicKey,
|
||||
PurchaseTxHash: event.Transaction.TxHash.String(),
|
||||
DelegateTxHash: "",
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed to insert node")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
902
modules/nodesale/purchase_test.go
Normal file
902
modules/nodesale/purchase_test.go
Normal file
@@ -0,0 +1,902 @@
|
||||
package nodesale
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/btcsuite/btcd/btcec/v2"
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa"
|
||||
"github.com/gaze-network/indexer-network/common"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway/mocks"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/internal/validator"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/internal/validator/purchase"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/protobuf"
|
||||
"github.com/samber/lo"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/protobuf/encoding/protojson"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
func TestInvalidPurchase(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
|
||||
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
|
||||
|
||||
buyerPrivateKey, err := btcec.NewPrivateKey()
|
||||
require.NoError(t, err)
|
||||
buyerPubkeyHex := hex.EncodeToString(buyerPrivateKey.PubKey().SerializeCompressed())
|
||||
|
||||
message := &protobuf.NodeSaleEvent{
|
||||
Action: protobuf.Action_ACTION_PURCHASE,
|
||||
Purchase: &protobuf.ActionPurchase{
|
||||
Payload: &protobuf.PurchasePayload{
|
||||
DeployID: &protobuf.ActionID{
|
||||
Block: 111,
|
||||
TxIndex: 1,
|
||||
},
|
||||
NodeIDs: []uint32{1, 2},
|
||||
BuyerPublicKey: buyerPubkeyHex,
|
||||
TotalAmountSat: 500,
|
||||
TimeOutBlock: uint64(testBlockHeight) + 5,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
event, block := assembleTestEvent(buyerPrivateKey, "030303030303", "030303030303", 0, 0, message)
|
||||
|
||||
mockDgTx.EXPECT().GetNodeSale(mock.Anything, mock.Anything).Return(nil, nil)
|
||||
|
||||
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
|
||||
return event.Valid == false
|
||||
})).Return(nil)
|
||||
|
||||
err = p.ProcessPurchase(ctx, mockDgTx, block, event)
|
||||
require.NoError(t, err)
|
||||
|
||||
mockDgTx.AssertNotCalled(t, "CreateNode")
|
||||
}
|
||||
|
||||
func TestInvalidBuyerKey(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
|
||||
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
|
||||
|
||||
strangerPrivateKey, _ := btcec.NewPrivateKey()
|
||||
strangerPrivateKeyHex := hex.EncodeToString(strangerPrivateKey.PubKey().SerializeCompressed())
|
||||
|
||||
buyerPrivateKey, _ := btcec.NewPrivateKey()
|
||||
|
||||
message := &protobuf.NodeSaleEvent{
|
||||
Action: protobuf.Action_ACTION_PURCHASE,
|
||||
Purchase: &protobuf.ActionPurchase{
|
||||
Payload: &protobuf.PurchasePayload{
|
||||
DeployID: &protobuf.ActionID{
|
||||
Block: 100,
|
||||
TxIndex: 1,
|
||||
},
|
||||
NodeIDs: []uint32{1, 2},
|
||||
BuyerPublicKey: strangerPrivateKeyHex,
|
||||
TotalAmountSat: 200,
|
||||
TimeOutBlock: uint64(testBlockHeight) + 5,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
event, block := assembleTestEvent(buyerPrivateKey, "0707070707", "0707070707", 0, 0, message)
|
||||
block.Header.Timestamp = time.Now().UTC()
|
||||
|
||||
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
|
||||
return event.Valid == false && event.Reason == validator.INVALID_PUBKEY
|
||||
})).Return(nil)
|
||||
|
||||
err := p.ProcessPurchase(ctx, mockDgTx, block, event)
|
||||
require.NoError(t, err)
|
||||
|
||||
mockDgTx.AssertNotCalled(t, "CreateNode")
|
||||
}
|
||||
|
||||
func TestInvalidTimestamp(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
|
||||
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
|
||||
|
||||
sellerPrivateKey, err := btcec.NewPrivateKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
sellerPubkeyHex := hex.EncodeToString(sellerPrivateKey.PubKey().SerializeCompressed())
|
||||
sellerWallet := p.PubkeyToPkHashAddress(sellerPrivateKey.PubKey())
|
||||
|
||||
startAt := time.Now().Add(time.Hour * -1)
|
||||
endAt := time.Now().Add(time.Hour * 1)
|
||||
|
||||
tiers := lo.Map([]*protobuf.Tier{
|
||||
{
|
||||
PriceSat: 100,
|
||||
Limit: 5,
|
||||
MaxPerAddress: 100,
|
||||
},
|
||||
{
|
||||
PriceSat: 200,
|
||||
Limit: 5,
|
||||
MaxPerAddress: 100,
|
||||
},
|
||||
}, func(tier *protobuf.Tier, _ int) []byte {
|
||||
tierJson, err := protojson.Marshal(tier)
|
||||
require.NoError(t, err)
|
||||
return tierJson
|
||||
})
|
||||
mockDgTx.EXPECT().GetNodeSale(mock.Anything, datagateway.GetNodeSaleParams{
|
||||
BlockHeight: 100,
|
||||
TxIndex: 1,
|
||||
}).Return([]entity.NodeSale{
|
||||
{
|
||||
BlockHeight: 100,
|
||||
TxIndex: 1,
|
||||
Name: t.Name(),
|
||||
StartsAt: startAt,
|
||||
EndsAt: endAt,
|
||||
Tiers: tiers,
|
||||
SellerPublicKey: sellerPubkeyHex,
|
||||
MaxPerAddress: 100,
|
||||
DeployTxHash: "040404040404",
|
||||
MaxDiscountPercentage: 50,
|
||||
SellerWallet: sellerWallet.EncodeAddress(),
|
||||
},
|
||||
}, nil)
|
||||
|
||||
buyerPrivateKey, _ := btcec.NewPrivateKey()
|
||||
buyerPubkeyHex := hex.EncodeToString(buyerPrivateKey.PubKey().SerializeCompressed())
|
||||
|
||||
message := &protobuf.NodeSaleEvent{
|
||||
Action: protobuf.Action_ACTION_PURCHASE,
|
||||
Purchase: &protobuf.ActionPurchase{
|
||||
Payload: &protobuf.PurchasePayload{
|
||||
DeployID: &protobuf.ActionID{
|
||||
Block: 100,
|
||||
TxIndex: 1,
|
||||
},
|
||||
NodeIDs: []uint32{1, 2},
|
||||
BuyerPublicKey: buyerPubkeyHex,
|
||||
TotalAmountSat: 200,
|
||||
TimeOutBlock: uint64(testBlockHeight) + 5,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
event, block := assembleTestEvent(buyerPrivateKey, "050505050505", "050505050505", 0, 0, message)
|
||||
|
||||
block.Header.Timestamp = time.Now().UTC().Add(time.Hour * 2)
|
||||
|
||||
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
|
||||
return event.Valid == false && event.Reason == purchase.PURCHASE_TIMEOUT
|
||||
})).Return(nil)
|
||||
|
||||
err = p.ProcessPurchase(ctx, mockDgTx, block, event)
|
||||
require.NoError(t, err)
|
||||
|
||||
mockDgTx.AssertNotCalled(t, "CreateNode")
|
||||
}
|
||||
|
||||
func TestTimeOut(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
|
||||
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
|
||||
|
||||
sellerPrivateKey, _ := btcec.NewPrivateKey()
|
||||
sellerPubkeyHex := hex.EncodeToString(sellerPrivateKey.PubKey().SerializeCompressed())
|
||||
sellerWallet := p.PubkeyToPkHashAddress(sellerPrivateKey.PubKey())
|
||||
|
||||
startAt := time.Now().Add(time.Hour * -1)
|
||||
endAt := time.Now().Add(time.Hour * 1)
|
||||
|
||||
tiers := lo.Map([]*protobuf.Tier{
|
||||
{
|
||||
PriceSat: 100,
|
||||
Limit: 5,
|
||||
MaxPerAddress: 100,
|
||||
},
|
||||
{
|
||||
PriceSat: 200,
|
||||
Limit: 5,
|
||||
MaxPerAddress: 100,
|
||||
},
|
||||
}, func(tier *protobuf.Tier, _ int) []byte {
|
||||
tierJson, err := protojson.Marshal(tier)
|
||||
require.NoError(t, err)
|
||||
return tierJson
|
||||
})
|
||||
|
||||
mockDgTx.EXPECT().GetNodeSale(mock.Anything, datagateway.GetNodeSaleParams{
|
||||
BlockHeight: 100,
|
||||
TxIndex: 1,
|
||||
}).Return([]entity.NodeSale{
|
||||
{
|
||||
BlockHeight: 100,
|
||||
TxIndex: 1,
|
||||
Name: t.Name(),
|
||||
StartsAt: startAt,
|
||||
EndsAt: endAt,
|
||||
Tiers: tiers,
|
||||
SellerPublicKey: sellerPubkeyHex,
|
||||
MaxPerAddress: 100,
|
||||
DeployTxHash: "040404040404",
|
||||
MaxDiscountPercentage: 50,
|
||||
SellerWallet: sellerWallet.EncodeAddress(),
|
||||
},
|
||||
}, nil)
|
||||
|
||||
buyerPrivateKey, _ := btcec.NewPrivateKey()
|
||||
buyerPubkeyHex := hex.EncodeToString(buyerPrivateKey.PubKey().SerializeCompressed())
|
||||
|
||||
message := &protobuf.NodeSaleEvent{
|
||||
Action: protobuf.Action_ACTION_PURCHASE,
|
||||
Purchase: &protobuf.ActionPurchase{
|
||||
Payload: &protobuf.PurchasePayload{
|
||||
DeployID: &protobuf.ActionID{
|
||||
Block: 100,
|
||||
TxIndex: 1,
|
||||
},
|
||||
NodeIDs: []uint32{1, 2},
|
||||
BuyerPublicKey: buyerPubkeyHex,
|
||||
TimeOutBlock: uint64(testBlockHeight) - 5,
|
||||
TotalAmountSat: 200,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
event, block := assembleTestEvent(buyerPrivateKey, "090909090909", "090909090909", 0, 0, message)
|
||||
|
||||
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
|
||||
return event.Valid == false && event.Reason == purchase.BLOCK_HEIGHT_TIMEOUT
|
||||
})).Return(nil)
|
||||
|
||||
err := p.ProcessPurchase(ctx, mockDgTx, block, event)
|
||||
require.NoError(t, err)
|
||||
|
||||
mockDgTx.AssertNotCalled(t, "CreateNode")
|
||||
}
|
||||
|
||||
func TestSignatureInvalid(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
|
||||
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
|
||||
|
||||
sellerPrivateKey, _ := btcec.NewPrivateKey()
|
||||
sellerPubkeyHex := hex.EncodeToString(sellerPrivateKey.PubKey().SerializeCompressed())
|
||||
sellerWallet := p.PubkeyToPkHashAddress(sellerPrivateKey.PubKey())
|
||||
|
||||
startAt := time.Now().Add(time.Hour * -1)
|
||||
endAt := time.Now().Add(time.Hour * 1)
|
||||
|
||||
tiers := lo.Map([]*protobuf.Tier{
|
||||
{
|
||||
PriceSat: 100,
|
||||
Limit: 5,
|
||||
MaxPerAddress: 100,
|
||||
},
|
||||
{
|
||||
PriceSat: 200,
|
||||
Limit: 5,
|
||||
MaxPerAddress: 100,
|
||||
},
|
||||
}, func(tier *protobuf.Tier, _ int) []byte {
|
||||
tierJson, err := protojson.Marshal(tier)
|
||||
require.NoError(t, err)
|
||||
return tierJson
|
||||
})
|
||||
mockDgTx.EXPECT().GetNodeSale(mock.Anything, datagateway.GetNodeSaleParams{
|
||||
BlockHeight: 100,
|
||||
TxIndex: 1,
|
||||
}).Return([]entity.NodeSale{
|
||||
{
|
||||
BlockHeight: 100,
|
||||
TxIndex: 1,
|
||||
Name: t.Name(),
|
||||
StartsAt: startAt,
|
||||
EndsAt: endAt,
|
||||
Tiers: tiers,
|
||||
SellerPublicKey: sellerPubkeyHex,
|
||||
MaxPerAddress: 100,
|
||||
DeployTxHash: "040404040404",
|
||||
MaxDiscountPercentage: 50,
|
||||
SellerWallet: sellerWallet.EncodeAddress(),
|
||||
},
|
||||
}, nil)
|
||||
|
||||
buyerPrivateKey, _ := btcec.NewPrivateKey()
|
||||
buyerPubkeyHex := hex.EncodeToString(buyerPrivateKey.PubKey().SerializeCompressed())
|
||||
|
||||
payload := &protobuf.PurchasePayload{
|
||||
DeployID: &protobuf.ActionID{
|
||||
Block: 100,
|
||||
TxIndex: 1,
|
||||
},
|
||||
NodeIDs: []uint32{1, 2},
|
||||
BuyerPublicKey: buyerPubkeyHex,
|
||||
TimeOutBlock: testBlockHeight + 5,
|
||||
}
|
||||
|
||||
payloadBytes, _ := proto.Marshal(payload)
|
||||
payloadHash := chainhash.DoubleHashB(payloadBytes)
|
||||
signature := ecdsa.Sign(buyerPrivateKey, payloadHash[:])
|
||||
signatureHex := hex.EncodeToString(signature.Serialize())
|
||||
|
||||
message := &protobuf.NodeSaleEvent{
|
||||
Action: protobuf.Action_ACTION_PURCHASE,
|
||||
Purchase: &protobuf.ActionPurchase{
|
||||
Payload: payload,
|
||||
SellerSignature: signatureHex,
|
||||
},
|
||||
}
|
||||
|
||||
event, block := assembleTestEvent(buyerPrivateKey, "0B0B0B", "0B0B0B", 0, 0, message)
|
||||
|
||||
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
|
||||
return event.Valid == false && event.Reason == purchase.INVALID_SIGNATURE
|
||||
})).Return(nil)
|
||||
|
||||
err := p.ProcessPurchase(ctx, mockDgTx, block, event)
|
||||
require.NoError(t, err)
|
||||
|
||||
mockDgTx.AssertNotCalled(t, "CreateNode")
|
||||
}
|
||||
|
||||
func TestValidPurchase(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
|
||||
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
|
||||
|
||||
sellerPrivateKey, _ := btcec.NewPrivateKey()
|
||||
sellerPubkeyHex := hex.EncodeToString(sellerPrivateKey.PubKey().SerializeCompressed())
|
||||
sellerWallet := p.PubkeyToPkHashAddress(sellerPrivateKey.PubKey())
|
||||
|
||||
startAt := time.Now().Add(time.Hour * -1)
|
||||
endAt := time.Now().Add(time.Hour * 1)
|
||||
|
||||
tiers := lo.Map([]*protobuf.Tier{
|
||||
{
|
||||
PriceSat: 100,
|
||||
Limit: 5,
|
||||
MaxPerAddress: 100,
|
||||
},
|
||||
{
|
||||
PriceSat: 200,
|
||||
Limit: 4,
|
||||
MaxPerAddress: 2,
|
||||
},
|
||||
{
|
||||
PriceSat: 400,
|
||||
Limit: 3,
|
||||
MaxPerAddress: 100,
|
||||
},
|
||||
}, func(tier *protobuf.Tier, _ int) []byte {
|
||||
tierJson, err := protojson.Marshal(tier)
|
||||
require.NoError(t, err)
|
||||
return tierJson
|
||||
})
|
||||
|
||||
mockDgTx.EXPECT().GetNodeSale(mock.Anything, datagateway.GetNodeSaleParams{
|
||||
BlockHeight: 100,
|
||||
TxIndex: 1,
|
||||
}).Return([]entity.NodeSale{
|
||||
{
|
||||
BlockHeight: 100,
|
||||
TxIndex: 1,
|
||||
Name: t.Name(),
|
||||
StartsAt: startAt,
|
||||
EndsAt: endAt,
|
||||
Tiers: tiers,
|
||||
SellerPublicKey: sellerPubkeyHex,
|
||||
MaxPerAddress: 100,
|
||||
DeployTxHash: "040404040404",
|
||||
MaxDiscountPercentage: 50,
|
||||
SellerWallet: sellerWallet.EncodeAddress(),
|
||||
},
|
||||
}, nil)
|
||||
|
||||
mockDgTx.EXPECT().GetNodesByIds(mock.Anything, mock.Anything).Return(nil, nil)
|
||||
|
||||
mockDgTx.EXPECT().GetNodesByOwner(mock.Anything, mock.Anything).Return(nil, nil)
|
||||
|
||||
buyerPrivateKey, _ := btcec.NewPrivateKey()
|
||||
buyerPubkeyHex := hex.EncodeToString(buyerPrivateKey.PubKey().SerializeCompressed())
|
||||
|
||||
payload := &protobuf.PurchasePayload{
|
||||
DeployID: &protobuf.ActionID{
|
||||
Block: 100,
|
||||
TxIndex: 1,
|
||||
},
|
||||
BuyerPublicKey: buyerPubkeyHex,
|
||||
TimeOutBlock: uint64(testBlockHeight) + 5,
|
||||
NodeIDs: []uint32{0, 5, 6, 9},
|
||||
TotalAmountSat: 500,
|
||||
}
|
||||
|
||||
payloadBytes, _ := proto.Marshal(payload)
|
||||
payloadHash := chainhash.DoubleHashB(payloadBytes)
|
||||
signature := ecdsa.Sign(sellerPrivateKey, payloadHash[:])
|
||||
signatureHex := hex.EncodeToString(signature.Serialize())
|
||||
|
||||
message := &protobuf.NodeSaleEvent{
|
||||
Action: protobuf.Action_ACTION_PURCHASE,
|
||||
Purchase: &protobuf.ActionPurchase{
|
||||
Payload: payload,
|
||||
SellerSignature: signatureHex,
|
||||
},
|
||||
}
|
||||
|
||||
event, block := assembleTestEvent(buyerPrivateKey, "0D0D0D0D", "0D0D0D0D", 0, 0, message)
|
||||
event.InputValue = 500
|
||||
|
||||
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
|
||||
return event.Valid == true && event.Reason == ""
|
||||
})).Return(nil)
|
||||
|
||||
mockDgTx.EXPECT().CreateNode(mock.Anything, mock.MatchedBy(func(node entity.Node) bool {
|
||||
return node.NodeID == 0 &&
|
||||
node.TierIndex == 0 &&
|
||||
node.OwnerPublicKey == buyerPubkeyHex &&
|
||||
node.PurchaseTxHash == event.Transaction.TxHash.String() &&
|
||||
node.SaleBlock == 100 &&
|
||||
node.SaleTxIndex == 1
|
||||
})).Return(nil)
|
||||
|
||||
mockDgTx.EXPECT().CreateNode(mock.Anything, mock.MatchedBy(func(node entity.Node) bool {
|
||||
return node.NodeID == 5 &&
|
||||
node.TierIndex == 1 &&
|
||||
node.OwnerPublicKey == buyerPubkeyHex &&
|
||||
node.PurchaseTxHash == event.Transaction.TxHash.String() &&
|
||||
node.SaleBlock == 100 &&
|
||||
node.SaleTxIndex == 1
|
||||
})).Return(nil)
|
||||
|
||||
mockDgTx.EXPECT().CreateNode(mock.Anything, mock.MatchedBy(func(node entity.Node) bool {
|
||||
return node.NodeID == 6 &&
|
||||
node.TierIndex == 1 &&
|
||||
node.OwnerPublicKey == buyerPubkeyHex &&
|
||||
node.PurchaseTxHash == event.Transaction.TxHash.String() &&
|
||||
node.SaleBlock == 100 &&
|
||||
node.SaleTxIndex == 1
|
||||
})).Return(nil)
|
||||
|
||||
mockDgTx.EXPECT().CreateNode(mock.Anything, mock.MatchedBy(func(node entity.Node) bool {
|
||||
return node.NodeID == 9 &&
|
||||
node.TierIndex == 2 &&
|
||||
node.OwnerPublicKey == buyerPubkeyHex &&
|
||||
node.PurchaseTxHash == event.Transaction.TxHash.String() &&
|
||||
node.SaleBlock == 100 &&
|
||||
node.SaleTxIndex == 1
|
||||
})).Return(nil)
|
||||
|
||||
err := p.ProcessPurchase(ctx, mockDgTx, block, event)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestMismatchPayment(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
|
||||
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
|
||||
|
||||
sellerPrivateKey, _ := btcec.NewPrivateKey()
|
||||
sellerPubkeyHex := hex.EncodeToString(sellerPrivateKey.PubKey().SerializeCompressed())
|
||||
sellerWallet := p.PubkeyToPkHashAddress(sellerPrivateKey.PubKey())
|
||||
|
||||
startAt := time.Now().Add(time.Hour * -1)
|
||||
endAt := time.Now().Add(time.Hour * 1)
|
||||
|
||||
tiers := lo.Map([]*protobuf.Tier{
|
||||
{
|
||||
PriceSat: 100,
|
||||
Limit: 5,
|
||||
MaxPerAddress: 100,
|
||||
},
|
||||
{
|
||||
PriceSat: 200,
|
||||
Limit: 4,
|
||||
MaxPerAddress: 2,
|
||||
},
|
||||
{
|
||||
PriceSat: 400,
|
||||
Limit: 3,
|
||||
MaxPerAddress: 100,
|
||||
},
|
||||
}, func(tier *protobuf.Tier, _ int) []byte {
|
||||
tierJson, err := protojson.Marshal(tier)
|
||||
require.NoError(t, err)
|
||||
return tierJson
|
||||
})
|
||||
|
||||
mockDgTx.EXPECT().GetNodeSale(mock.Anything, datagateway.GetNodeSaleParams{
|
||||
BlockHeight: 100,
|
||||
TxIndex: 1,
|
||||
}).Return([]entity.NodeSale{
|
||||
{
|
||||
BlockHeight: 100,
|
||||
TxIndex: 1,
|
||||
Name: t.Name(),
|
||||
StartsAt: startAt,
|
||||
EndsAt: endAt,
|
||||
Tiers: tiers,
|
||||
SellerPublicKey: sellerPubkeyHex,
|
||||
MaxPerAddress: 100,
|
||||
DeployTxHash: "040404040404",
|
||||
MaxDiscountPercentage: 50,
|
||||
SellerWallet: sellerWallet.EncodeAddress(),
|
||||
},
|
||||
}, nil)
|
||||
|
||||
mockDgTx.EXPECT().GetNodesByIds(mock.Anything, mock.Anything).Return(nil, nil)
|
||||
|
||||
buyerPrivateKey, _ := btcec.NewPrivateKey()
|
||||
buyerPubkeyHex := hex.EncodeToString(buyerPrivateKey.PubKey().SerializeCompressed())
|
||||
|
||||
payload := &protobuf.PurchasePayload{
|
||||
DeployID: &protobuf.ActionID{
|
||||
Block: 100,
|
||||
TxIndex: 1,
|
||||
},
|
||||
BuyerPublicKey: buyerPubkeyHex,
|
||||
TimeOutBlock: uint64(testBlockHeight) + 5,
|
||||
NodeIDs: []uint32{0, 5, 6, 9},
|
||||
TotalAmountSat: 500,
|
||||
}
|
||||
|
||||
payloadBytes, _ := proto.Marshal(payload)
|
||||
payloadHash := chainhash.DoubleHashB(payloadBytes)
|
||||
signature := ecdsa.Sign(sellerPrivateKey, payloadHash[:])
|
||||
signatureHex := hex.EncodeToString(signature.Serialize())
|
||||
|
||||
message := &protobuf.NodeSaleEvent{
|
||||
Action: protobuf.Action_ACTION_PURCHASE,
|
||||
Purchase: &protobuf.ActionPurchase{
|
||||
Payload: payload,
|
||||
SellerSignature: signatureHex,
|
||||
},
|
||||
}
|
||||
|
||||
event, block := assembleTestEvent(buyerPrivateKey, "0D0D0D0D", "0D0D0D0D", 0, 0, message)
|
||||
event.InputValue = 400
|
||||
|
||||
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
|
||||
return event.Valid == false && event.Reason == purchase.INVALID_PAYMENT
|
||||
})).Return(nil)
|
||||
|
||||
err := p.ProcessPurchase(ctx, mockDgTx, block, event)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestInsufficientFund(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
|
||||
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
|
||||
|
||||
sellerPrivateKey, _ := btcec.NewPrivateKey()
|
||||
sellerPubkeyHex := hex.EncodeToString(sellerPrivateKey.PubKey().SerializeCompressed())
|
||||
sellerWallet := p.PubkeyToPkHashAddress(sellerPrivateKey.PubKey())
|
||||
|
||||
startAt := time.Now().Add(time.Hour * -1)
|
||||
endAt := time.Now().Add(time.Hour * 1)
|
||||
|
||||
tiers := lo.Map([]*protobuf.Tier{
|
||||
{
|
||||
PriceSat: 100,
|
||||
Limit: 5,
|
||||
MaxPerAddress: 100,
|
||||
},
|
||||
{
|
||||
PriceSat: 200,
|
||||
Limit: 4,
|
||||
MaxPerAddress: 2,
|
||||
},
|
||||
{
|
||||
PriceSat: 400,
|
||||
Limit: 3,
|
||||
MaxPerAddress: 100,
|
||||
},
|
||||
}, func(tier *protobuf.Tier, _ int) []byte {
|
||||
tierJson, err := protojson.Marshal(tier)
|
||||
require.NoError(t, err)
|
||||
return tierJson
|
||||
})
|
||||
|
||||
mockDgTx.EXPECT().GetNodeSale(mock.Anything, datagateway.GetNodeSaleParams{
|
||||
BlockHeight: 100,
|
||||
TxIndex: 1,
|
||||
}).Return([]entity.NodeSale{
|
||||
{
|
||||
BlockHeight: 100,
|
||||
TxIndex: 1,
|
||||
Name: t.Name(),
|
||||
StartsAt: startAt,
|
||||
EndsAt: endAt,
|
||||
Tiers: tiers,
|
||||
SellerPublicKey: sellerPubkeyHex,
|
||||
MaxPerAddress: 100,
|
||||
DeployTxHash: "040404040404",
|
||||
MaxDiscountPercentage: 50,
|
||||
SellerWallet: sellerWallet.EncodeAddress(),
|
||||
},
|
||||
}, nil)
|
||||
|
||||
mockDgTx.EXPECT().GetNodesByIds(mock.Anything, mock.Anything).Return(nil, nil)
|
||||
|
||||
buyerPrivateKey, _ := btcec.NewPrivateKey()
|
||||
buyerPubkeyHex := hex.EncodeToString(buyerPrivateKey.PubKey().SerializeCompressed())
|
||||
|
||||
payload := &protobuf.PurchasePayload{
|
||||
DeployID: &protobuf.ActionID{
|
||||
Block: 100,
|
||||
TxIndex: 1,
|
||||
},
|
||||
BuyerPublicKey: buyerPubkeyHex,
|
||||
TimeOutBlock: uint64(testBlockHeight) + 5,
|
||||
NodeIDs: []uint32{0, 5, 6, 9},
|
||||
TotalAmountSat: 200,
|
||||
}
|
||||
|
||||
payloadBytes, _ := proto.Marshal(payload)
|
||||
payloadHash := chainhash.DoubleHashB(payloadBytes)
|
||||
signature := ecdsa.Sign(sellerPrivateKey, payloadHash[:])
|
||||
signatureHex := hex.EncodeToString(signature.Serialize())
|
||||
|
||||
message := &protobuf.NodeSaleEvent{
|
||||
Action: protobuf.Action_ACTION_PURCHASE,
|
||||
Purchase: &protobuf.ActionPurchase{
|
||||
Payload: payload,
|
||||
SellerSignature: signatureHex,
|
||||
},
|
||||
}
|
||||
|
||||
event, block := assembleTestEvent(buyerPrivateKey, "0D0D0D0D", "0D0D0D0D", 0, 0, message)
|
||||
event.InputValue = 200
|
||||
|
||||
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
|
||||
return event.Valid == false && event.Reason == purchase.INSUFFICIENT_FUND
|
||||
})).Return(nil)
|
||||
|
||||
err := p.ProcessPurchase(ctx, mockDgTx, block, event)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestBuyingLimit(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
|
||||
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
|
||||
|
||||
sellerPrivateKey, _ := btcec.NewPrivateKey()
|
||||
sellerPubkeyHex := hex.EncodeToString(sellerPrivateKey.PubKey().SerializeCompressed())
|
||||
sellerWallet := p.PubkeyToPkHashAddress(sellerPrivateKey.PubKey())
|
||||
|
||||
startAt := time.Now().Add(time.Hour * -1)
|
||||
endAt := time.Now().Add(time.Hour * 1)
|
||||
|
||||
tiers := lo.Map([]*protobuf.Tier{
|
||||
{
|
||||
PriceSat: 100,
|
||||
Limit: 5,
|
||||
MaxPerAddress: 100,
|
||||
},
|
||||
{
|
||||
PriceSat: 200,
|
||||
Limit: 4,
|
||||
MaxPerAddress: 2,
|
||||
},
|
||||
{
|
||||
PriceSat: 400,
|
||||
Limit: 50,
|
||||
MaxPerAddress: 100,
|
||||
},
|
||||
}, func(tier *protobuf.Tier, _ int) []byte {
|
||||
tierJson, err := protojson.Marshal(tier)
|
||||
require.NoError(t, err)
|
||||
return tierJson
|
||||
})
|
||||
|
||||
mockDgTx.EXPECT().GetNodeSale(mock.Anything, datagateway.GetNodeSaleParams{
|
||||
BlockHeight: 100,
|
||||
TxIndex: 1,
|
||||
}).Return([]entity.NodeSale{
|
||||
{
|
||||
BlockHeight: 100,
|
||||
TxIndex: 1,
|
||||
Name: t.Name(),
|
||||
StartsAt: startAt,
|
||||
EndsAt: endAt,
|
||||
Tiers: tiers,
|
||||
SellerPublicKey: sellerPubkeyHex,
|
||||
MaxPerAddress: 2,
|
||||
DeployTxHash: "040404040404",
|
||||
MaxDiscountPercentage: 50,
|
||||
SellerWallet: sellerWallet.EncodeAddress(),
|
||||
},
|
||||
}, nil)
|
||||
|
||||
buyerPrivateKey, _ := btcec.NewPrivateKey()
|
||||
buyerPubkeyHex := hex.EncodeToString(buyerPrivateKey.PubKey().SerializeCompressed())
|
||||
|
||||
mockDgTx.EXPECT().GetNodesByIds(mock.Anything, mock.Anything).Return(nil, nil)
|
||||
|
||||
mockDgTx.EXPECT().GetNodesByOwner(mock.Anything, datagateway.GetNodesByOwnerParams{
|
||||
SaleBlock: 100,
|
||||
SaleTxIndex: 1,
|
||||
OwnerPublicKey: buyerPubkeyHex,
|
||||
}).Return([]entity.Node{
|
||||
{
|
||||
SaleBlock: 100,
|
||||
SaleTxIndex: 1,
|
||||
NodeID: 9,
|
||||
TierIndex: 2,
|
||||
OwnerPublicKey: buyerPubkeyHex,
|
||||
},
|
||||
{
|
||||
SaleBlock: 100,
|
||||
SaleTxIndex: 1,
|
||||
NodeID: 10,
|
||||
TierIndex: 2,
|
||||
OwnerPublicKey: buyerPubkeyHex,
|
||||
},
|
||||
}, nil)
|
||||
|
||||
payload := &protobuf.PurchasePayload{
|
||||
DeployID: &protobuf.ActionID{
|
||||
Block: 100,
|
||||
TxIndex: 1,
|
||||
},
|
||||
BuyerPublicKey: buyerPubkeyHex,
|
||||
TimeOutBlock: uint64(testBlockHeight) + 5,
|
||||
NodeIDs: []uint32{11},
|
||||
TotalAmountSat: 600,
|
||||
}
|
||||
|
||||
payloadBytes, _ := proto.Marshal(payload)
|
||||
payloadHash := chainhash.DoubleHashB(payloadBytes)
|
||||
signature := ecdsa.Sign(sellerPrivateKey, payloadHash[:])
|
||||
signatureHex := hex.EncodeToString(signature.Serialize())
|
||||
|
||||
message := &protobuf.NodeSaleEvent{
|
||||
Action: protobuf.Action_ACTION_PURCHASE,
|
||||
Purchase: &protobuf.ActionPurchase{
|
||||
Payload: payload,
|
||||
SellerSignature: signatureHex,
|
||||
},
|
||||
}
|
||||
|
||||
event, block := assembleTestEvent(buyerPrivateKey, "22222222", "22222222", 0, 0, message)
|
||||
event.InputValue = 600
|
||||
|
||||
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
|
||||
return event.Valid == false && event.Reason == purchase.OVER_LIMIT_PER_ADDR
|
||||
})).Return(nil)
|
||||
|
||||
err := p.ProcessPurchase(ctx, mockDgTx, block, event)
|
||||
require.NoError(t, err)
|
||||
|
||||
mockDgTx.AssertNotCalled(t, "CreateNode")
|
||||
}
|
||||
|
||||
func TestBuyingTierLimit(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
mockDgTx := mocks.NewNodeSaleDataGatewayWithTx(t)
|
||||
p := NewProcessor(mockDgTx, nil, common.NetworkMainnet, nil, 0)
|
||||
|
||||
sellerPrivateKey, _ := btcec.NewPrivateKey()
|
||||
sellerPubkeyHex := hex.EncodeToString(sellerPrivateKey.PubKey().SerializeCompressed())
|
||||
sellerWallet := p.PubkeyToPkHashAddress(sellerPrivateKey.PubKey())
|
||||
|
||||
startAt := time.Now().Add(time.Hour * -1)
|
||||
endAt := time.Now().Add(time.Hour * 1)
|
||||
|
||||
tiers := lo.Map([]*protobuf.Tier{
|
||||
{
|
||||
PriceSat: 100,
|
||||
Limit: 5,
|
||||
MaxPerAddress: 100,
|
||||
},
|
||||
{
|
||||
PriceSat: 200,
|
||||
Limit: 4,
|
||||
MaxPerAddress: 2,
|
||||
},
|
||||
{
|
||||
PriceSat: 400,
|
||||
Limit: 50,
|
||||
MaxPerAddress: 3,
|
||||
},
|
||||
}, func(tier *protobuf.Tier, _ int) []byte {
|
||||
tierJson, err := protojson.Marshal(tier)
|
||||
require.NoError(t, err)
|
||||
return tierJson
|
||||
})
|
||||
|
||||
mockDgTx.EXPECT().GetNodeSale(mock.Anything, datagateway.GetNodeSaleParams{
|
||||
BlockHeight: 100,
|
||||
TxIndex: 1,
|
||||
}).Return([]entity.NodeSale{
|
||||
{
|
||||
BlockHeight: 100,
|
||||
TxIndex: 1,
|
||||
Name: t.Name(),
|
||||
StartsAt: startAt,
|
||||
EndsAt: endAt,
|
||||
Tiers: tiers,
|
||||
SellerPublicKey: sellerPubkeyHex,
|
||||
MaxPerAddress: 100,
|
||||
DeployTxHash: "040404040404",
|
||||
MaxDiscountPercentage: 50,
|
||||
SellerWallet: sellerWallet.EncodeAddress(),
|
||||
},
|
||||
}, nil)
|
||||
|
||||
buyerPrivateKey, _ := btcec.NewPrivateKey()
|
||||
buyerPubkeyHex := hex.EncodeToString(buyerPrivateKey.PubKey().SerializeCompressed())
|
||||
|
||||
mockDgTx.EXPECT().GetNodesByIds(mock.Anything, mock.Anything).Return(nil, nil)
|
||||
|
||||
mockDgTx.EXPECT().GetNodesByOwner(mock.Anything, datagateway.GetNodesByOwnerParams{
|
||||
SaleBlock: 100,
|
||||
SaleTxIndex: 1,
|
||||
OwnerPublicKey: buyerPubkeyHex,
|
||||
}).Return([]entity.Node{
|
||||
{
|
||||
SaleBlock: 100,
|
||||
SaleTxIndex: 1,
|
||||
NodeID: 9,
|
||||
TierIndex: 2,
|
||||
OwnerPublicKey: buyerPubkeyHex,
|
||||
},
|
||||
{
|
||||
SaleBlock: 100,
|
||||
SaleTxIndex: 1,
|
||||
NodeID: 10,
|
||||
TierIndex: 2,
|
||||
OwnerPublicKey: buyerPubkeyHex,
|
||||
},
|
||||
{
|
||||
SaleBlock: 100,
|
||||
SaleTxIndex: 1,
|
||||
NodeID: 11,
|
||||
TierIndex: 2,
|
||||
OwnerPublicKey: buyerPubkeyHex,
|
||||
},
|
||||
}, nil)
|
||||
|
||||
payload := &protobuf.PurchasePayload{
|
||||
DeployID: &protobuf.ActionID{
|
||||
Block: 100,
|
||||
TxIndex: 1,
|
||||
},
|
||||
BuyerPublicKey: buyerPubkeyHex,
|
||||
TimeOutBlock: uint64(testBlockHeight) + 5,
|
||||
NodeIDs: []uint32{12, 13, 14},
|
||||
TotalAmountSat: 600,
|
||||
}
|
||||
|
||||
payloadBytes, _ := proto.Marshal(payload)
|
||||
payloadHash := chainhash.DoubleHashB(payloadBytes)
|
||||
signature := ecdsa.Sign(sellerPrivateKey, payloadHash[:])
|
||||
signatureHex := hex.EncodeToString(signature.Serialize())
|
||||
|
||||
message := &protobuf.NodeSaleEvent{
|
||||
Action: protobuf.Action_ACTION_PURCHASE,
|
||||
Purchase: &protobuf.ActionPurchase{
|
||||
Payload: payload,
|
||||
SellerSignature: signatureHex,
|
||||
},
|
||||
}
|
||||
|
||||
event, block := assembleTestEvent(buyerPrivateKey, "10101010", "10101010", 0, 0, message)
|
||||
event.InputValue = 600
|
||||
|
||||
mockDgTx.EXPECT().CreateEvent(mock.Anything, mock.MatchedBy(func(event entity.NodeSaleEvent) bool {
|
||||
return event.Valid == false && event.Reason == purchase.OVER_LIMIT_PER_TIER
|
||||
})).Return(nil)
|
||||
|
||||
err := p.ProcessPurchase(ctx, mockDgTx, block, event)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
62
modules/nodesale/repository/postgres/gen/blocks.sql.go
Normal file
62
modules/nodesale/repository/postgres/gen/blocks.sql.go
Normal file
@@ -0,0 +1,62 @@
|
||||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.27.0
|
||||
// source: blocks.sql
|
||||
|
||||
package gen
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
const createBlock = `-- name: CreateBlock :exec
|
||||
INSERT INTO blocks ("block_height", "block_hash", "module")
|
||||
VALUES ($1, $2, $3)
|
||||
`
|
||||
|
||||
type CreateBlockParams struct {
|
||||
BlockHeight int64
|
||||
BlockHash string
|
||||
Module string
|
||||
}
|
||||
|
||||
func (q *Queries) CreateBlock(ctx context.Context, arg CreateBlockParams) error {
|
||||
_, err := q.db.Exec(ctx, createBlock, arg.BlockHeight, arg.BlockHash, arg.Module)
|
||||
return err
|
||||
}
|
||||
|
||||
const getBlock = `-- name: GetBlock :one
|
||||
SELECT block_height, block_hash, module FROM blocks
|
||||
WHERE "block_height" = $1
|
||||
`
|
||||
|
||||
func (q *Queries) GetBlock(ctx context.Context, blockHeight int64) (Block, error) {
|
||||
row := q.db.QueryRow(ctx, getBlock, blockHeight)
|
||||
var i Block
|
||||
err := row.Scan(&i.BlockHeight, &i.BlockHash, &i.Module)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const getLastProcessedBlock = `-- name: GetLastProcessedBlock :one
|
||||
SELECT block_height, block_hash, module FROM blocks ORDER BY block_height DESC LIMIT 1
|
||||
`
|
||||
|
||||
func (q *Queries) GetLastProcessedBlock(ctx context.Context) (Block, error) {
|
||||
row := q.db.QueryRow(ctx, getLastProcessedBlock)
|
||||
var i Block
|
||||
err := row.Scan(&i.BlockHeight, &i.BlockHash, &i.Module)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const removeBlockFrom = `-- name: RemoveBlockFrom :execrows
|
||||
DELETE FROM blocks
|
||||
WHERE "block_height" >= $1
|
||||
`
|
||||
|
||||
func (q *Queries) RemoveBlockFrom(ctx context.Context, fromBlock int64) (int64, error) {
|
||||
result, err := q.db.Exec(ctx, removeBlockFrom, fromBlock)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return result.RowsAffected(), nil
|
||||
}
|
||||
32
modules/nodesale/repository/postgres/gen/db.go
Normal file
32
modules/nodesale/repository/postgres/gen/db.go
Normal file
@@ -0,0 +1,32 @@
|
||||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.27.0
|
||||
|
||||
package gen
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
)
|
||||
|
||||
type DBTX interface {
|
||||
Exec(context.Context, string, ...interface{}) (pgconn.CommandTag, error)
|
||||
Query(context.Context, string, ...interface{}) (pgx.Rows, error)
|
||||
QueryRow(context.Context, string, ...interface{}) pgx.Row
|
||||
}
|
||||
|
||||
func New(db DBTX) *Queries {
|
||||
return &Queries{db: db}
|
||||
}
|
||||
|
||||
type Queries struct {
|
||||
db DBTX
|
||||
}
|
||||
|
||||
func (q *Queries) WithTx(tx pgx.Tx) *Queries {
|
||||
return &Queries{
|
||||
db: tx,
|
||||
}
|
||||
}
|
||||
104
modules/nodesale/repository/postgres/gen/events.sql.go
Normal file
104
modules/nodesale/repository/postgres/gen/events.sql.go
Normal file
@@ -0,0 +1,104 @@
|
||||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.27.0
|
||||
// source: events.sql
|
||||
|
||||
package gen
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/jackc/pgx/v5/pgtype"
|
||||
)
|
||||
|
||||
const createEvent = `-- name: CreateEvent :exec
|
||||
INSERT INTO events ("tx_hash", "block_height", "tx_index", "wallet_address", "valid", "action",
|
||||
"raw_message", "parsed_message", "block_timestamp", "block_hash", "metadata",
|
||||
"reason")
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)
|
||||
`
|
||||
|
||||
type CreateEventParams struct {
|
||||
TxHash string
|
||||
BlockHeight int64
|
||||
TxIndex int32
|
||||
WalletAddress string
|
||||
Valid bool
|
||||
Action int32
|
||||
RawMessage []byte
|
||||
ParsedMessage []byte
|
||||
BlockTimestamp pgtype.Timestamp
|
||||
BlockHash string
|
||||
Metadata []byte
|
||||
Reason string
|
||||
}
|
||||
|
||||
func (q *Queries) CreateEvent(ctx context.Context, arg CreateEventParams) error {
|
||||
_, err := q.db.Exec(ctx, createEvent,
|
||||
arg.TxHash,
|
||||
arg.BlockHeight,
|
||||
arg.TxIndex,
|
||||
arg.WalletAddress,
|
||||
arg.Valid,
|
||||
arg.Action,
|
||||
arg.RawMessage,
|
||||
arg.ParsedMessage,
|
||||
arg.BlockTimestamp,
|
||||
arg.BlockHash,
|
||||
arg.Metadata,
|
||||
arg.Reason,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
const getEventsByWallet = `-- name: GetEventsByWallet :many
|
||||
SELECT tx_hash, block_height, tx_index, wallet_address, valid, action, raw_message, parsed_message, block_timestamp, block_hash, metadata, reason
|
||||
FROM events
|
||||
WHERE wallet_address = $1
|
||||
`
|
||||
|
||||
func (q *Queries) GetEventsByWallet(ctx context.Context, walletAddress string) ([]Event, error) {
|
||||
rows, err := q.db.Query(ctx, getEventsByWallet, walletAddress)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []Event
|
||||
for rows.Next() {
|
||||
var i Event
|
||||
if err := rows.Scan(
|
||||
&i.TxHash,
|
||||
&i.BlockHeight,
|
||||
&i.TxIndex,
|
||||
&i.WalletAddress,
|
||||
&i.Valid,
|
||||
&i.Action,
|
||||
&i.RawMessage,
|
||||
&i.ParsedMessage,
|
||||
&i.BlockTimestamp,
|
||||
&i.BlockHash,
|
||||
&i.Metadata,
|
||||
&i.Reason,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const removeEventsFromBlock = `-- name: RemoveEventsFromBlock :execrows
|
||||
DELETE FROM events
|
||||
WHERE "block_height" >= $1
|
||||
`
|
||||
|
||||
func (q *Queries) RemoveEventsFromBlock(ctx context.Context, fromBlock int64) (int64, error) {
|
||||
result, err := q.db.Exec(ctx, removeEventsFromBlock, fromBlock)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return result.RowsAffected(), nil
|
||||
}
|
||||
55
modules/nodesale/repository/postgres/gen/models.go
Normal file
55
modules/nodesale/repository/postgres/gen/models.go
Normal file
@@ -0,0 +1,55 @@
|
||||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.27.0
|
||||
|
||||
package gen
|
||||
|
||||
import (
|
||||
"github.com/jackc/pgx/v5/pgtype"
|
||||
)
|
||||
|
||||
type Block struct {
|
||||
BlockHeight int64
|
||||
BlockHash string
|
||||
Module string
|
||||
}
|
||||
|
||||
type Event struct {
|
||||
TxHash string
|
||||
BlockHeight int64
|
||||
TxIndex int32
|
||||
WalletAddress string
|
||||
Valid bool
|
||||
Action int32
|
||||
RawMessage []byte
|
||||
ParsedMessage []byte
|
||||
BlockTimestamp pgtype.Timestamp
|
||||
BlockHash string
|
||||
Metadata []byte
|
||||
Reason string
|
||||
}
|
||||
|
||||
type Node struct {
|
||||
SaleBlock int64
|
||||
SaleTxIndex int32
|
||||
NodeID int32
|
||||
TierIndex int32
|
||||
DelegatedTo string
|
||||
OwnerPublicKey string
|
||||
PurchaseTxHash string
|
||||
DelegateTxHash string
|
||||
}
|
||||
|
||||
type NodeSale struct {
|
||||
BlockHeight int64
|
||||
TxIndex int32
|
||||
Name string
|
||||
StartsAt pgtype.Timestamp
|
||||
EndsAt pgtype.Timestamp
|
||||
Tiers [][]byte
|
||||
SellerPublicKey string
|
||||
MaxPerAddress int32
|
||||
DeployTxHash string
|
||||
MaxDiscountPercentage int32
|
||||
SellerWallet string
|
||||
}
|
||||
312
modules/nodesale/repository/postgres/gen/nodes.sql.go
Normal file
312
modules/nodesale/repository/postgres/gen/nodes.sql.go
Normal file
@@ -0,0 +1,312 @@
|
||||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.27.0
|
||||
// source: nodes.sql
|
||||
|
||||
package gen
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
const clearDelegate = `-- name: ClearDelegate :execrows
|
||||
UPDATE nodes
|
||||
SET "delegated_to" = ''
|
||||
WHERE "delegate_tx_hash" = ''
|
||||
`
|
||||
|
||||
func (q *Queries) ClearDelegate(ctx context.Context) (int64, error) {
|
||||
result, err := q.db.Exec(ctx, clearDelegate)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return result.RowsAffected(), nil
|
||||
}
|
||||
|
||||
const createNode = `-- name: CreateNode :exec
|
||||
INSERT INTO nodes (sale_block, sale_tx_index, node_id, tier_index, delegated_to, owner_public_key, purchase_tx_hash, delegate_tx_hash)
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
|
||||
`
|
||||
|
||||
type CreateNodeParams struct {
|
||||
SaleBlock int64
|
||||
SaleTxIndex int32
|
||||
NodeID int32
|
||||
TierIndex int32
|
||||
DelegatedTo string
|
||||
OwnerPublicKey string
|
||||
PurchaseTxHash string
|
||||
DelegateTxHash string
|
||||
}
|
||||
|
||||
func (q *Queries) CreateNode(ctx context.Context, arg CreateNodeParams) error {
|
||||
_, err := q.db.Exec(ctx, createNode,
|
||||
arg.SaleBlock,
|
||||
arg.SaleTxIndex,
|
||||
arg.NodeID,
|
||||
arg.TierIndex,
|
||||
arg.DelegatedTo,
|
||||
arg.OwnerPublicKey,
|
||||
arg.PurchaseTxHash,
|
||||
arg.DelegateTxHash,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
const getNodeCountByTierIndex = `-- name: GetNodeCountByTierIndex :many
|
||||
SELECT (tiers.tier_index)::int AS tier_index, count(nodes.tier_index)
|
||||
FROM generate_series($3::int,$4::int) AS tiers(tier_index)
|
||||
LEFT JOIN
|
||||
(SELECT sale_block, sale_tx_index, node_id, tier_index, delegated_to, owner_public_key, purchase_tx_hash, delegate_tx_hash
|
||||
FROM nodes
|
||||
WHERE sale_block = $1 AND
|
||||
sale_tx_index= $2)
|
||||
AS nodes ON tiers.tier_index = nodes.tier_index
|
||||
GROUP BY tiers.tier_index
|
||||
ORDER BY tiers.tier_index
|
||||
`
|
||||
|
||||
type GetNodeCountByTierIndexParams struct {
|
||||
SaleBlock int64
|
||||
SaleTxIndex int32
|
||||
FromTier int32
|
||||
ToTier int32
|
||||
}
|
||||
|
||||
type GetNodeCountByTierIndexRow struct {
|
||||
TierIndex int32
|
||||
Count int64
|
||||
}
|
||||
|
||||
func (q *Queries) GetNodeCountByTierIndex(ctx context.Context, arg GetNodeCountByTierIndexParams) ([]GetNodeCountByTierIndexRow, error) {
|
||||
rows, err := q.db.Query(ctx, getNodeCountByTierIndex,
|
||||
arg.SaleBlock,
|
||||
arg.SaleTxIndex,
|
||||
arg.FromTier,
|
||||
arg.ToTier,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []GetNodeCountByTierIndexRow
|
||||
for rows.Next() {
|
||||
var i GetNodeCountByTierIndexRow
|
||||
if err := rows.Scan(&i.TierIndex, &i.Count); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const getNodesByDeployment = `-- name: GetNodesByDeployment :many
|
||||
SELECT sale_block, sale_tx_index, node_id, tier_index, delegated_to, owner_public_key, purchase_tx_hash, delegate_tx_hash
|
||||
FROM nodes
|
||||
WHERE sale_block = $1 AND
|
||||
sale_tx_index = $2
|
||||
`
|
||||
|
||||
type GetNodesByDeploymentParams struct {
|
||||
SaleBlock int64
|
||||
SaleTxIndex int32
|
||||
}
|
||||
|
||||
func (q *Queries) GetNodesByDeployment(ctx context.Context, arg GetNodesByDeploymentParams) ([]Node, error) {
|
||||
rows, err := q.db.Query(ctx, getNodesByDeployment, arg.SaleBlock, arg.SaleTxIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []Node
|
||||
for rows.Next() {
|
||||
var i Node
|
||||
if err := rows.Scan(
|
||||
&i.SaleBlock,
|
||||
&i.SaleTxIndex,
|
||||
&i.NodeID,
|
||||
&i.TierIndex,
|
||||
&i.DelegatedTo,
|
||||
&i.OwnerPublicKey,
|
||||
&i.PurchaseTxHash,
|
||||
&i.DelegateTxHash,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const getNodesByIds = `-- name: GetNodesByIds :many
|
||||
SELECT sale_block, sale_tx_index, node_id, tier_index, delegated_to, owner_public_key, purchase_tx_hash, delegate_tx_hash
|
||||
FROM nodes
|
||||
WHERE sale_block = $1 AND
|
||||
sale_tx_index = $2 AND
|
||||
node_id = ANY ($3::int[])
|
||||
`
|
||||
|
||||
type GetNodesByIdsParams struct {
|
||||
SaleBlock int64
|
||||
SaleTxIndex int32
|
||||
NodeIds []int32
|
||||
}
|
||||
|
||||
func (q *Queries) GetNodesByIds(ctx context.Context, arg GetNodesByIdsParams) ([]Node, error) {
|
||||
rows, err := q.db.Query(ctx, getNodesByIds, arg.SaleBlock, arg.SaleTxIndex, arg.NodeIds)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []Node
|
||||
for rows.Next() {
|
||||
var i Node
|
||||
if err := rows.Scan(
|
||||
&i.SaleBlock,
|
||||
&i.SaleTxIndex,
|
||||
&i.NodeID,
|
||||
&i.TierIndex,
|
||||
&i.DelegatedTo,
|
||||
&i.OwnerPublicKey,
|
||||
&i.PurchaseTxHash,
|
||||
&i.DelegateTxHash,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const getNodesByOwner = `-- name: GetNodesByOwner :many
|
||||
SELECT sale_block, sale_tx_index, node_id, tier_index, delegated_to, owner_public_key, purchase_tx_hash, delegate_tx_hash
|
||||
FROM nodes
|
||||
WHERE sale_block = $1 AND
|
||||
sale_tx_index = $2 AND
|
||||
owner_public_key = $3
|
||||
ORDER BY tier_index
|
||||
`
|
||||
|
||||
type GetNodesByOwnerParams struct {
|
||||
SaleBlock int64
|
||||
SaleTxIndex int32
|
||||
OwnerPublicKey string
|
||||
}
|
||||
|
||||
func (q *Queries) GetNodesByOwner(ctx context.Context, arg GetNodesByOwnerParams) ([]Node, error) {
|
||||
rows, err := q.db.Query(ctx, getNodesByOwner, arg.SaleBlock, arg.SaleTxIndex, arg.OwnerPublicKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []Node
|
||||
for rows.Next() {
|
||||
var i Node
|
||||
if err := rows.Scan(
|
||||
&i.SaleBlock,
|
||||
&i.SaleTxIndex,
|
||||
&i.NodeID,
|
||||
&i.TierIndex,
|
||||
&i.DelegatedTo,
|
||||
&i.OwnerPublicKey,
|
||||
&i.PurchaseTxHash,
|
||||
&i.DelegateTxHash,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const getNodesByPubkey = `-- name: GetNodesByPubkey :many
|
||||
SELECT nodes.sale_block, nodes.sale_tx_index, nodes.node_id, nodes.tier_index, nodes.delegated_to, nodes.owner_public_key, nodes.purchase_tx_hash, nodes.delegate_tx_hash
|
||||
FROM nodes JOIN events ON nodes.purchase_tx_hash = events.tx_hash
|
||||
WHERE sale_block = $1 AND
|
||||
sale_tx_index = $2 AND
|
||||
owner_public_key = $3 AND
|
||||
delegated_to = $4
|
||||
`
|
||||
|
||||
type GetNodesByPubkeyParams struct {
|
||||
SaleBlock int64
|
||||
SaleTxIndex int32
|
||||
OwnerPublicKey string
|
||||
DelegatedTo string
|
||||
}
|
||||
|
||||
func (q *Queries) GetNodesByPubkey(ctx context.Context, arg GetNodesByPubkeyParams) ([]Node, error) {
|
||||
rows, err := q.db.Query(ctx, getNodesByPubkey,
|
||||
arg.SaleBlock,
|
||||
arg.SaleTxIndex,
|
||||
arg.OwnerPublicKey,
|
||||
arg.DelegatedTo,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []Node
|
||||
for rows.Next() {
|
||||
var i Node
|
||||
if err := rows.Scan(
|
||||
&i.SaleBlock,
|
||||
&i.SaleTxIndex,
|
||||
&i.NodeID,
|
||||
&i.TierIndex,
|
||||
&i.DelegatedTo,
|
||||
&i.OwnerPublicKey,
|
||||
&i.PurchaseTxHash,
|
||||
&i.DelegateTxHash,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const setDelegates = `-- name: SetDelegates :execrows
|
||||
UPDATE nodes
|
||||
SET delegated_to = $4, delegate_tx_hash = $3
|
||||
WHERE sale_block = $1 AND
|
||||
sale_tx_index = $2 AND
|
||||
node_id = ANY ($5::int[])
|
||||
`
|
||||
|
||||
type SetDelegatesParams struct {
|
||||
SaleBlock int64
|
||||
SaleTxIndex int32
|
||||
DelegateTxHash string
|
||||
Delegatee string
|
||||
NodeIds []int32
|
||||
}
|
||||
|
||||
func (q *Queries) SetDelegates(ctx context.Context, arg SetDelegatesParams) (int64, error) {
|
||||
result, err := q.db.Exec(ctx, setDelegates,
|
||||
arg.SaleBlock,
|
||||
arg.SaleTxIndex,
|
||||
arg.DelegateTxHash,
|
||||
arg.Delegatee,
|
||||
arg.NodeIds,
|
||||
)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return result.RowsAffected(), nil
|
||||
}
|
||||
92
modules/nodesale/repository/postgres/gen/nodesales.sql.go
Normal file
92
modules/nodesale/repository/postgres/gen/nodesales.sql.go
Normal file
@@ -0,0 +1,92 @@
|
||||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.27.0
|
||||
// source: nodesales.sql
|
||||
|
||||
package gen
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/jackc/pgx/v5/pgtype"
|
||||
)
|
||||
|
||||
const createNodeSale = `-- name: CreateNodeSale :exec
|
||||
INSERT INTO node_sales ("block_height", "tx_index", "name", "starts_at", "ends_at", "tiers", "seller_public_key", "max_per_address", "deploy_tx_hash", "max_discount_percentage", "seller_wallet")
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)
|
||||
`
|
||||
|
||||
type CreateNodeSaleParams struct {
|
||||
BlockHeight int64
|
||||
TxIndex int32
|
||||
Name string
|
||||
StartsAt pgtype.Timestamp
|
||||
EndsAt pgtype.Timestamp
|
||||
Tiers [][]byte
|
||||
SellerPublicKey string
|
||||
MaxPerAddress int32
|
||||
DeployTxHash string
|
||||
MaxDiscountPercentage int32
|
||||
SellerWallet string
|
||||
}
|
||||
|
||||
func (q *Queries) CreateNodeSale(ctx context.Context, arg CreateNodeSaleParams) error {
|
||||
_, err := q.db.Exec(ctx, createNodeSale,
|
||||
arg.BlockHeight,
|
||||
arg.TxIndex,
|
||||
arg.Name,
|
||||
arg.StartsAt,
|
||||
arg.EndsAt,
|
||||
arg.Tiers,
|
||||
arg.SellerPublicKey,
|
||||
arg.MaxPerAddress,
|
||||
arg.DeployTxHash,
|
||||
arg.MaxDiscountPercentage,
|
||||
arg.SellerWallet,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
const getNodeSale = `-- name: GetNodeSale :many
|
||||
SELECT block_height, tx_index, name, starts_at, ends_at, tiers, seller_public_key, max_per_address, deploy_tx_hash, max_discount_percentage, seller_wallet
|
||||
FROM node_sales
|
||||
WHERE block_height = $1 AND
|
||||
tx_index = $2
|
||||
`
|
||||
|
||||
type GetNodeSaleParams struct {
|
||||
BlockHeight int64
|
||||
TxIndex int32
|
||||
}
|
||||
|
||||
func (q *Queries) GetNodeSale(ctx context.Context, arg GetNodeSaleParams) ([]NodeSale, error) {
|
||||
rows, err := q.db.Query(ctx, getNodeSale, arg.BlockHeight, arg.TxIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []NodeSale
|
||||
for rows.Next() {
|
||||
var i NodeSale
|
||||
if err := rows.Scan(
|
||||
&i.BlockHeight,
|
||||
&i.TxIndex,
|
||||
&i.Name,
|
||||
&i.StartsAt,
|
||||
&i.EndsAt,
|
||||
&i.Tiers,
|
||||
&i.SellerPublicKey,
|
||||
&i.MaxPerAddress,
|
||||
&i.DeployTxHash,
|
||||
&i.MaxDiscountPercentage,
|
||||
&i.SellerWallet,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
20
modules/nodesale/repository/postgres/gen/test.sql.go
Normal file
20
modules/nodesale/repository/postgres/gen/test.sql.go
Normal file
@@ -0,0 +1,20 @@
|
||||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.27.0
|
||||
// source: test.sql
|
||||
|
||||
package gen
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
const clearEvents = `-- name: ClearEvents :exec
|
||||
DELETE FROM events
|
||||
WHERE tx_hash <> ''
|
||||
`
|
||||
|
||||
func (q *Queries) ClearEvents(ctx context.Context) error {
|
||||
_, err := q.db.Exec(ctx, clearEvents)
|
||||
return err
|
||||
}
|
||||
74
modules/nodesale/repository/postgres/mapper.go
Normal file
74
modules/nodesale/repository/postgres/mapper.go
Normal file
@@ -0,0 +1,74 @@
|
||||
package postgres
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/repository/postgres/gen"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
func mapNodes(nodes []gen.Node) []entity.Node {
|
||||
return lo.Map(nodes, func(item gen.Node, index int) entity.Node {
|
||||
return entity.Node{
|
||||
SaleBlock: uint64(item.SaleBlock),
|
||||
SaleTxIndex: uint32(item.SaleTxIndex),
|
||||
NodeID: uint32(item.NodeID),
|
||||
TierIndex: item.TierIndex,
|
||||
DelegatedTo: item.DelegatedTo,
|
||||
OwnerPublicKey: item.OwnerPublicKey,
|
||||
PurchaseTxHash: item.PurchaseTxHash,
|
||||
DelegateTxHash: item.DelegateTxHash,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func mapNodeSales(nodeSales []gen.NodeSale) []entity.NodeSale {
|
||||
return lo.Map(nodeSales, func(item gen.NodeSale, index int) entity.NodeSale {
|
||||
return entity.NodeSale{
|
||||
BlockHeight: uint64(item.BlockHeight),
|
||||
TxIndex: uint32(item.TxIndex),
|
||||
Name: item.Name,
|
||||
StartsAt: item.StartsAt.Time,
|
||||
EndsAt: item.EndsAt.Time,
|
||||
Tiers: item.Tiers,
|
||||
SellerPublicKey: item.SellerPublicKey,
|
||||
MaxPerAddress: uint32(item.MaxPerAddress),
|
||||
DeployTxHash: item.DeployTxHash,
|
||||
MaxDiscountPercentage: item.MaxDiscountPercentage,
|
||||
SellerWallet: item.SellerWallet,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func mapNodeCountByTierIndexRows(nodeCount []gen.GetNodeCountByTierIndexRow) []datagateway.GetNodeCountByTierIndexRow {
|
||||
return lo.Map(nodeCount, func(item gen.GetNodeCountByTierIndexRow, index int) datagateway.GetNodeCountByTierIndexRow {
|
||||
return datagateway.GetNodeCountByTierIndexRow{
|
||||
TierIndex: item.TierIndex,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func mapNodeSalesEvents(events []gen.Event) []entity.NodeSaleEvent {
|
||||
return lo.Map(events, func(item gen.Event, index int) entity.NodeSaleEvent {
|
||||
var meta entity.MetadataEventPurchase
|
||||
err := json.Unmarshal(item.Metadata, &meta)
|
||||
if err != nil {
|
||||
meta = entity.MetadataEventPurchase{}
|
||||
}
|
||||
return entity.NodeSaleEvent{
|
||||
TxHash: item.TxHash,
|
||||
BlockHeight: item.BlockHeight,
|
||||
TxIndex: item.TxIndex,
|
||||
WalletAddress: item.WalletAddress,
|
||||
Valid: item.Valid,
|
||||
Action: item.Action,
|
||||
RawMessage: item.RawMessage,
|
||||
ParsedMessage: item.ParsedMessage,
|
||||
BlockTimestamp: item.BlockTimestamp.Time.UTC(),
|
||||
BlockHash: item.BlockHash,
|
||||
Metadata: &meta,
|
||||
}
|
||||
})
|
||||
}
|
||||
247
modules/nodesale/repository/postgres/repository.go
Normal file
247
modules/nodesale/repository/postgres/repository.go
Normal file
@@ -0,0 +1,247 @@
|
||||
package postgres
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/internal/postgres"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/internal/entity"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/repository/postgres/gen"
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/jackc/pgx/v5/pgtype"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
type Repository struct {
|
||||
db postgres.DB
|
||||
queries *gen.Queries
|
||||
tx pgx.Tx
|
||||
}
|
||||
|
||||
func NewRepository(db postgres.DB) *Repository {
|
||||
return &Repository{
|
||||
db: db,
|
||||
queries: gen.New(db),
|
||||
}
|
||||
}
|
||||
|
||||
func (repo *Repository) CreateBlock(ctx context.Context, arg entity.Block) error {
|
||||
err := repo.queries.CreateBlock(ctx, gen.CreateBlockParams{
|
||||
BlockHeight: arg.BlockHeight,
|
||||
BlockHash: arg.BlockHash,
|
||||
Module: arg.Module,
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Cannot Add block")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (repo *Repository) GetBlock(ctx context.Context, blockHeight int64) (*entity.Block, error) {
|
||||
block, err := repo.queries.GetBlock(ctx, blockHeight)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Cannot get block")
|
||||
}
|
||||
return &entity.Block{
|
||||
BlockHeight: block.BlockHeight,
|
||||
BlockHash: block.BlockHash,
|
||||
Module: block.Module,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (repo *Repository) GetLastProcessedBlock(ctx context.Context) (*entity.Block, error) {
|
||||
block, err := repo.queries.GetLastProcessedBlock(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Cannot get last processed block")
|
||||
}
|
||||
return &entity.Block{
|
||||
BlockHeight: block.BlockHeight,
|
||||
BlockHash: block.BlockHash,
|
||||
Module: block.Module,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (repo *Repository) RemoveBlockFrom(ctx context.Context, fromBlock int64) (int64, error) {
|
||||
affected, err := repo.queries.RemoveBlockFrom(ctx, fromBlock)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "Cannot remove blocks")
|
||||
}
|
||||
return affected, nil
|
||||
}
|
||||
|
||||
func (repo *Repository) RemoveEventsFromBlock(ctx context.Context, fromBlock int64) (int64, error) {
|
||||
affected, err := repo.queries.RemoveEventsFromBlock(ctx, fromBlock)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "Cannot remove events")
|
||||
}
|
||||
return affected, nil
|
||||
}
|
||||
|
||||
func (repo *Repository) ClearDelegate(ctx context.Context) (int64, error) {
|
||||
affected, err := repo.queries.ClearDelegate(ctx)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "Cannot clear delegate")
|
||||
}
|
||||
return affected, nil
|
||||
}
|
||||
|
||||
func (repo *Repository) GetNodesByIds(ctx context.Context, arg datagateway.GetNodesByIdsParams) ([]entity.Node, error) {
|
||||
nodes, err := repo.queries.GetNodesByIds(ctx, gen.GetNodesByIdsParams{
|
||||
SaleBlock: int64(arg.SaleBlock),
|
||||
SaleTxIndex: int32(arg.SaleTxIndex),
|
||||
NodeIds: lo.Map(arg.NodeIds, func(item uint32, index int) int32 { return int32(item) }),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Cannot get nodes")
|
||||
}
|
||||
return mapNodes(nodes), nil
|
||||
}
|
||||
|
||||
func (repo *Repository) CreateEvent(ctx context.Context, arg entity.NodeSaleEvent) error {
|
||||
metaDataBytes := []byte("{}")
|
||||
if arg.Metadata != nil {
|
||||
metaDataBytes, _ = json.Marshal(arg.Metadata)
|
||||
}
|
||||
err := repo.queries.CreateEvent(ctx, gen.CreateEventParams{
|
||||
TxHash: arg.TxHash,
|
||||
BlockHeight: arg.BlockHeight,
|
||||
TxIndex: arg.TxIndex,
|
||||
WalletAddress: arg.WalletAddress,
|
||||
Valid: arg.Valid,
|
||||
Action: arg.Action,
|
||||
RawMessage: arg.RawMessage,
|
||||
ParsedMessage: arg.ParsedMessage,
|
||||
BlockTimestamp: pgtype.Timestamp{Time: arg.BlockTimestamp.UTC(), Valid: true},
|
||||
BlockHash: arg.BlockHash,
|
||||
Metadata: metaDataBytes,
|
||||
Reason: arg.Reason,
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Cannot add event")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (repo *Repository) SetDelegates(ctx context.Context, arg datagateway.SetDelegatesParams) (int64, error) {
|
||||
affected, err := repo.queries.SetDelegates(ctx, gen.SetDelegatesParams{
|
||||
SaleBlock: int64(arg.SaleBlock),
|
||||
SaleTxIndex: arg.SaleTxIndex,
|
||||
Delegatee: arg.Delegatee,
|
||||
DelegateTxHash: arg.DelegateTxHash,
|
||||
NodeIds: lo.Map(arg.NodeIds, func(item uint32, index int) int32 { return int32(item) }),
|
||||
})
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "Cannot set delegate")
|
||||
}
|
||||
return affected, nil
|
||||
}
|
||||
|
||||
func (repo *Repository) CreateNodeSale(ctx context.Context, arg entity.NodeSale) error {
|
||||
err := repo.queries.CreateNodeSale(ctx, gen.CreateNodeSaleParams{
|
||||
BlockHeight: int64(arg.BlockHeight),
|
||||
TxIndex: int32(arg.TxIndex),
|
||||
Name: arg.Name,
|
||||
StartsAt: pgtype.Timestamp{Time: arg.StartsAt.UTC(), Valid: true},
|
||||
EndsAt: pgtype.Timestamp{Time: arg.EndsAt.UTC(), Valid: true},
|
||||
Tiers: arg.Tiers,
|
||||
SellerPublicKey: arg.SellerPublicKey,
|
||||
MaxPerAddress: int32(arg.MaxPerAddress),
|
||||
DeployTxHash: arg.DeployTxHash,
|
||||
MaxDiscountPercentage: arg.MaxDiscountPercentage,
|
||||
SellerWallet: arg.SellerWallet,
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Cannot add NodeSale")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (repo *Repository) GetNodeSale(ctx context.Context, arg datagateway.GetNodeSaleParams) ([]entity.NodeSale, error) {
|
||||
nodeSales, err := repo.queries.GetNodeSale(ctx, gen.GetNodeSaleParams{
|
||||
BlockHeight: int64(arg.BlockHeight),
|
||||
TxIndex: int32(arg.TxIndex),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Cannot get NodeSale")
|
||||
}
|
||||
|
||||
return mapNodeSales(nodeSales), nil
|
||||
}
|
||||
|
||||
func (repo *Repository) GetNodesByOwner(ctx context.Context, arg datagateway.GetNodesByOwnerParams) ([]entity.Node, error) {
|
||||
nodes, err := repo.queries.GetNodesByOwner(ctx, gen.GetNodesByOwnerParams{
|
||||
SaleBlock: int64(arg.SaleBlock),
|
||||
SaleTxIndex: int32(arg.SaleTxIndex),
|
||||
OwnerPublicKey: arg.OwnerPublicKey,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Cannot get nodes by owner")
|
||||
}
|
||||
return mapNodes(nodes), nil
|
||||
}
|
||||
|
||||
func (repo *Repository) CreateNode(ctx context.Context, arg entity.Node) error {
|
||||
err := repo.queries.CreateNode(ctx, gen.CreateNodeParams{
|
||||
SaleBlock: int64(arg.SaleBlock),
|
||||
SaleTxIndex: int32(arg.SaleTxIndex),
|
||||
NodeID: int32(arg.NodeID),
|
||||
TierIndex: arg.TierIndex,
|
||||
DelegatedTo: arg.DelegatedTo,
|
||||
OwnerPublicKey: arg.OwnerPublicKey,
|
||||
PurchaseTxHash: arg.PurchaseTxHash,
|
||||
DelegateTxHash: arg.DelegateTxHash,
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Cannot add node")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (repo *Repository) GetNodeCountByTierIndex(ctx context.Context, arg datagateway.GetNodeCountByTierIndexParams) ([]datagateway.GetNodeCountByTierIndexRow, error) {
|
||||
nodeCount, err := repo.queries.GetNodeCountByTierIndex(ctx, gen.GetNodeCountByTierIndexParams{
|
||||
SaleBlock: int64(arg.SaleBlock),
|
||||
SaleTxIndex: int32(arg.SaleTxIndex),
|
||||
FromTier: int32(arg.FromTier),
|
||||
ToTier: int32(arg.ToTier),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Cannot get node count by tier index")
|
||||
}
|
||||
|
||||
return mapNodeCountByTierIndexRows(nodeCount), nil
|
||||
}
|
||||
|
||||
func (repo *Repository) GetNodesByPubkey(ctx context.Context, arg datagateway.GetNodesByPubkeyParams) ([]entity.Node, error) {
|
||||
nodes, err := repo.queries.GetNodesByPubkey(ctx, gen.GetNodesByPubkeyParams{
|
||||
SaleBlock: arg.SaleBlock,
|
||||
SaleTxIndex: arg.SaleTxIndex,
|
||||
OwnerPublicKey: arg.OwnerPublicKey,
|
||||
DelegatedTo: arg.DelegatedTo,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Cannot get nodes by public key")
|
||||
}
|
||||
return mapNodes(nodes), nil
|
||||
}
|
||||
|
||||
func (repo *Repository) GetEventsByWallet(ctx context.Context, walletAddress string) ([]entity.NodeSaleEvent, error) {
|
||||
events, err := repo.queries.GetEventsByWallet(ctx, walletAddress)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "cannot get events by wallet")
|
||||
}
|
||||
return mapNodeSalesEvents(events), nil
|
||||
}
|
||||
|
||||
func (repo *Repository) GetNodesByDeployment(ctx context.Context, saleBlock int64, saleTxIndex int32) ([]entity.Node, error) {
|
||||
nodes, err := repo.queries.GetNodesByDeployment(ctx, gen.GetNodesByDeploymentParams{
|
||||
SaleBlock: saleBlock,
|
||||
SaleTxIndex: saleTxIndex,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "cannot get nodes by deploy")
|
||||
}
|
||||
return mapNodes(nodes), nil
|
||||
}
|
||||
62
modules/nodesale/repository/postgres/tx.go
Normal file
62
modules/nodesale/repository/postgres/tx.go
Normal file
@@ -0,0 +1,62 @@
|
||||
package postgres
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/modules/nodesale/datagateway"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger"
|
||||
"github.com/jackc/pgx/v5"
|
||||
)
|
||||
|
||||
var ErrTxAlreadyExists = errors.New("Transaction already exists. Call Commit() or Rollback() first.")
|
||||
|
||||
func (r *Repository) begin(ctx context.Context) (*Repository, error) {
|
||||
if r.tx != nil {
|
||||
return nil, errors.WithStack(ErrTxAlreadyExists)
|
||||
}
|
||||
tx, err := r.db.Begin(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to begin transaction")
|
||||
}
|
||||
return &Repository{
|
||||
db: r.db,
|
||||
queries: r.queries.WithTx(tx),
|
||||
tx: tx,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (r *Repository) BeginNodeSaleTx(ctx context.Context) (datagateway.NodeSaleDataGatewayWithTx, error) {
|
||||
repo, err := r.begin(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
return repo, nil
|
||||
}
|
||||
|
||||
func (r *Repository) Commit(ctx context.Context) error {
|
||||
if r.tx == nil {
|
||||
return nil
|
||||
}
|
||||
err := r.tx.Commit(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to commit transaction")
|
||||
}
|
||||
r.tx = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Repository) Rollback(ctx context.Context) error {
|
||||
if r.tx == nil {
|
||||
return nil
|
||||
}
|
||||
err := r.tx.Rollback(ctx)
|
||||
if err != nil && !errors.Is(err, pgx.ErrTxClosed) {
|
||||
return errors.Wrap(err, "failed to rollback transaction")
|
||||
}
|
||||
if err == nil {
|
||||
logger.DebugContext(ctx, "rolled back transaction")
|
||||
}
|
||||
r.tx = nil
|
||||
return nil
|
||||
}
|
||||
25
modules/nodesale/tapscript.go
Normal file
25
modules/nodesale/tapscript.go
Normal file
@@ -0,0 +1,25 @@
|
||||
package nodesale
|
||||
|
||||
import "github.com/btcsuite/btcd/txscript"
|
||||
|
||||
func extractTapScript(witness [][]byte) (tokenizer txscript.ScriptTokenizer, controlBlock *txscript.ControlBlock, isTapScript bool) {
|
||||
witness = removeAnnexFromWitness(witness)
|
||||
if len(witness) < 2 {
|
||||
return txscript.ScriptTokenizer{}, nil, false
|
||||
}
|
||||
script := witness[len(witness)-2]
|
||||
rawControl := witness[len(witness)-1]
|
||||
parsedControl, err := txscript.ParseControlBlock(rawControl)
|
||||
if err != nil {
|
||||
return txscript.ScriptTokenizer{}, nil, false
|
||||
}
|
||||
|
||||
return txscript.MakeScriptTokenizer(0, script), parsedControl, true
|
||||
}
|
||||
|
||||
func removeAnnexFromWitness(witness [][]byte) [][]byte {
|
||||
if len(witness) >= 2 && len(witness[len(witness)-1]) > 0 && witness[len(witness)-1][0] == txscript.TaprootAnnexTag {
|
||||
return witness[:len(witness)-1]
|
||||
}
|
||||
return witness
|
||||
}
|
||||
@@ -1,29 +1,49 @@
|
||||
package httphandler
|
||||
|
||||
import (
|
||||
"slices"
|
||||
"net/url"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/runes"
|
||||
"github.com/gaze-network/uint128"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
type getBalancesByAddressRequest struct {
|
||||
type getBalancesRequest struct {
|
||||
paginationRequest
|
||||
Wallet string `params:"wallet"`
|
||||
Id string `query:"id"`
|
||||
BlockHeight uint64 `query:"blockHeight"`
|
||||
}
|
||||
|
||||
func (r getBalancesByAddressRequest) Validate() error {
|
||||
const (
|
||||
getBalancesMaxLimit = 5000
|
||||
getBalancesDefaultLimit = 100
|
||||
)
|
||||
|
||||
func (r *getBalancesRequest) Validate() error {
|
||||
var errList []error
|
||||
if r.Wallet == "" {
|
||||
errList = append(errList, errors.New("'wallet' is required"))
|
||||
}
|
||||
if r.Id != "" && !isRuneIdOrRuneName(r.Id) {
|
||||
errList = append(errList, errors.New("'id' is not valid rune id or rune name"))
|
||||
if r.Id != "" {
|
||||
id, err := url.QueryUnescape(r.Id)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
r.Id = id
|
||||
if !isRuneIdOrRuneName(r.Id) {
|
||||
errList = append(errList, errors.Errorf("id '%s' is not valid rune id or rune name", r.Id))
|
||||
}
|
||||
}
|
||||
if r.Limit < 0 {
|
||||
errList = append(errList, errors.New("'limit' must be non-negative"))
|
||||
}
|
||||
if r.Limit > getBalancesMaxLimit {
|
||||
errList = append(errList, errors.Errorf("'limit' cannot exceed %d", getBalancesMaxLimit))
|
||||
}
|
||||
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
|
||||
}
|
||||
@@ -36,15 +56,15 @@ type balance struct {
|
||||
Decimals uint8 `json:"decimals"`
|
||||
}
|
||||
|
||||
type getBalancesByAddressResult struct {
|
||||
type getBalancesResult struct {
|
||||
List []balance `json:"list"`
|
||||
BlockHeight uint64 `json:"blockHeight"`
|
||||
}
|
||||
|
||||
type getBalancesByAddressResponse = HttpResponse[getBalancesByAddressResult]
|
||||
type getBalancesResponse = HttpResponse[getBalancesResult]
|
||||
|
||||
func (h *HttpHandler) GetBalancesByAddress(ctx *fiber.Ctx) (err error) {
|
||||
var req getBalancesByAddressRequest
|
||||
func (h *HttpHandler) GetBalances(ctx *fiber.Ctx) (err error) {
|
||||
var req getBalancesRequest
|
||||
if err := ctx.ParamsParser(&req); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
@@ -54,6 +74,9 @@ func (h *HttpHandler) GetBalancesByAddress(ctx *fiber.Ctx) (err error) {
|
||||
if err := req.Validate(); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
if err := req.ParseDefault(); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
pkScript, ok := resolvePkScript(h.network, req.Wallet)
|
||||
if !ok {
|
||||
@@ -64,49 +87,52 @@ func (h *HttpHandler) GetBalancesByAddress(ctx *fiber.Ctx) (err error) {
|
||||
if blockHeight == 0 {
|
||||
blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
|
||||
if err != nil {
|
||||
if errors.Is(err, errs.NotFound) {
|
||||
return errs.NewPublicError("latest block not found")
|
||||
}
|
||||
return errors.Wrap(err, "error during GetLatestBlock")
|
||||
}
|
||||
blockHeight = uint64(blockHeader.Height)
|
||||
}
|
||||
|
||||
balances, err := h.usecase.GetBalancesByPkScript(ctx.UserContext(), pkScript, blockHeight)
|
||||
balances, err := h.usecase.GetBalancesByPkScript(ctx.UserContext(), pkScript, blockHeight, req.Limit, req.Offset)
|
||||
if err != nil {
|
||||
if errors.Is(err, errs.NotFound) {
|
||||
return errs.NewPublicError("balances not found")
|
||||
}
|
||||
return errors.Wrap(err, "error during GetBalancesByPkScript")
|
||||
}
|
||||
|
||||
runeId, ok := h.resolveRuneId(ctx.UserContext(), req.Id)
|
||||
if ok {
|
||||
// filter out balances that don't match the requested rune id
|
||||
for key := range balances {
|
||||
if key != runeId {
|
||||
delete(balances, key)
|
||||
}
|
||||
}
|
||||
balances = lo.Filter(balances, func(b *entity.Balance, _ int) bool {
|
||||
return b.RuneId == runeId
|
||||
})
|
||||
}
|
||||
|
||||
balanceRuneIds := lo.Keys(balances)
|
||||
balanceRuneIds := lo.Map(balances, func(b *entity.Balance, _ int) runes.RuneId {
|
||||
return b.RuneId
|
||||
})
|
||||
runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx.UserContext(), balanceRuneIds)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error during GetRuneEntryByRuneIdBatch")
|
||||
}
|
||||
|
||||
balanceList := make([]balance, 0, len(balances))
|
||||
for id, b := range balances {
|
||||
runeEntry := runeEntries[id]
|
||||
for _, b := range balances {
|
||||
runeEntry := runeEntries[b.RuneId]
|
||||
balanceList = append(balanceList, balance{
|
||||
Amount: b.Amount,
|
||||
Id: id,
|
||||
Id: b.RuneId,
|
||||
Name: runeEntry.SpacedRune,
|
||||
Symbol: string(runeEntry.Symbol),
|
||||
Decimals: runeEntry.Divisibility,
|
||||
})
|
||||
}
|
||||
slices.SortFunc(balanceList, func(i, j balance) int {
|
||||
return j.Amount.Cmp(i.Amount)
|
||||
})
|
||||
|
||||
resp := getBalancesByAddressResponse{
|
||||
Result: &getBalancesByAddressResult{
|
||||
resp := getBalancesResponse{
|
||||
Result: &getBalancesResult{
|
||||
BlockHeight: blockHeight,
|
||||
List: balanceList,
|
||||
},
|
||||
|
||||
@@ -3,10 +3,11 @@ package httphandler
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/runes"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/samber/lo"
|
||||
"golang.org/x/sync/errgroup"
|
||||
@@ -16,33 +17,49 @@ type getBalanceQuery struct {
|
||||
Wallet string `json:"wallet"`
|
||||
Id string `json:"id"`
|
||||
BlockHeight uint64 `json:"blockHeight"`
|
||||
Limit int32 `json:"limit"`
|
||||
Offset int32 `json:"offset"`
|
||||
}
|
||||
|
||||
type getBalancesByAddressBatchRequest struct {
|
||||
type getBalancesBatchRequest struct {
|
||||
Queries []getBalanceQuery `json:"queries"`
|
||||
}
|
||||
|
||||
func (r getBalancesByAddressBatchRequest) Validate() error {
|
||||
const getBalancesBatchMaxQueries = 100
|
||||
|
||||
func (r getBalancesBatchRequest) Validate() error {
|
||||
var errList []error
|
||||
for _, query := range r.Queries {
|
||||
if len(r.Queries) == 0 {
|
||||
errList = append(errList, errors.New("at least one query is required"))
|
||||
}
|
||||
if len(r.Queries) > getBalancesBatchMaxQueries {
|
||||
errList = append(errList, errors.Errorf("cannot exceed %d queries", getBalancesBatchMaxQueries))
|
||||
}
|
||||
for i, query := range r.Queries {
|
||||
if query.Wallet == "" {
|
||||
errList = append(errList, errors.Errorf("queries[%d]: 'wallet' is required"))
|
||||
errList = append(errList, errors.Errorf("queries[%d]: 'wallet' is required", i))
|
||||
}
|
||||
if query.Id != "" && !isRuneIdOrRuneName(query.Id) {
|
||||
errList = append(errList, errors.Errorf("queries[%d]: 'id' is not valid rune id or rune name"))
|
||||
errList = append(errList, errors.Errorf("queries[%d]: id '%s' is not valid rune id or rune name", i, query.Id))
|
||||
}
|
||||
if query.Limit < 0 {
|
||||
errList = append(errList, errors.Errorf("queries[%d]: 'limit' must be non-negative", i))
|
||||
}
|
||||
if query.Limit > getBalancesMaxLimit {
|
||||
errList = append(errList, errors.Errorf("queries[%d]: 'limit' cannot exceed %d", i, getBalancesMaxLimit))
|
||||
}
|
||||
}
|
||||
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
|
||||
}
|
||||
|
||||
type getBalancesByAddressBatchResult struct {
|
||||
List []*getBalancesByAddressResult `json:"list"`
|
||||
type getBalancesBatchResult struct {
|
||||
List []*getBalancesResult `json:"list"`
|
||||
}
|
||||
|
||||
type getBalancesByAddressBatchResponse = HttpResponse[getBalancesByAddressBatchResult]
|
||||
type getBalancesBatchResponse = HttpResponse[getBalancesBatchResult]
|
||||
|
||||
func (h *HttpHandler) GetBalancesByAddressBatch(ctx *fiber.Ctx) (err error) {
|
||||
var req getBalancesByAddressBatchRequest
|
||||
func (h *HttpHandler) GetBalancesBatch(ctx *fiber.Ctx) (err error) {
|
||||
var req getBalancesBatchRequest
|
||||
if err := ctx.BodyParser(&req); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
@@ -53,11 +70,14 @@ func (h *HttpHandler) GetBalancesByAddressBatch(ctx *fiber.Ctx) (err error) {
|
||||
var latestBlockHeight uint64
|
||||
blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
|
||||
if err != nil {
|
||||
if errors.Is(err, errs.NotFound) {
|
||||
return errs.NewPublicError("latest block not found")
|
||||
}
|
||||
return errors.Wrap(err, "error during GetLatestBlock")
|
||||
}
|
||||
latestBlockHeight = uint64(blockHeader.Height)
|
||||
|
||||
processQuery := func(ctx context.Context, query getBalanceQuery, queryIndex int) (*getBalancesByAddressResult, error) {
|
||||
processQuery := func(ctx context.Context, query getBalanceQuery, queryIndex int) (*getBalancesResult, error) {
|
||||
pkScript, ok := resolvePkScript(h.network, query.Wallet)
|
||||
if !ok {
|
||||
return nil, errs.NewPublicError(fmt.Sprintf("unable to resolve pkscript from \"queries[%d].wallet\"", queryIndex))
|
||||
@@ -68,50 +88,57 @@ func (h *HttpHandler) GetBalancesByAddressBatch(ctx *fiber.Ctx) (err error) {
|
||||
blockHeight = latestBlockHeight
|
||||
}
|
||||
|
||||
balances, err := h.usecase.GetBalancesByPkScript(ctx, pkScript, blockHeight)
|
||||
if query.Limit == 0 {
|
||||
query.Limit = getBalancesDefaultLimit
|
||||
}
|
||||
|
||||
balances, err := h.usecase.GetBalancesByPkScript(ctx, pkScript, blockHeight, query.Limit, query.Offset)
|
||||
if err != nil {
|
||||
if errors.Is(err, errs.NotFound) {
|
||||
return nil, errs.NewPublicError("balances not found")
|
||||
}
|
||||
return nil, errors.Wrap(err, "error during GetBalancesByPkScript")
|
||||
}
|
||||
|
||||
runeId, ok := h.resolveRuneId(ctx, query.Id)
|
||||
if ok {
|
||||
// filter out balances that don't match the requested rune id
|
||||
for key := range balances {
|
||||
if key != runeId {
|
||||
delete(balances, key)
|
||||
}
|
||||
}
|
||||
balances = lo.Filter(balances, func(b *entity.Balance, _ int) bool {
|
||||
return b.RuneId == runeId
|
||||
})
|
||||
}
|
||||
|
||||
balanceRuneIds := lo.Keys(balances)
|
||||
balanceRuneIds := lo.Map(balances, func(b *entity.Balance, _ int) runes.RuneId {
|
||||
return b.RuneId
|
||||
})
|
||||
runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx, balanceRuneIds)
|
||||
if err != nil {
|
||||
if errors.Is(err, errs.NotFound) {
|
||||
return nil, errs.NewPublicError("rune not found")
|
||||
}
|
||||
return nil, errors.Wrap(err, "error during GetRuneEntryByRuneIdBatch")
|
||||
}
|
||||
|
||||
balanceList := make([]balance, 0, len(balances))
|
||||
for id, b := range balances {
|
||||
runeEntry := runeEntries[id]
|
||||
for _, b := range balances {
|
||||
runeEntry := runeEntries[b.RuneId]
|
||||
balanceList = append(balanceList, balance{
|
||||
Amount: b.Amount,
|
||||
Id: id,
|
||||
Id: b.RuneId,
|
||||
Name: runeEntry.SpacedRune,
|
||||
Symbol: string(runeEntry.Symbol),
|
||||
Decimals: runeEntry.Divisibility,
|
||||
})
|
||||
}
|
||||
slices.SortFunc(balanceList, func(i, j balance) int {
|
||||
return j.Amount.Cmp(i.Amount)
|
||||
})
|
||||
|
||||
result := getBalancesByAddressResult{
|
||||
result := getBalancesResult{
|
||||
BlockHeight: blockHeight,
|
||||
List: balanceList,
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
results := make([]*getBalancesByAddressResult, len(req.Queries))
|
||||
results := make([]*getBalancesResult, len(req.Queries))
|
||||
eg, ectx := errgroup.WithContext(ctx.UserContext())
|
||||
for i, query := range req.Queries {
|
||||
i := i
|
||||
@@ -129,8 +156,8 @@ func (h *HttpHandler) GetBalancesByAddressBatch(ctx *fiber.Ctx) (err error) {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
resp := getBalancesByAddressBatchResponse{
|
||||
Result: &getBalancesByAddressBatchResult{
|
||||
resp := getBalancesBatchResponse{
|
||||
Result: &getBalancesBatchResult{
|
||||
List: results,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1,28 +1,12 @@
|
||||
package httphandler
|
||||
|
||||
import (
|
||||
"github.com/Cleverse/go-utilities/utils"
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/core/types"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/constants"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
var startingBlockHeader = map[common.Network]types.BlockHeader{
|
||||
common.NetworkMainnet: {
|
||||
Height: 839999,
|
||||
Hash: *utils.Must(chainhash.NewHashFromStr("0000000000000000000172014ba58d66455762add0512355ad651207918494ab")),
|
||||
PrevBlock: *utils.Must(chainhash.NewHashFromStr("00000000000000000001dcce6ce7c8a45872cafd1fb04732b447a14a91832591")),
|
||||
},
|
||||
common.NetworkTestnet: {
|
||||
Height: 2583200,
|
||||
Hash: *utils.Must(chainhash.NewHashFromStr("000000000006c5f0dfcd9e0e81f27f97a87aef82087ffe69cd3c390325bb6541")),
|
||||
PrevBlock: *utils.Must(chainhash.NewHashFromStr("00000000000668f3bafac992f53424774515440cb47e1cb9e73af3f496139e28")),
|
||||
},
|
||||
}
|
||||
|
||||
type getCurrentBlockResult struct {
|
||||
Hash string `json:"hash"`
|
||||
Height int64 `json:"height"`
|
||||
@@ -36,7 +20,7 @@ func (h *HttpHandler) GetCurrentBlock(ctx *fiber.Ctx) (err error) {
|
||||
if !errors.Is(err, errs.NotFound) {
|
||||
return errors.Wrap(err, "error during GetLatestBlock")
|
||||
}
|
||||
blockHeader = startingBlockHeader[h.network]
|
||||
blockHeader = constants.StartingBlockHeader[h.network]
|
||||
}
|
||||
|
||||
resp := getCurrentBlockResponse{
|
||||
|
||||
@@ -1,10 +1,15 @@
|
||||
package httphandler
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"slices"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/runes"
|
||||
"github.com/gaze-network/uint128"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
@@ -12,14 +17,30 @@ import (
|
||||
)
|
||||
|
||||
type getHoldersRequest struct {
|
||||
paginationRequest
|
||||
Id string `params:"id"`
|
||||
BlockHeight uint64 `query:"blockHeight"`
|
||||
}
|
||||
|
||||
func (r getHoldersRequest) Validate() error {
|
||||
const (
|
||||
getHoldersMaxLimit = 1000
|
||||
)
|
||||
|
||||
func (r *getHoldersRequest) Validate() error {
|
||||
var errList []error
|
||||
id, err := url.QueryUnescape(r.Id)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
r.Id = id
|
||||
if !isRuneIdOrRuneName(r.Id) {
|
||||
errList = append(errList, errors.New("'id' is not valid rune id or rune name"))
|
||||
errList = append(errList, errors.Errorf("id '%s' is not valid rune id or rune name", r.Id))
|
||||
}
|
||||
if r.Limit < 0 {
|
||||
errList = append(errList, errors.New("'limit' must be non-negative"))
|
||||
}
|
||||
if r.Limit > getHoldersMaxLimit {
|
||||
errList = append(errList, errors.Errorf("'limit' cannot exceed %d", getHoldersMaxLimit))
|
||||
}
|
||||
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
|
||||
}
|
||||
@@ -35,6 +56,7 @@ type getHoldersResult struct {
|
||||
BlockHeight uint64 `json:"blockHeight"`
|
||||
TotalSupply uint128.Uint128 `json:"totalSupply"`
|
||||
MintedAmount uint128.Uint128 `json:"mintedAmount"`
|
||||
Decimals uint8 `json:"decimals"`
|
||||
List []holdingBalance `json:"list"`
|
||||
}
|
||||
|
||||
@@ -51,6 +73,9 @@ func (h *HttpHandler) GetHolders(ctx *fiber.Ctx) (err error) {
|
||||
if err := req.Validate(); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
if err := req.ParseDefault(); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
blockHeight := req.BlockHeight
|
||||
if blockHeight == 0 {
|
||||
@@ -66,16 +91,22 @@ func (h *HttpHandler) GetHolders(ctx *fiber.Ctx) (err error) {
|
||||
var ok bool
|
||||
runeId, ok = h.resolveRuneId(ctx.UserContext(), req.Id)
|
||||
if !ok {
|
||||
return errs.NewPublicError("unable to resolve rune id from \"id\"")
|
||||
return errs.NewPublicError(fmt.Sprintf("unable to resolve rune id \"%s\" from \"id\"", req.Id))
|
||||
}
|
||||
}
|
||||
|
||||
runeEntry, err := h.usecase.GetRuneEntryByRuneIdAndHeight(ctx.UserContext(), runeId, blockHeight)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error during GetHoldersByHeight")
|
||||
if errors.Is(err, errs.NotFound) {
|
||||
return errs.NewPublicError("rune not found")
|
||||
}
|
||||
return errors.Wrap(err, "error during GetRuneEntryByRuneIdAndHeight")
|
||||
}
|
||||
holdingBalances, err := h.usecase.GetBalancesByRuneId(ctx.UserContext(), runeId, blockHeight)
|
||||
holdingBalances, err := h.usecase.GetBalancesByRuneId(ctx.UserContext(), runeId, blockHeight, req.Limit, req.Offset)
|
||||
if err != nil {
|
||||
if errors.Is(err, errs.NotFound) {
|
||||
return errs.NewPublicError("balances not found")
|
||||
}
|
||||
return errors.Wrap(err, "error during GetBalancesByRuneId")
|
||||
}
|
||||
|
||||
@@ -101,11 +132,20 @@ func (h *HttpHandler) GetHolders(ctx *fiber.Ctx) (err error) {
|
||||
})
|
||||
}
|
||||
|
||||
// sort by amount descending, then pk script ascending
|
||||
slices.SortFunc(holdingBalances, func(b1, b2 *entity.Balance) int {
|
||||
if b1.Amount.Cmp(b2.Amount) == 0 {
|
||||
return bytes.Compare(b1.PkScript, b2.PkScript)
|
||||
}
|
||||
return b2.Amount.Cmp(b1.Amount)
|
||||
})
|
||||
|
||||
resp := getHoldersResponse{
|
||||
Result: &getHoldersResult{
|
||||
BlockHeight: blockHeight,
|
||||
TotalSupply: totalSupply,
|
||||
MintedAmount: mintedAmount,
|
||||
Decimals: runeEntry.Divisibility,
|
||||
List: list,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
package httphandler
|
||||
|
||||
import (
|
||||
"slices"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/runes"
|
||||
"github.com/gaze-network/uint128"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
@@ -13,15 +14,29 @@ import (
|
||||
)
|
||||
|
||||
type getTokenInfoRequest struct {
|
||||
Id string `params:"id"`
|
||||
BlockHeight uint64 `query:"blockHeight"`
|
||||
Id string `params:"id"`
|
||||
BlockHeight uint64 `query:"blockHeight"`
|
||||
AdditionalFieldsRaw string `query:"additionalFields"` // comma-separated list of additional fields
|
||||
AdditionalFields []string
|
||||
}
|
||||
|
||||
func (r getTokenInfoRequest) Validate() error {
|
||||
func (r *getTokenInfoRequest) Validate() error {
|
||||
var errList []error
|
||||
if !isRuneIdOrRuneName(r.Id) {
|
||||
errList = append(errList, errors.New("'id' is not valid rune id or rune name"))
|
||||
id, err := url.QueryUnescape(r.Id)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
r.Id = id
|
||||
if !isRuneIdOrRuneName(r.Id) {
|
||||
errList = append(errList, errors.Errorf("id '%s' is not valid rune id or rune name", r.Id))
|
||||
}
|
||||
|
||||
if r.AdditionalFieldsRaw == "" {
|
||||
// temporarily set default value for backward compatibility
|
||||
r.AdditionalFieldsRaw = "holdersCount" // TODO: remove this default value after all clients are updated
|
||||
}
|
||||
r.AdditionalFields = strings.Split(r.AdditionalFieldsRaw, ",")
|
||||
|
||||
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
|
||||
}
|
||||
|
||||
@@ -35,17 +50,19 @@ type entryTerms struct {
|
||||
}
|
||||
|
||||
type entry struct {
|
||||
Divisibility uint8 `json:"divisibility"`
|
||||
Premine uint128.Uint128 `json:"premine"`
|
||||
Rune runes.Rune `json:"rune"`
|
||||
Spacers uint32 `json:"spacers"`
|
||||
Symbol string `json:"symbol"`
|
||||
Terms entryTerms `json:"terms"`
|
||||
Turbo bool `json:"turbo"`
|
||||
Divisibility uint8 `json:"divisibility"`
|
||||
Premine uint128.Uint128 `json:"premine"`
|
||||
Rune runes.Rune `json:"rune"`
|
||||
Spacers uint32 `json:"spacers"`
|
||||
Symbol string `json:"symbol"`
|
||||
Terms entryTerms `json:"terms"`
|
||||
Turbo bool `json:"turbo"`
|
||||
EtchingTxHash string `json:"etchingTxHash"`
|
||||
}
|
||||
|
||||
type tokenInfoExtend struct {
|
||||
Entry entry `json:"entry"`
|
||||
HoldersCount *int64 `json:"holdersCount,omitempty"`
|
||||
Entry entry `json:"entry"`
|
||||
}
|
||||
|
||||
type getTokenInfoResult struct {
|
||||
@@ -57,11 +74,11 @@ type getTokenInfoResult struct {
|
||||
MintedAmount uint128.Uint128 `json:"mintedAmount"`
|
||||
BurnedAmount uint128.Uint128 `json:"burnedAmount"`
|
||||
Decimals uint8 `json:"decimals"`
|
||||
DeployedAt uint64 `json:"deployedAt"` // unix timestamp
|
||||
DeployedAt int64 `json:"deployedAt"` // unix timestamp
|
||||
DeployedAtHeight uint64 `json:"deployedAtHeight"`
|
||||
CompletedAt *uint64 `json:"completedAt"` // unix timestamp
|
||||
CompletedAt *int64 `json:"completedAt"` // unix timestamp
|
||||
CompletedAtHeight *uint64 `json:"completedAtHeight"`
|
||||
HoldersCount int `json:"holdersCount"`
|
||||
HoldersCount int64 `json:"holdersCount"` // deprecated // TODO: remove later
|
||||
Extend tokenInfoExtend `json:"extend"`
|
||||
}
|
||||
|
||||
@@ -83,6 +100,9 @@ func (h *HttpHandler) GetTokenInfo(ctx *fiber.Ctx) (err error) {
|
||||
if blockHeight == 0 {
|
||||
blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
|
||||
if err != nil {
|
||||
if errors.Is(err, errs.NotFound) {
|
||||
return errs.NewPublicError("latest block not found")
|
||||
}
|
||||
return errors.Wrap(err, "error during GetLatestBlock")
|
||||
}
|
||||
blockHeight = uint64(blockHeader.Height)
|
||||
@@ -93,73 +113,87 @@ func (h *HttpHandler) GetTokenInfo(ctx *fiber.Ctx) (err error) {
|
||||
var ok bool
|
||||
runeId, ok = h.resolveRuneId(ctx.UserContext(), req.Id)
|
||||
if !ok {
|
||||
return errs.NewPublicError("unable to resolve rune id from \"id\"")
|
||||
return errs.NewPublicError(fmt.Sprintf("unable to resolve rune id \"%s\" from \"id\"", req.Id))
|
||||
}
|
||||
}
|
||||
|
||||
runeEntry, err := h.usecase.GetRuneEntryByRuneIdAndHeight(ctx.UserContext(), runeId, blockHeight)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error during GetTokenInfoByHeight")
|
||||
if errors.Is(err, errs.NotFound) {
|
||||
return errs.NewPublicError("rune not found")
|
||||
}
|
||||
return errors.Wrap(err, "error during GetRuneEntryByRuneIdAndHeight")
|
||||
}
|
||||
holdingBalances, err := h.usecase.GetBalancesByRuneId(ctx.UserContext(), runeId, blockHeight)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error during GetBalancesByRuneId")
|
||||
var holdersCountPtr *int64
|
||||
if lo.Contains(req.AdditionalFields, "holdersCount") {
|
||||
holdersCount, err := h.usecase.GetTotalHoldersByRuneId(ctx.UserContext(), runeId, blockHeight)
|
||||
if err != nil {
|
||||
if errors.Is(err, errs.NotFound) {
|
||||
return errs.NewPublicError("rune not found")
|
||||
}
|
||||
return errors.Wrap(err, "error during GetBalancesByRuneId")
|
||||
}
|
||||
holdersCountPtr = &holdersCount
|
||||
}
|
||||
|
||||
holdingBalances = lo.Filter(holdingBalances, func(b *entity.Balance, _ int) bool {
|
||||
return !b.Amount.IsZero()
|
||||
})
|
||||
// sort by amount descending
|
||||
slices.SortFunc(holdingBalances, func(i, j *entity.Balance) int {
|
||||
return j.Amount.Cmp(i.Amount)
|
||||
})
|
||||
|
||||
totalSupply, err := runeEntry.Supply()
|
||||
result, err := createTokenInfoResult(runeEntry, holdersCountPtr)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "cannot get total supply of rune")
|
||||
return errors.Wrap(err, "error during createTokenInfoResult")
|
||||
}
|
||||
mintedAmount, err := runeEntry.MintedAmount()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "cannot get minted amount of rune")
|
||||
}
|
||||
circulatingSupply := mintedAmount.Sub(runeEntry.BurnedAmount)
|
||||
|
||||
terms := lo.FromPtr(runeEntry.Terms)
|
||||
resp := getTokenInfoResponse{
|
||||
Result: &getTokenInfoResult{
|
||||
Id: runeId,
|
||||
Name: runeEntry.SpacedRune,
|
||||
Symbol: string(runeEntry.Symbol),
|
||||
TotalSupply: totalSupply,
|
||||
CirculatingSupply: circulatingSupply,
|
||||
MintedAmount: mintedAmount,
|
||||
BurnedAmount: runeEntry.BurnedAmount,
|
||||
Decimals: runeEntry.Divisibility,
|
||||
DeployedAt: uint64(runeEntry.EtchedAt.Unix()),
|
||||
DeployedAtHeight: runeEntry.EtchingBlock,
|
||||
CompletedAt: lo.Ternary(runeEntry.CompletedAt.IsZero(), nil, lo.ToPtr(uint64(runeEntry.CompletedAt.Unix()))),
|
||||
CompletedAtHeight: runeEntry.CompletedAtHeight,
|
||||
HoldersCount: len(holdingBalances),
|
||||
Extend: tokenInfoExtend{
|
||||
Entry: entry{
|
||||
Divisibility: runeEntry.Divisibility,
|
||||
Premine: runeEntry.Premine,
|
||||
Rune: runeEntry.SpacedRune.Rune,
|
||||
Spacers: runeEntry.SpacedRune.Spacers,
|
||||
Symbol: string(runeEntry.Symbol),
|
||||
Terms: entryTerms{
|
||||
Amount: lo.FromPtr(terms.Amount),
|
||||
Cap: lo.FromPtr(terms.Cap),
|
||||
HeightStart: terms.HeightStart,
|
||||
HeightEnd: terms.HeightEnd,
|
||||
OffsetStart: terms.OffsetStart,
|
||||
OffsetEnd: terms.OffsetEnd,
|
||||
},
|
||||
Turbo: runeEntry.Turbo,
|
||||
},
|
||||
},
|
||||
},
|
||||
Result: result,
|
||||
}
|
||||
|
||||
return errors.WithStack(ctx.JSON(resp))
|
||||
}
|
||||
|
||||
func createTokenInfoResult(runeEntry *runes.RuneEntry, holdersCount *int64) (*getTokenInfoResult, error) {
|
||||
totalSupply, err := runeEntry.Supply()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "cannot get total supply of rune")
|
||||
}
|
||||
mintedAmount, err := runeEntry.MintedAmount()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "cannot get minted amount of rune")
|
||||
}
|
||||
circulatingSupply := mintedAmount.Sub(runeEntry.BurnedAmount)
|
||||
|
||||
terms := lo.FromPtr(runeEntry.Terms)
|
||||
|
||||
return &getTokenInfoResult{
|
||||
Id: runeEntry.RuneId,
|
||||
Name: runeEntry.SpacedRune,
|
||||
Symbol: string(runeEntry.Symbol),
|
||||
TotalSupply: totalSupply,
|
||||
CirculatingSupply: circulatingSupply,
|
||||
MintedAmount: mintedAmount,
|
||||
BurnedAmount: runeEntry.BurnedAmount,
|
||||
Decimals: runeEntry.Divisibility,
|
||||
DeployedAt: runeEntry.EtchedAt.Unix(),
|
||||
DeployedAtHeight: runeEntry.EtchingBlock,
|
||||
CompletedAt: lo.Ternary(runeEntry.CompletedAt.IsZero(), nil, lo.ToPtr(runeEntry.CompletedAt.Unix())),
|
||||
CompletedAtHeight: runeEntry.CompletedAtHeight,
|
||||
HoldersCount: lo.FromPtr(holdersCount),
|
||||
Extend: tokenInfoExtend{
|
||||
HoldersCount: holdersCount,
|
||||
Entry: entry{
|
||||
Divisibility: runeEntry.Divisibility,
|
||||
Premine: runeEntry.Premine,
|
||||
Rune: runeEntry.SpacedRune.Rune,
|
||||
Spacers: runeEntry.SpacedRune.Spacers,
|
||||
Symbol: string(runeEntry.Symbol),
|
||||
Terms: entryTerms{
|
||||
Amount: lo.FromPtr(terms.Amount),
|
||||
Cap: lo.FromPtr(terms.Cap),
|
||||
HeightStart: terms.HeightStart,
|
||||
HeightEnd: terms.HeightEnd,
|
||||
OffsetStart: terms.OffsetStart,
|
||||
OffsetEnd: terms.OffsetEnd,
|
||||
},
|
||||
Turbo: runeEntry.Turbo,
|
||||
EtchingTxHash: runeEntry.EtchingTxHash.String(),
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
118
modules/runes/api/httphandler/get_token_info_batch.go
Normal file
118
modules/runes/api/httphandler/get_token_info_batch.go
Normal file
@@ -0,0 +1,118 @@
|
||||
package httphandler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/runes"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
type getTokenInfoBatchRequest struct {
|
||||
Ids []string `json:"ids"`
|
||||
BlockHeight uint64 `json:"blockHeight"`
|
||||
AdditionalFields []string `json:"additionalFields"`
|
||||
}
|
||||
|
||||
const getTokenInfoBatchMaxQueries = 100
|
||||
|
||||
func (r *getTokenInfoBatchRequest) Validate() error {
|
||||
var errList []error
|
||||
|
||||
if len(r.Ids) == 0 {
|
||||
errList = append(errList, errors.New("ids cannot be empty"))
|
||||
}
|
||||
if len(r.Ids) > getTokenInfoBatchMaxQueries {
|
||||
errList = append(errList, errors.Errorf("cannot query more than %d ids", getTokenInfoBatchMaxQueries))
|
||||
}
|
||||
for i := range r.Ids {
|
||||
id, err := url.QueryUnescape(r.Ids[i])
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
r.Ids[i] = id
|
||||
if !isRuneIdOrRuneName(r.Ids[i]) {
|
||||
errList = append(errList, errors.Errorf("ids[%d]: id '%s' is not valid rune id or rune name", i, r.Ids[i]))
|
||||
}
|
||||
}
|
||||
|
||||
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
|
||||
}
|
||||
|
||||
type getTokenInfoBatchResult struct {
|
||||
List []*getTokenInfoResult `json:"list"`
|
||||
}
|
||||
type getTokenInfoBatchResponse = HttpResponse[getTokenInfoBatchResult]
|
||||
|
||||
func (h *HttpHandler) GetTokenInfoBatch(ctx *fiber.Ctx) (err error) {
|
||||
var req getTokenInfoBatchRequest
|
||||
if err := ctx.BodyParser(&req); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
if err := req.Validate(); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
blockHeight := req.BlockHeight
|
||||
if blockHeight == 0 {
|
||||
blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
|
||||
if err != nil {
|
||||
if errors.Is(err, errs.NotFound) {
|
||||
return errs.NewPublicError("latest block not found")
|
||||
}
|
||||
return errors.Wrap(err, "error during GetLatestBlock")
|
||||
}
|
||||
blockHeight = uint64(blockHeader.Height)
|
||||
}
|
||||
|
||||
runeIds := make([]runes.RuneId, 0)
|
||||
for i, id := range req.Ids {
|
||||
runeId, ok := h.resolveRuneId(ctx.UserContext(), id)
|
||||
if !ok {
|
||||
return errs.NewPublicError(fmt.Sprintf("unable to resolve rune id \"%s\" from \"ids[%d]\"", id, i))
|
||||
}
|
||||
runeIds = append(runeIds, runeId)
|
||||
}
|
||||
|
||||
runeEntries, err := h.usecase.GetRuneEntryByRuneIdAndHeightBatch(ctx.UserContext(), runeIds, blockHeight)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error during GetRuneEntryByRuneIdAndHeightBatch")
|
||||
}
|
||||
holdersCounts := make(map[runes.RuneId]int64)
|
||||
if lo.Contains(req.AdditionalFields, "holdersCount") {
|
||||
holdersCounts, err = h.usecase.GetTotalHoldersByRuneIds(ctx.UserContext(), runeIds, blockHeight)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error during GetBalancesByRuneId")
|
||||
}
|
||||
}
|
||||
|
||||
results := make([]*getTokenInfoResult, 0, len(runeIds))
|
||||
|
||||
for _, runeId := range runeIds {
|
||||
runeEntry, ok := runeEntries[runeId]
|
||||
if !ok {
|
||||
return errs.NewPublicError(fmt.Sprintf("rune not found: %s", runeId))
|
||||
}
|
||||
var holdersCount *int64
|
||||
if lo.Contains(req.AdditionalFields, "holdersCount") {
|
||||
holdersCount = lo.ToPtr(holdersCounts[runeId])
|
||||
}
|
||||
|
||||
result, err := createTokenInfoResult(runeEntry, holdersCount)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error during createTokenInfoResult")
|
||||
}
|
||||
results = append(results, result)
|
||||
}
|
||||
|
||||
resp := getTokenInfoBatchResponse{
|
||||
Result: &getTokenInfoBatchResult{
|
||||
List: results,
|
||||
},
|
||||
}
|
||||
|
||||
return errors.WithStack(ctx.JSON(resp))
|
||||
}
|
||||
150
modules/runes/api/httphandler/get_tokens.go
Normal file
150
modules/runes/api/httphandler/get_tokens.go
Normal file
@@ -0,0 +1,150 @@
|
||||
package httphandler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/runes"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
const (
|
||||
getTokensMaxLimit = 1000
|
||||
)
|
||||
|
||||
type GetTokensScope string
|
||||
|
||||
const (
|
||||
GetTokensScopeAll GetTokensScope = "all"
|
||||
GetTokensScopeOngoing GetTokensScope = "ongoing"
|
||||
)
|
||||
|
||||
func (s GetTokensScope) IsValid() bool {
|
||||
switch s {
|
||||
case GetTokensScopeAll, GetTokensScopeOngoing:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type getTokensRequest struct {
|
||||
paginationRequest
|
||||
Search string `query:"search"`
|
||||
BlockHeight uint64 `query:"blockHeight"`
|
||||
Scope GetTokensScope `query:"scope"`
|
||||
AdditionalFieldsRaw string `query:"additionalFields"` // comma-separated list of additional fields
|
||||
AdditionalFields []string
|
||||
}
|
||||
|
||||
func (r *getTokensRequest) Validate() error {
|
||||
var errList []error
|
||||
if err := r.paginationRequest.Validate(); err != nil {
|
||||
errList = append(errList, err)
|
||||
}
|
||||
if r.Limit > getTokensMaxLimit {
|
||||
errList = append(errList, errors.Errorf("limit must be less than or equal to 1000"))
|
||||
}
|
||||
if r.Scope != "" && !r.Scope.IsValid() {
|
||||
errList = append(errList, errors.Errorf("invalid scope: %s", r.Scope))
|
||||
}
|
||||
|
||||
if r.AdditionalFieldsRaw == "" {
|
||||
// temporarily set default value for backward compatibility
|
||||
r.AdditionalFieldsRaw = "holdersCount" // TODO: remove this default value after all clients are updated
|
||||
}
|
||||
r.AdditionalFields = strings.Split(r.AdditionalFieldsRaw, ",")
|
||||
|
||||
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
|
||||
}
|
||||
|
||||
func (req *getTokensRequest) ParseDefault() error {
|
||||
if err := req.paginationRequest.ParseDefault(); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
if req.Scope == "" {
|
||||
req.Scope = GetTokensScopeAll
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type getTokensResult struct {
|
||||
List []*getTokenInfoResult `json:"list"`
|
||||
}
|
||||
|
||||
type getTokensResponse = HttpResponse[getTokensResult]
|
||||
|
||||
func (h *HttpHandler) GetTokens(ctx *fiber.Ctx) (err error) {
|
||||
var req getTokensRequest
|
||||
if err := ctx.QueryParser(&req); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
if err := req.Validate(); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
if err := req.ParseDefault(); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
blockHeight := req.BlockHeight
|
||||
if blockHeight == 0 {
|
||||
blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
|
||||
if err != nil {
|
||||
if errors.Is(err, errs.NotFound) {
|
||||
return errs.NewPublicError("latest block not found")
|
||||
}
|
||||
return errors.Wrap(err, "error during GetLatestBlock")
|
||||
}
|
||||
blockHeight = uint64(blockHeader.Height)
|
||||
}
|
||||
|
||||
// remove spacers
|
||||
search := strings.Replace(strings.Replace(req.Search, "•", "", -1), ".", "", -1)
|
||||
|
||||
var entries []*runes.RuneEntry
|
||||
switch req.Scope {
|
||||
case GetTokensScopeAll:
|
||||
entries, err = h.usecase.GetRuneEntries(ctx.UserContext(), search, blockHeight, req.Limit, req.Offset)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error during GetRuneEntryList")
|
||||
}
|
||||
case GetTokensScopeOngoing:
|
||||
entries, err = h.usecase.GetOngoingRuneEntries(ctx.UserContext(), search, blockHeight, req.Limit, req.Offset)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error during GetRuneEntryList")
|
||||
}
|
||||
default:
|
||||
return errs.NewPublicError(fmt.Sprintf("invalid scope: %s", req.Scope))
|
||||
}
|
||||
|
||||
runeIds := lo.Map(entries, func(item *runes.RuneEntry, _ int) runes.RuneId { return item.RuneId })
|
||||
holdersCounts := make(map[runes.RuneId]int64)
|
||||
if lo.Contains(req.AdditionalFields, "holdersCount") {
|
||||
holdersCounts, err = h.usecase.GetTotalHoldersByRuneIds(ctx.UserContext(), runeIds, blockHeight)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error during GetTotalHoldersByRuneIds")
|
||||
}
|
||||
}
|
||||
|
||||
results := make([]*getTokenInfoResult, 0, len(entries))
|
||||
for _, ent := range entries {
|
||||
var holdersCount *int64
|
||||
if lo.Contains(req.AdditionalFields, "holdersCount") {
|
||||
holdersCount = lo.ToPtr(holdersCounts[ent.RuneId])
|
||||
}
|
||||
result, err := createTokenInfoResult(ent, holdersCount)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error during createTokenInfoResult")
|
||||
}
|
||||
|
||||
results = append(results, result)
|
||||
}
|
||||
|
||||
return errors.WithStack(ctx.JSON(getTokensResponse{
|
||||
Result: &getTokensResult{
|
||||
List: results,
|
||||
},
|
||||
}))
|
||||
}
|
||||
171
modules/runes/api/httphandler/get_transaction_by_hash.go
Normal file
171
modules/runes/api/httphandler/get_transaction_by_hash.go
Normal file
@@ -0,0 +1,171 @@
|
||||
package httphandler
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/runes"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
type getTransactionByHashRequest struct {
|
||||
Hash string `params:"hash"`
|
||||
}
|
||||
|
||||
func (r getTransactionByHashRequest) Validate() error {
|
||||
var errList []error
|
||||
if len(r.Hash) == 0 {
|
||||
errList = append(errList, errs.NewPublicError("hash is required"))
|
||||
}
|
||||
if len(r.Hash) > chainhash.MaxHashStringSize {
|
||||
errList = append(errList, errs.NewPublicError(fmt.Sprintf("hash length must be less than or equal to %d bytes", chainhash.MaxHashStringSize)))
|
||||
}
|
||||
if len(errList) == 0 {
|
||||
return nil
|
||||
}
|
||||
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
|
||||
}
|
||||
|
||||
type getTransactionByHashResponse = HttpResponse[transaction]
|
||||
|
||||
func (h *HttpHandler) GetTransactionByHash(ctx *fiber.Ctx) (err error) {
|
||||
var req getTransactionByHashRequest
|
||||
if err := ctx.ParamsParser(&req); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
if err := req.Validate(); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
hash, err := chainhash.NewHashFromStr(req.Hash)
|
||||
if err != nil {
|
||||
return errs.NewPublicError("invalid transaction hash")
|
||||
}
|
||||
|
||||
tx, err := h.usecase.GetRuneTransaction(ctx.UserContext(), *hash)
|
||||
if err != nil {
|
||||
if errors.Is(err, errs.NotFound) {
|
||||
return fiber.NewError(fiber.StatusNotFound, "transaction not found")
|
||||
}
|
||||
return errors.Wrap(err, "error during GetRuneTransaction")
|
||||
}
|
||||
|
||||
allRuneIds := make(map[runes.RuneId]struct{})
|
||||
for id := range tx.Mints {
|
||||
allRuneIds[id] = struct{}{}
|
||||
}
|
||||
for id := range tx.Burns {
|
||||
allRuneIds[id] = struct{}{}
|
||||
}
|
||||
for _, input := range tx.Inputs {
|
||||
allRuneIds[input.RuneId] = struct{}{}
|
||||
}
|
||||
for _, output := range tx.Outputs {
|
||||
allRuneIds[output.RuneId] = struct{}{}
|
||||
}
|
||||
|
||||
runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx.UserContext(), lo.Keys(allRuneIds))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error during GetRuneEntryByRuneIdBatch")
|
||||
}
|
||||
|
||||
respTx := &transaction{
|
||||
TxHash: tx.Hash,
|
||||
BlockHeight: tx.BlockHeight,
|
||||
Index: tx.Index,
|
||||
Timestamp: tx.Timestamp.Unix(),
|
||||
Inputs: make([]txInputOutput, 0, len(tx.Inputs)),
|
||||
Outputs: make([]txInputOutput, 0, len(tx.Outputs)),
|
||||
Mints: make(map[string]amountWithDecimal, len(tx.Mints)),
|
||||
Burns: make(map[string]amountWithDecimal, len(tx.Burns)),
|
||||
Extend: runeTransactionExtend{
|
||||
RuneEtched: tx.RuneEtched,
|
||||
Runestone: nil,
|
||||
},
|
||||
}
|
||||
for _, input := range tx.Inputs {
|
||||
address := addressFromPkScript(input.PkScript, h.network)
|
||||
respTx.Inputs = append(respTx.Inputs, txInputOutput{
|
||||
PkScript: hex.EncodeToString(input.PkScript),
|
||||
Address: address,
|
||||
Id: input.RuneId,
|
||||
Amount: input.Amount,
|
||||
Decimals: runeEntries[input.RuneId].Divisibility,
|
||||
Index: input.Index,
|
||||
})
|
||||
}
|
||||
for _, output := range tx.Outputs {
|
||||
address := addressFromPkScript(output.PkScript, h.network)
|
||||
respTx.Outputs = append(respTx.Outputs, txInputOutput{
|
||||
PkScript: hex.EncodeToString(output.PkScript),
|
||||
Address: address,
|
||||
Id: output.RuneId,
|
||||
Amount: output.Amount,
|
||||
Decimals: runeEntries[output.RuneId].Divisibility,
|
||||
Index: output.Index,
|
||||
})
|
||||
}
|
||||
for id, amount := range tx.Mints {
|
||||
respTx.Mints[id.String()] = amountWithDecimal{
|
||||
Amount: amount,
|
||||
Decimals: runeEntries[id].Divisibility,
|
||||
}
|
||||
}
|
||||
for id, amount := range tx.Burns {
|
||||
respTx.Burns[id.String()] = amountWithDecimal{
|
||||
Amount: amount,
|
||||
Decimals: runeEntries[id].Divisibility,
|
||||
}
|
||||
}
|
||||
if tx.Runestone != nil {
|
||||
var e *etching
|
||||
if tx.Runestone.Etching != nil {
|
||||
var symbol *string
|
||||
if tx.Runestone.Etching.Symbol != nil {
|
||||
symbol = lo.ToPtr(string(*tx.Runestone.Etching.Symbol))
|
||||
}
|
||||
var t *terms
|
||||
if tx.Runestone.Etching.Terms != nil {
|
||||
t = &terms{
|
||||
Amount: tx.Runestone.Etching.Terms.Amount,
|
||||
Cap: tx.Runestone.Etching.Terms.Cap,
|
||||
HeightStart: tx.Runestone.Etching.Terms.HeightStart,
|
||||
HeightEnd: tx.Runestone.Etching.Terms.HeightEnd,
|
||||
OffsetStart: tx.Runestone.Etching.Terms.OffsetStart,
|
||||
OffsetEnd: tx.Runestone.Etching.Terms.OffsetEnd,
|
||||
}
|
||||
}
|
||||
e = &etching{
|
||||
Divisibility: tx.Runestone.Etching.Divisibility,
|
||||
Premine: tx.Runestone.Etching.Premine,
|
||||
Rune: tx.Runestone.Etching.Rune,
|
||||
Spacers: tx.Runestone.Etching.Spacers,
|
||||
Symbol: symbol,
|
||||
Terms: t,
|
||||
Turbo: tx.Runestone.Etching.Turbo,
|
||||
}
|
||||
}
|
||||
respTx.Extend.Runestone = &runestone{
|
||||
Cenotaph: tx.Runestone.Cenotaph,
|
||||
Flaws: lo.Ternary(tx.Runestone.Cenotaph, tx.Runestone.Flaws.CollectAsString(), nil),
|
||||
Etching: e,
|
||||
Edicts: lo.Map(tx.Runestone.Edicts, func(ed runes.Edict, _ int) edict {
|
||||
return edict{
|
||||
Id: ed.Id,
|
||||
Amount: ed.Amount,
|
||||
Output: ed.Output,
|
||||
}
|
||||
}),
|
||||
Mint: tx.Runestone.Mint,
|
||||
Pointer: tx.Runestone.Pointer,
|
||||
}
|
||||
}
|
||||
|
||||
return errors.WithStack(ctx.JSON(getTransactionByHashResponse{
|
||||
Result: respTx,
|
||||
}))
|
||||
}
|
||||
@@ -1,8 +1,10 @@
|
||||
package httphandler
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"slices"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
@@ -15,17 +17,28 @@ import (
|
||||
)
|
||||
|
||||
type getTransactionsRequest struct {
|
||||
Wallet string `query:"wallet"`
|
||||
Id string `query:"id"`
|
||||
|
||||
FromBlock int64 `query:"fromBlock"`
|
||||
ToBlock int64 `query:"toBlock"`
|
||||
paginationRequest
|
||||
Wallet string `query:"wallet"`
|
||||
Id string `query:"id"`
|
||||
FromBlock int64 `query:"fromBlock"`
|
||||
ToBlock int64 `query:"toBlock"`
|
||||
}
|
||||
|
||||
func (r getTransactionsRequest) Validate() error {
|
||||
const (
|
||||
getTransactionsMaxLimit = 3000
|
||||
)
|
||||
|
||||
func (r *getTransactionsRequest) Validate() error {
|
||||
var errList []error
|
||||
if r.Id != "" && !isRuneIdOrRuneName(r.Id) {
|
||||
errList = append(errList, errors.New("'id' is not valid rune id or rune name"))
|
||||
if r.Id != "" {
|
||||
id, err := url.QueryUnescape(r.Id)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
r.Id = id
|
||||
if !isRuneIdOrRuneName(r.Id) {
|
||||
errList = append(errList, errors.Errorf("id '%s' is not valid rune id or rune name", r.Id))
|
||||
}
|
||||
}
|
||||
if r.FromBlock < -1 {
|
||||
errList = append(errList, errors.Errorf("invalid fromBlock range"))
|
||||
@@ -33,6 +46,12 @@ func (r getTransactionsRequest) Validate() error {
|
||||
if r.ToBlock < -1 {
|
||||
errList = append(errList, errors.Errorf("invalid toBlock range"))
|
||||
}
|
||||
if r.Limit < 0 {
|
||||
errList = append(errList, errors.New("'limit' must be non-negative"))
|
||||
}
|
||||
if r.Limit > getTransactionsMaxLimit {
|
||||
errList = append(errList, errors.Errorf("'limit' cannot exceed %d", getTransactionsMaxLimit))
|
||||
}
|
||||
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
|
||||
}
|
||||
|
||||
@@ -115,6 +134,9 @@ func (h *HttpHandler) GetTransactions(ctx *fiber.Ctx) (err error) {
|
||||
if err := req.Validate(); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
if err := req.ParseDefault(); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
var pkScript []byte
|
||||
if req.Wallet != "" {
|
||||
@@ -130,7 +152,7 @@ func (h *HttpHandler) GetTransactions(ctx *fiber.Ctx) (err error) {
|
||||
var ok bool
|
||||
runeId, ok = h.resolveRuneId(ctx.UserContext(), req.Id)
|
||||
if !ok {
|
||||
return errs.NewPublicError("unable to resolve rune id from \"id\"")
|
||||
return errs.NewPublicError(fmt.Sprintf("unable to resolve rune id \"%s\" from \"id\"", req.Id))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -143,6 +165,9 @@ func (h *HttpHandler) GetTransactions(ctx *fiber.Ctx) (err error) {
|
||||
if req.FromBlock == -1 || req.ToBlock == -1 {
|
||||
blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
|
||||
if err != nil {
|
||||
if errors.Is(err, errs.NotFound) {
|
||||
return errs.NewPublicError("latest block not found")
|
||||
}
|
||||
return errors.Wrap(err, "error during GetLatestBlock")
|
||||
}
|
||||
if req.FromBlock == -1 {
|
||||
@@ -158,29 +183,34 @@ func (h *HttpHandler) GetTransactions(ctx *fiber.Ctx) (err error) {
|
||||
return errs.NewPublicError(fmt.Sprintf("fromBlock must be less than or equal to toBlock, got fromBlock=%d, toBlock=%d", req.FromBlock, req.ToBlock))
|
||||
}
|
||||
|
||||
txs, err := h.usecase.GetRuneTransactions(ctx.UserContext(), pkScript, runeId, uint64(req.FromBlock), uint64(req.ToBlock))
|
||||
txs, err := h.usecase.GetRuneTransactions(ctx.UserContext(), pkScript, runeId, uint64(req.FromBlock), uint64(req.ToBlock), req.Limit, req.Offset)
|
||||
if err != nil {
|
||||
if errors.Is(err, errs.NotFound) {
|
||||
return errs.NewPublicError("transactions not found")
|
||||
}
|
||||
return errors.Wrap(err, "error during GetRuneTransactions")
|
||||
}
|
||||
|
||||
var allRuneIds []runes.RuneId
|
||||
allRuneIds := make(map[runes.RuneId]struct{})
|
||||
for _, tx := range txs {
|
||||
for id := range tx.Mints {
|
||||
allRuneIds = append(allRuneIds, id)
|
||||
allRuneIds[id] = struct{}{}
|
||||
}
|
||||
for id := range tx.Burns {
|
||||
allRuneIds = append(allRuneIds, id)
|
||||
allRuneIds[id] = struct{}{}
|
||||
}
|
||||
for _, input := range tx.Inputs {
|
||||
allRuneIds = append(allRuneIds, input.RuneId)
|
||||
allRuneIds[input.RuneId] = struct{}{}
|
||||
}
|
||||
for _, output := range tx.Outputs {
|
||||
allRuneIds = append(allRuneIds, output.RuneId)
|
||||
allRuneIds[output.RuneId] = struct{}{}
|
||||
}
|
||||
}
|
||||
allRuneIds = lo.Uniq(allRuneIds)
|
||||
runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx.UserContext(), allRuneIds)
|
||||
runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx.UserContext(), lo.Keys(allRuneIds))
|
||||
if err != nil {
|
||||
if errors.Is(err, errs.NotFound) {
|
||||
return errs.NewPublicError("rune entries not found")
|
||||
}
|
||||
return errors.Wrap(err, "error during GetRuneEntryByRuneIdBatch")
|
||||
}
|
||||
|
||||
@@ -279,12 +309,12 @@ func (h *HttpHandler) GetTransactions(ctx *fiber.Ctx) (err error) {
|
||||
}
|
||||
txList = append(txList, respTx)
|
||||
}
|
||||
// sort by block height ASC, then index ASC
|
||||
// sort by block height DESC, then index DESC
|
||||
slices.SortFunc(txList, func(t1, t2 transaction) int {
|
||||
if t1.BlockHeight != t2.BlockHeight {
|
||||
return int(t1.BlockHeight - t2.BlockHeight)
|
||||
return cmp.Compare(t2.BlockHeight, t1.BlockHeight)
|
||||
}
|
||||
return int(t1.Index - t2.Index)
|
||||
return cmp.Compare(t2.Index, t1.Index)
|
||||
})
|
||||
|
||||
resp := getTransactionsResponse{
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
package httphandler
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
|
||||
@@ -12,19 +13,37 @@ import (
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
type getUTXOsByAddressRequest struct {
|
||||
type getUTXOsRequest struct {
|
||||
paginationRequest
|
||||
Wallet string `params:"wallet"`
|
||||
Id string `query:"id"`
|
||||
BlockHeight uint64 `query:"blockHeight"`
|
||||
}
|
||||
|
||||
func (r getUTXOsByAddressRequest) Validate() error {
|
||||
const (
|
||||
getUTXOsMaxLimit = 3000
|
||||
)
|
||||
|
||||
func (r *getUTXOsRequest) Validate() error {
|
||||
var errList []error
|
||||
if r.Wallet == "" {
|
||||
errList = append(errList, errors.New("'wallet' is required"))
|
||||
}
|
||||
if r.Id != "" && !isRuneIdOrRuneName(r.Id) {
|
||||
errList = append(errList, errors.New("'id' is not valid rune id or rune name"))
|
||||
if r.Id != "" {
|
||||
id, err := url.QueryUnescape(r.Id)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
r.Id = id
|
||||
if !isRuneIdOrRuneName(r.Id) {
|
||||
errList = append(errList, errors.Errorf("id '%s' is not valid rune id or rune name", r.Id))
|
||||
}
|
||||
}
|
||||
if r.Limit < 0 {
|
||||
errList = append(errList, errors.New("'limit' must be non-negative"))
|
||||
}
|
||||
if r.Limit > getUTXOsMaxLimit {
|
||||
errList = append(errList, errors.Errorf("'limit' cannot exceed %d", getUTXOsMaxLimit))
|
||||
}
|
||||
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
|
||||
}
|
||||
@@ -41,21 +60,22 @@ type utxoExtend struct {
|
||||
Runes []runeBalance `json:"runes"`
|
||||
}
|
||||
|
||||
type utxo struct {
|
||||
type utxoItem struct {
|
||||
TxHash chainhash.Hash `json:"txHash"`
|
||||
OutputIndex uint32 `json:"outputIndex"`
|
||||
Sats int64 `json:"sats"`
|
||||
Extend utxoExtend `json:"extend"`
|
||||
}
|
||||
|
||||
type getUTXOsByAddressResult struct {
|
||||
List []utxo `json:"list"`
|
||||
BlockHeight uint64 `json:"blockHeight"`
|
||||
type getUTXOsResult struct {
|
||||
List []utxoItem `json:"list"`
|
||||
BlockHeight uint64 `json:"blockHeight"`
|
||||
}
|
||||
|
||||
type getUTXOsByAddressResponse = HttpResponse[getUTXOsByAddressResult]
|
||||
type getUTXOsResponse = HttpResponse[getUTXOsResult]
|
||||
|
||||
func (h *HttpHandler) GetUTXOsByAddress(ctx *fiber.Ctx) (err error) {
|
||||
var req getUTXOsByAddressRequest
|
||||
func (h *HttpHandler) GetUTXOs(ctx *fiber.Ctx) (err error) {
|
||||
var req getUTXOsRequest
|
||||
if err := ctx.ParamsParser(&req); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
@@ -65,6 +85,9 @@ func (h *HttpHandler) GetUTXOsByAddress(ctx *fiber.Ctx) (err error) {
|
||||
if err := req.Validate(); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
if err := req.ParseDefault(); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
pkScript, ok := resolvePkScript(h.network, req.Wallet)
|
||||
if !ok {
|
||||
@@ -75,32 +98,52 @@ func (h *HttpHandler) GetUTXOsByAddress(ctx *fiber.Ctx) (err error) {
|
||||
if blockHeight == 0 {
|
||||
blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
|
||||
if err != nil {
|
||||
if errors.Is(err, errs.NotFound) {
|
||||
return errs.NewPublicError("latest block not found")
|
||||
}
|
||||
return errors.Wrap(err, "error during GetLatestBlock")
|
||||
}
|
||||
blockHeight = uint64(blockHeader.Height)
|
||||
}
|
||||
|
||||
outPointBalances, err := h.usecase.GetUnspentOutPointBalancesByPkScript(ctx.UserContext(), pkScript, blockHeight)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error during GetBalancesByPkScript")
|
||||
var utxos []*entity.RunesUTXOWithSats
|
||||
if runeId, ok := h.resolveRuneId(ctx.UserContext(), req.Id); ok {
|
||||
utxos, err = h.usecase.GetRunesUTXOsByRuneIdAndPkScript(ctx.UserContext(), runeId, pkScript, blockHeight, req.Limit, req.Offset)
|
||||
if err != nil {
|
||||
if errors.Is(err, errs.NotFound) {
|
||||
return errs.NewPublicError("utxos not found")
|
||||
}
|
||||
return errors.Wrap(err, "error during GetBalancesByPkScript")
|
||||
}
|
||||
} else {
|
||||
utxos, err = h.usecase.GetRunesUTXOsByPkScript(ctx.UserContext(), pkScript, blockHeight, req.Limit, req.Offset)
|
||||
if err != nil {
|
||||
if errors.Is(err, errs.NotFound) {
|
||||
return errs.NewPublicError("utxos not found")
|
||||
}
|
||||
return errors.Wrap(err, "error during GetBalancesByPkScript")
|
||||
}
|
||||
}
|
||||
|
||||
outPointBalanceRuneIds := lo.Map(outPointBalances, func(outPointBalance *entity.OutPointBalance, _ int) runes.RuneId {
|
||||
return outPointBalance.RuneId
|
||||
})
|
||||
runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx.UserContext(), outPointBalanceRuneIds)
|
||||
runeIds := make(map[runes.RuneId]struct{}, 0)
|
||||
for _, utxo := range utxos {
|
||||
for _, balance := range utxo.RuneBalances {
|
||||
runeIds[balance.RuneId] = struct{}{}
|
||||
}
|
||||
}
|
||||
runeIdsList := lo.Keys(runeIds)
|
||||
runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx.UserContext(), runeIdsList)
|
||||
if err != nil {
|
||||
if errors.Is(err, errs.NotFound) {
|
||||
return errs.NewPublicError("rune entries not found")
|
||||
}
|
||||
return errors.Wrap(err, "error during GetRuneEntryByRuneIdBatch")
|
||||
}
|
||||
|
||||
groupedBalances := lo.GroupBy(outPointBalances, func(outPointBalance *entity.OutPointBalance) wire.OutPoint {
|
||||
return outPointBalance.OutPoint
|
||||
})
|
||||
|
||||
utxoList := make([]utxo, 0, len(groupedBalances))
|
||||
for outPoint, balances := range groupedBalances {
|
||||
runeBalances := make([]runeBalance, 0, len(balances))
|
||||
for _, balance := range balances {
|
||||
utxoRespList := make([]utxoItem, 0, len(utxos))
|
||||
for _, utxo := range utxos {
|
||||
runeBalances := make([]runeBalance, 0, len(utxo.RuneBalances))
|
||||
for _, balance := range utxo.RuneBalances {
|
||||
runeEntry := runeEntries[balance.RuneId]
|
||||
runeBalances = append(runeBalances, runeBalance{
|
||||
RuneId: balance.RuneId,
|
||||
@@ -111,34 +154,20 @@ func (h *HttpHandler) GetUTXOsByAddress(ctx *fiber.Ctx) (err error) {
|
||||
})
|
||||
}
|
||||
|
||||
utxoList = append(utxoList, utxo{
|
||||
TxHash: outPoint.Hash,
|
||||
OutputIndex: outPoint.Index,
|
||||
utxoRespList = append(utxoRespList, utxoItem{
|
||||
TxHash: utxo.OutPoint.Hash,
|
||||
OutputIndex: utxo.OutPoint.Index,
|
||||
Sats: utxo.Sats,
|
||||
Extend: utxoExtend{
|
||||
Runes: runeBalances,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// filter by req.Id if exists
|
||||
{
|
||||
runeId, ok := h.resolveRuneId(ctx.UserContext(), req.Id)
|
||||
if ok {
|
||||
utxoList = lo.Filter(utxoList, func(u utxo, _ int) bool {
|
||||
for _, runeBalance := range u.Extend.Runes {
|
||||
if runeBalance.RuneId == runeId {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
resp := getUTXOsByAddressResponse{
|
||||
Result: &getUTXOsByAddressResult{
|
||||
resp := getUTXOsResponse{
|
||||
Result: &getUTXOsResult{
|
||||
BlockHeight: blockHeight,
|
||||
List: utxoList,
|
||||
List: utxoRespList,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@@ -0,0 +1,92 @@
|
||||
package httphandler
|
||||
|
||||
import (
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/runes"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/usecase"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
type getUTXOsOutputByLocationRequest struct {
|
||||
TxHash string `params:"txHash"`
|
||||
OutputIndex int32 `query:"outputIndex"`
|
||||
}
|
||||
|
||||
func (r getUTXOsOutputByLocationRequest) Validate() error {
|
||||
var errList []error
|
||||
if r.TxHash == "" {
|
||||
errList = append(errList, errors.New("'txHash' is required"))
|
||||
}
|
||||
if r.OutputIndex < 0 {
|
||||
errList = append(errList, errors.New("'outputIndex' must be non-negative"))
|
||||
}
|
||||
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
|
||||
}
|
||||
|
||||
type getUTXOsOutputByTxIdResponse = HttpResponse[utxoItem]
|
||||
|
||||
func (h *HttpHandler) GetUTXOsOutputByLocation(ctx *fiber.Ctx) (err error) {
|
||||
var req getUTXOsOutputByLocationRequest
|
||||
if err := ctx.ParamsParser(&req); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
if err := ctx.QueryParser(&req); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
if err := req.Validate(); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
txHash, err := chainhash.NewHashFromStr(req.TxHash)
|
||||
if err != nil {
|
||||
return errs.WithPublicMessage(err, "unable to resolve txHash")
|
||||
}
|
||||
|
||||
utxo, err := h.usecase.GetUTXOsOutputByLocation(ctx.UserContext(), *txHash, uint32(req.OutputIndex))
|
||||
if err != nil {
|
||||
if errors.Is(err, usecase.ErrUTXONotFound) {
|
||||
return errs.NewPublicError("utxo not found")
|
||||
}
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
runeIds := make(map[runes.RuneId]struct{}, 0)
|
||||
for _, balance := range utxo.RuneBalances {
|
||||
runeIds[balance.RuneId] = struct{}{}
|
||||
}
|
||||
runeIdsList := lo.Keys(runeIds)
|
||||
runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx.UserContext(), runeIdsList)
|
||||
if err != nil {
|
||||
if errors.Is(err, errs.NotFound) {
|
||||
return errs.NewPublicError("rune entries not found")
|
||||
}
|
||||
return errors.Wrap(err, "error during GetRuneEntryByRuneIdBatch")
|
||||
}
|
||||
|
||||
runeBalances := make([]runeBalance, 0, len(utxo.RuneBalances))
|
||||
for _, balance := range utxo.RuneBalances {
|
||||
runeEntry := runeEntries[balance.RuneId]
|
||||
runeBalances = append(runeBalances, runeBalance{
|
||||
RuneId: balance.RuneId,
|
||||
Rune: runeEntry.SpacedRune,
|
||||
Symbol: string(runeEntry.Symbol),
|
||||
Amount: balance.Amount,
|
||||
Divisibility: runeEntry.Divisibility,
|
||||
})
|
||||
}
|
||||
|
||||
resp := getUTXOsOutputByTxIdResponse{
|
||||
Result: &utxoItem{
|
||||
TxHash: utxo.OutPoint.Hash,
|
||||
OutputIndex: utxo.OutPoint.Index,
|
||||
Sats: utxo.Sats,
|
||||
Extend: utxoExtend{
|
||||
Runes: runeBalances,
|
||||
},
|
||||
},
|
||||
}
|
||||
return errors.WithStack(ctx.JSON(resp))
|
||||
}
|
||||
@@ -0,0 +1,136 @@
|
||||
package httphandler
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/runes"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/usecase"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/samber/lo"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
type getUTXOsOutputByLocationQuery struct {
|
||||
TxHash string `json:"txHash"`
|
||||
OutputIndex int32 `json:"outputIndex"`
|
||||
}
|
||||
|
||||
type getUTXOsOutputByLocationBatchRequest struct {
|
||||
Queries []getUTXOsOutputByLocationQuery `json:"queries"`
|
||||
}
|
||||
|
||||
const getUTXOsOutputByLocationBatchMaxQueries = 100
|
||||
|
||||
func (r getUTXOsOutputByLocationBatchRequest) Validate() error {
|
||||
var errList []error
|
||||
if len(r.Queries) == 0 {
|
||||
errList = append(errList, errors.New("at least one query is required"))
|
||||
}
|
||||
if len(r.Queries) > getUTXOsOutputByLocationBatchMaxQueries {
|
||||
errList = append(errList, errors.Errorf("cannot exceed %d queries", getUTXOsOutputByLocationBatchMaxQueries))
|
||||
}
|
||||
for i, query := range r.Queries {
|
||||
if query.TxHash == "" {
|
||||
errList = append(errList, errors.Errorf("queries[%d]: 'txHash' is required", i))
|
||||
}
|
||||
if query.OutputIndex < 0 {
|
||||
errList = append(errList, errors.Errorf("queries[%d]: 'outputIndex' must be non-negative", i))
|
||||
}
|
||||
}
|
||||
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
|
||||
}
|
||||
|
||||
type getUTXOsOutputByLocationBatchResult struct {
|
||||
List []*utxoItem `json:"list"`
|
||||
}
|
||||
|
||||
type getUTXOsOutputByLocationBatchResponse = HttpResponse[getUTXOsOutputByLocationBatchResult]
|
||||
|
||||
func (h *HttpHandler) GetUTXOsOutputByLocationBatch(ctx *fiber.Ctx) (err error) {
|
||||
var req getUTXOsOutputByLocationBatchRequest
|
||||
if err := ctx.BodyParser(&req); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
if err := req.Validate(); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
processQuery := func(ctx context.Context, query getUTXOsOutputByLocationQuery, queryIndex int) (*utxoItem, error) {
|
||||
txHash, err := chainhash.NewHashFromStr(query.TxHash)
|
||||
if err != nil {
|
||||
return nil, errs.WithPublicMessage(err, fmt.Sprintf("unable to parse txHash from \"queries[%d].txHash\"", queryIndex))
|
||||
}
|
||||
|
||||
utxo, err := h.usecase.GetUTXOsOutputByLocation(ctx, *txHash, uint32(query.OutputIndex))
|
||||
if err != nil {
|
||||
if errors.Is(err, usecase.ErrUTXONotFound) {
|
||||
return nil, errs.NewPublicError(fmt.Sprintf("utxo not found for queries[%d]", queryIndex))
|
||||
}
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
runeIds := make(map[runes.RuneId]struct{}, 0)
|
||||
for _, balance := range utxo.RuneBalances {
|
||||
runeIds[balance.RuneId] = struct{}{}
|
||||
}
|
||||
runeIdsList := lo.Keys(runeIds)
|
||||
runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx, runeIdsList)
|
||||
if err != nil {
|
||||
if errors.Is(err, errs.NotFound) {
|
||||
return nil, errs.NewPublicError(fmt.Sprintf("rune entries not found for queries[%d]", queryIndex))
|
||||
}
|
||||
return nil, errors.Wrap(err, "error during GetRuneEntryByRuneIdBatch")
|
||||
}
|
||||
|
||||
runeBalances := make([]runeBalance, 0, len(utxo.RuneBalances))
|
||||
for _, balance := range utxo.RuneBalances {
|
||||
runeEntry := runeEntries[balance.RuneId]
|
||||
runeBalances = append(runeBalances, runeBalance{
|
||||
RuneId: balance.RuneId,
|
||||
Rune: runeEntry.SpacedRune,
|
||||
Symbol: string(runeEntry.Symbol),
|
||||
Amount: balance.Amount,
|
||||
Divisibility: runeEntry.Divisibility,
|
||||
})
|
||||
}
|
||||
|
||||
return &utxoItem{
|
||||
TxHash: utxo.OutPoint.Hash,
|
||||
OutputIndex: utxo.OutPoint.Index,
|
||||
Sats: utxo.Sats,
|
||||
Extend: utxoExtend{
|
||||
Runes: runeBalances,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
results := make([]*utxoItem, len(req.Queries))
|
||||
eg, ectx := errgroup.WithContext(ctx.UserContext())
|
||||
for i, query := range req.Queries {
|
||||
i := i
|
||||
query := query
|
||||
eg.Go(func() error {
|
||||
result, err := processQuery(ectx, query, i)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error during processQuery for query %d", i)
|
||||
}
|
||||
results[i] = result
|
||||
return nil
|
||||
})
|
||||
}
|
||||
if err := eg.Wait(); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
resp := getUTXOsOutputByLocationBatchResponse{
|
||||
Result: &getUTXOsOutputByLocationBatchResult{
|
||||
List: results,
|
||||
},
|
||||
}
|
||||
|
||||
return errors.WithStack(ctx.JSON(resp))
|
||||
}
|
||||
@@ -7,7 +7,9 @@ import (
|
||||
"github.com/btcsuite/btcd/btcutil"
|
||||
"github.com/btcsuite/btcd/chaincfg"
|
||||
"github.com/btcsuite/btcd/txscript"
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/runes"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/usecase"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger"
|
||||
@@ -31,6 +33,53 @@ type HttpResponse[T any] struct {
|
||||
Result *T `json:"result,omitempty"`
|
||||
}
|
||||
|
||||
// paginationRequest carries the common offset/limit query parameters shared
// by paginated endpoints. Zero values mean "unset"; ParseDefault fills in
// the default limit.
type paginationRequest struct {
	Offset int32 `query:"offset"`
	Limit  int32 `query:"limit"`

	// OrderBy string `query:"orderBy"` // ASC or DESC
	// SortBy string `query:"sortBy"` // column name
}
|
||||
|
||||
func (req paginationRequest) Validate() error {
|
||||
var errList []error
|
||||
|
||||
// this just safeguard for limit,
|
||||
// each path should have own validation.
|
||||
if req.Limit > 10000 {
|
||||
errList = append(errList, errors.Errorf("too large limit"))
|
||||
}
|
||||
if req.Limit < 0 {
|
||||
errList = append(errList, errors.Errorf("limit must be greater than or equal to 0"))
|
||||
}
|
||||
if req.Offset < 0 {
|
||||
errList = append(errList, errors.Errorf("offset must be greater than or equal to 0"))
|
||||
}
|
||||
|
||||
// TODO:
|
||||
// if req.OrderBy != "" && req.OrderBy != "ASC" && req.OrderBy != "DESC" {
|
||||
// errList = append(errList, errors.Errorf("invalid orderBy value, must be `ASC` or `DESC`"))
|
||||
// }
|
||||
|
||||
return errs.WithPublicMessage(errors.Join(errList...), "pagination validation error")
|
||||
}
|
||||
|
||||
func (req *paginationRequest) ParseDefault() error {
|
||||
if req == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if req.Limit == 0 {
|
||||
req.Limit = 100
|
||||
}
|
||||
|
||||
// TODO:
|
||||
// if req.OrderBy == "" {
|
||||
// req.OrderBy = "ASC"
|
||||
// }
|
||||
return nil
|
||||
}
|
||||
|
||||
func resolvePkScript(network common.Network, wallet string) ([]byte, bool) {
|
||||
if wallet == "" {
|
||||
return nil, false
|
||||
@@ -41,6 +90,10 @@ func resolvePkScript(network common.Network, wallet string) ([]byte, bool) {
|
||||
return &chaincfg.MainNetParams
|
||||
case common.NetworkTestnet:
|
||||
return &chaincfg.TestNet3Params
|
||||
case common.NetworkFractalMainnet:
|
||||
return &chaincfg.MainNetParams
|
||||
case common.NetworkFractalTestnet:
|
||||
return &chaincfg.MainNetParams
|
||||
}
|
||||
panic("invalid network")
|
||||
}()
|
||||
|
||||
@@ -7,12 +7,17 @@ import (
|
||||
// Mount registers all runes HTTP routes under the /v2/runes group.
//
// NOTE(review): this block appears to contain unresolved diff residue — two
// route paths are registered twice with different handler names
// ("/balances/wallet/batch", "/balances/wallet/:wallet", and
// "/utxos/wallet/:wallet"). Confirm which handler set is current and remove
// the duplicates; as written, duplicate registrations for the same
// method+path are both added to the router.
func (h *HttpHandler) Mount(router fiber.Router) error {
	r := router.Group("/v2/runes")

	r.Post("/balances/wallet/batch", h.GetBalancesByAddressBatch) // NOTE(review): duplicate of the registration two lines below — confirm
	r.Get("/balances/wallet/:wallet", h.GetBalancesByAddress)     // NOTE(review): duplicate — confirm
	r.Post("/balances/wallet/batch", h.GetBalancesBatch)
	r.Get("/balances/wallet/:wallet", h.GetBalances)
	r.Get("/transactions", h.GetTransactions)
	r.Get("/transactions/hash/:hash", h.GetTransactionByHash)
	r.Get("/holders/:id", h.GetHolders)
	r.Post("/info/batch", h.GetTokenInfoBatch)
	r.Get("/info/:id", h.GetTokenInfo)
	r.Get("/utxos/wallet/:wallet", h.GetUTXOsByAddress) // NOTE(review): duplicate of the registration below — confirm
	r.Get("/utxos/wallet/:wallet", h.GetUTXOs)
	r.Post("/utxos/output/batch", h.GetUTXOsOutputByLocationBatch)
	r.Get("/utxos/output/:txHash", h.GetUTXOsOutputByLocation)
	r.Get("/block", h.GetCurrentBlock)
	r.Get("/tokens", h.GetTokens)
	return nil
}
|
||||
|
||||
@@ -1,27 +0,0 @@
|
||||
package runes
|
||||
|
||||
import (
|
||||
"github.com/Cleverse/go-utilities/utils"
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/gaze-network/indexer-network/common"
|
||||
"github.com/gaze-network/indexer-network/core/types"
|
||||
)
|
||||
|
||||
const (
|
||||
Version = "v0.0.1"
|
||||
DBVersion = 1
|
||||
EventHashVersion = 1
|
||||
)
|
||||
|
||||
var startingBlockHeader = map[common.Network]types.BlockHeader{
|
||||
common.NetworkMainnet: {
|
||||
Height: 839999,
|
||||
Hash: *utils.Must(chainhash.NewHashFromStr("0000000000000000000172014ba58d66455762add0512355ad651207918494ab")),
|
||||
PrevBlock: *utils.Must(chainhash.NewHashFromStr("00000000000000000001dcce6ce7c8a45872cafd1fb04732b447a14a91832591")),
|
||||
},
|
||||
common.NetworkTestnet: {
|
||||
Height: 2583200,
|
||||
Hash: *utils.Must(chainhash.NewHashFromStr("000000000006c5f0dfcd9e0e81f27f97a87aef82087ffe69cd3c390325bb6541")),
|
||||
PrevBlock: *utils.Must(chainhash.NewHashFromStr("00000000000668f3bafac992f53424774515440cb47e1cb9e73af3f496139e28")),
|
||||
},
|
||||
}
|
||||
122
modules/runes/constants/constants.go
Normal file
122
modules/runes/constants/constants.go
Normal file
@@ -0,0 +1,122 @@
|
||||
package constants
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/gaze-network/indexer-network/common"
|
||||
"github.com/gaze-network/indexer-network/core/types"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/runes"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger"
|
||||
"github.com/gaze-network/uint128"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
const (
	// Version is the release version of the runes module.
	Version = "v0.0.1"
	// DBVersion is the database schema version this module expects;
	// presumably checked against migrations — TODO confirm against the
	// migration/compatibility check code.
	DBVersion = 1
	// EventHashVersion versions the event-hash payload format (see the
	// processor's getHashPayload, which embeds it in the payload prefix).
	EventHashVersion = 1
)
|
||||
|
||||
// StartingBlockHeader maps each supported network to the block header the
// indexer starts from.
//
// starting block heights and hashes should be 1 block before activation block, as indexer will start from the block after this value
var StartingBlockHeader = map[common.Network]types.BlockHeader{
	common.NetworkMainnet: {
		Height: 839999, // runes activate on mainnet at 840000
	},
	common.NetworkTestnet: {
		Height: 2519999,
	},
	common.NetworkFractalMainnet: {
		Height: 83999,
	},
	common.NetworkFractalTestnet: {
		Height: 83999,
	},
}
|
||||
|
||||
// GenesisRuneConfig describes a rune that is hardcoded into the protocol at
// activation (a "genesis" rune) rather than etched by an on-chain
// transaction. Entries are consumed from GenesisRuneConfigMap.
type GenesisRuneConfig struct {
	RuneId       runes.RuneId
	Name         string
	Number       uint64
	Divisibility uint8
	Premine      uint128.Uint128
	SpacedRune   runes.SpacedRune
	Symbol       rune
	Terms        *runes.Terms
	Turbo        bool
	// EtchingTxHash is the zero hash for genesis runes (no real etching tx).
	EtchingTxHash chainhash.Hash
	EtchedAt      time.Time
}
|
||||
|
||||
// GenesisRuneConfigMap holds the hardcoded genesis rune for each network
// that has one (testnet has none; see NetworkHasGenesisRune). All entries
// use rune id 1:0, a zero etching tx hash, and epoch EtchedAt, since the
// rune is synthesized rather than etched on-chain. The two fractal entries
// are intentionally identical. Mint terms differ only in the height window.
var GenesisRuneConfigMap = map[common.Network]GenesisRuneConfig{
	common.NetworkMainnet: {
		RuneId:       runes.RuneId{BlockHeight: 1, TxIndex: 0},
		Number:       0,
		Divisibility: 0,
		Premine:      uint128.Zero,
		// presumably the reserved UNCOMMON•GOODS rune — TODO confirm the
		// decoded name of rune value 2055900680524219742 with spacers 0b10000000.
		SpacedRune: runes.NewSpacedRune(runes.NewRune(2055900680524219742), 0b10000000),
		Symbol:     '\u29c9',
		Terms: &runes.Terms{
			Amount:      lo.ToPtr(uint128.From64(1)),
			Cap:         &uint128.Max, // effectively unlimited mints within the height window
			HeightStart: lo.ToPtr(uint64(840000)),
			HeightEnd:   lo.ToPtr(uint64(1050000)),
			OffsetStart: nil,
			OffsetEnd:   nil,
		},
		Turbo:         true,
		EtchingTxHash: chainhash.Hash{},
		EtchedAt:      time.Unix(0, 0),
	},
	common.NetworkFractalMainnet: {
		RuneId:       runes.RuneId{BlockHeight: 1, TxIndex: 0},
		Number:       0,
		Divisibility: 0,
		Premine:      uint128.Zero,
		SpacedRune:   runes.NewSpacedRune(runes.NewRune(2055900680524219742), 0b10000000),
		Symbol:       '\u29c9',
		Terms: &runes.Terms{
			Amount:      lo.ToPtr(uint128.From64(1)),
			Cap:         &uint128.Max,
			HeightStart: lo.ToPtr(uint64(84000)),
			HeightEnd:   lo.ToPtr(uint64(2184000)),
			OffsetStart: nil,
			OffsetEnd:   nil,
		},
		Turbo:         true,
		EtchingTxHash: chainhash.Hash{},
		EtchedAt:      time.Unix(0, 0),
	},
	common.NetworkFractalTestnet: {
		RuneId:       runes.RuneId{BlockHeight: 1, TxIndex: 0},
		Number:       0,
		Divisibility: 0,
		Premine:      uint128.Zero,
		SpacedRune:   runes.NewSpacedRune(runes.NewRune(2055900680524219742), 0b10000000),
		Symbol:       '\u29c9',
		Terms: &runes.Terms{
			Amount:      lo.ToPtr(uint128.From64(1)),
			Cap:         &uint128.Max,
			HeightStart: lo.ToPtr(uint64(84000)),
			HeightEnd:   lo.ToPtr(uint64(2184000)),
			OffsetStart: nil,
			OffsetEnd:   nil,
		},
		Turbo:         true,
		EtchingTxHash: chainhash.Hash{},
		EtchedAt:      time.Unix(0, 0),
	},
}
|
||||
|
||||
func NetworkHasGenesisRune(network common.Network) bool {
|
||||
switch network {
|
||||
case common.NetworkMainnet, common.NetworkFractalMainnet, common.NetworkFractalTestnet:
|
||||
return true
|
||||
case common.NetworkTestnet:
|
||||
return false
|
||||
default:
|
||||
logger.Panic(fmt.Sprintf("unsupported network: %s", network))
|
||||
return false
|
||||
}
|
||||
}
|
||||
@@ -1,5 +1,6 @@
|
||||
BEGIN;
|
||||
|
||||
CREATE EXTENSION IF NOT EXISTS pg_trgm;
|
||||
-- Indexer Client Information
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "runes_indexer_stats" (
|
||||
@@ -48,6 +49,7 @@ CREATE TABLE IF NOT EXISTS "runes_entries" (
|
||||
"etched_at" TIMESTAMP NOT NULL
|
||||
);
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS runes_entries_rune_idx ON "runes_entries" USING BTREE ("rune");
|
||||
CREATE INDEX IF NOT EXISTS runes_entries_rune_gin_idx ON "runes_entries" USING GIN ("rune" gin_trgm_ops); -- to speed up queries with LIKE operator
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS runes_entries_number_idx ON "runes_entries" USING BTREE ("number");
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "runes_entry_states" (
|
||||
@@ -118,5 +120,7 @@ CREATE TABLE IF NOT EXISTS "runes_balances" (
|
||||
"amount" DECIMAL NOT NULL,
|
||||
PRIMARY KEY ("pkscript", "rune_id", "block_height")
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS runes_balances_rune_id_block_height_idx ON "runes_balances" USING BTREE ("rune_id", "block_height");
|
||||
CREATE INDEX IF NOT EXISTS runes_balances_pkscript_block_height_idx ON "runes_balances" USING BTREE ("pkscript", "block_height");
|
||||
|
||||
COMMIT;
|
||||
|
||||
104
modules/runes/database/postgresql/queries/batch.sql
Normal file
104
modules/runes/database/postgresql/queries/batch.sql
Normal file
@@ -0,0 +1,104 @@
|
||||
-- name: BatchCreateRunesBalances :exec
-- Bulk-inserts rune balance snapshots from parallel arrays. Uses SELECT
-- instead of VALUES: PostgreSQL rejects set-returning functions inside a
-- VALUES list ("set-returning functions are not allowed in VALUES"), while
-- multiple unnest() calls in a SELECT list expand in lockstep, pairing the
-- array elements row by row.
INSERT INTO runes_balances ("pkscript", "block_height", "rune_id", "amount")
SELECT
  unnest(@pkscript_arr::TEXT[]),
  unnest(@block_height_arr::INT[]),
  unnest(@rune_id_arr::TEXT[]),
  unnest(@amount_arr::DECIMAL[]);
|
||||
|
||||
-- name: BatchCreateRuneEntries :exec
-- Bulk-inserts rune entries from parallel arrays. SELECT + unnest() is used
-- instead of VALUES(unnest(...)), which PostgreSQL rejects (set-returning
-- functions are not allowed in VALUES); parallel unnest() calls in a SELECT
-- list expand in lockstep.
INSERT INTO runes_entries ("rune_id", "rune", "number", "spacers", "premine", "symbol", "divisibility", "terms", "terms_amount", "terms_cap", "terms_height_start", "terms_height_end", "terms_offset_start", "terms_offset_end", "turbo", "etching_block", "etching_tx_hash", "etched_at")
SELECT
  unnest(@rune_id_arr::TEXT[]),
  unnest(@rune_arr::TEXT[]),
  unnest(@number_arr::BIGINT[]),
  unnest(@spacers_arr::INT[]),
  unnest(@premine_arr::DECIMAL[]),
  unnest(@symbol_arr::INT[]),
  unnest(@divisibility_arr::SMALLINT[]),
  unnest(@terms_arr::BOOLEAN[]),
  unnest(@terms_amount_arr::DECIMAL[]),
  unnest(@terms_cap_arr::DECIMAL[]),
  unnest(@terms_height_start_arr::INT[]), -- nullable (need patch)
  unnest(@terms_height_end_arr::INT[]), -- nullable (need patch)
  unnest(@terms_offset_start_arr::INT[]), -- nullable (need patch)
  unnest(@terms_offset_end_arr::INT[]), -- nullable (need patch)
  unnest(@turbo_arr::BOOLEAN[]),
  unnest(@etching_block_arr::INT[]),
  unnest(@etching_tx_hash_arr::TEXT[]),
  unnest(@etched_at_arr::TIMESTAMP[]);
|
||||
|
||||
-- name: BatchCreateRuneEntryStates :exec
-- Bulk-inserts rune entry states from parallel arrays. SELECT + unnest() is
-- used instead of VALUES(unnest(...)), which PostgreSQL rejects
-- (set-returning functions are not allowed in VALUES).
INSERT INTO runes_entry_states ("rune_id", "block_height", "mints", "burned_amount", "completed_at", "completed_at_height")
SELECT
  unnest(@rune_id_arr::TEXT[]),
  unnest(@block_height_arr::INT[]),
  unnest(@mints_arr::DECIMAL[]),
  unnest(@burned_amount_arr::DECIMAL[]),
  unnest(@completed_at_arr::TIMESTAMP[]),
  unnest(@completed_at_height_arr::INT[]); -- nullable (need patch)
|
||||
|
||||
-- name: BatchCreateRunesOutpointBalances :exec
-- Bulk-inserts outpoint balances from parallel arrays. SELECT + unnest() is
-- used instead of VALUES(unnest(...)), which PostgreSQL rejects
-- (set-returning functions are not allowed in VALUES).
INSERT INTO runes_outpoint_balances ("rune_id", "pkscript", "tx_hash", "tx_idx", "amount", "block_height", "spent_height")
SELECT
  unnest(@rune_id_arr::TEXT[]),
  unnest(@pkscript_arr::TEXT[]),
  unnest(@tx_hash_arr::TEXT[]),
  unnest(@tx_idx_arr::INT[]),
  unnest(@amount_arr::DECIMAL[]),
  unnest(@block_height_arr::INT[]),
  unnest(@spent_height_arr::INT[]); -- nullable (need patch)
|
||||
|
||||
-- name: BatchSpendOutpointBalances :exec
-- Marks a batch of outpoints as spent at @spent_height. The subquery zips
-- the parallel tx_hash/tx_idx arrays into rows (unnest() calls in the same
-- SELECT list expand in lockstep), and every matching outpoint balance has
-- its spent_height set.
UPDATE runes_outpoint_balances
SET "spent_height" = @spent_height::INT
FROM (
  SELECT
    unnest(@tx_hash_arr::TEXT[]) AS tx_hash,
    unnest(@tx_idx_arr::INT[]) AS tx_idx
) AS input
WHERE "runes_outpoint_balances"."tx_hash" = "input"."tx_hash" AND "runes_outpoint_balances"."tx_idx" = "input"."tx_idx";
|
||||
|
||||
-- name: BatchCreateRunestones :exec
-- Bulk-inserts parsed runestones from parallel arrays. SELECT + unnest() is
-- used instead of VALUES(unnest(...)), which PostgreSQL rejects
-- (set-returning functions are not allowed in VALUES); parallel unnest()
-- calls in a SELECT list expand in lockstep.
INSERT INTO runes_runestones ("tx_hash", "block_height", "etching", "etching_divisibility", "etching_premine", "etching_rune", "etching_spacers", "etching_symbol", "etching_terms", "etching_terms_amount", "etching_terms_cap", "etching_terms_height_start", "etching_terms_height_end", "etching_terms_offset_start", "etching_terms_offset_end", "etching_turbo", "edicts", "mint", "pointer", "cenotaph", "flaws")
SELECT
  unnest(@tx_hash_arr::TEXT[]),
  unnest(@block_height_arr::INT[]),
  unnest(@etching_arr::BOOLEAN[]),
  unnest(@etching_divisibility_arr::SMALLINT[]), -- nullable (need patch)
  unnest(@etching_premine_arr::DECIMAL[]),
  unnest(@etching_rune_arr::TEXT[]), -- nullable (need patch)
  unnest(@etching_spacers_arr::INT[]), -- nullable (need patch)
  unnest(@etching_symbol_arr::INT[]), -- nullable (need patch)
  unnest(@etching_terms_arr::BOOLEAN[]), -- nullable (need patch)
  unnest(@etching_terms_amount_arr::DECIMAL[]),
  unnest(@etching_terms_cap_arr::DECIMAL[]),
  unnest(@etching_terms_height_start_arr::INT[]), -- nullable (need patch)
  unnest(@etching_terms_height_end_arr::INT[]), -- nullable (need patch)
  unnest(@etching_terms_offset_start_arr::INT[]), -- nullable (need patch)
  unnest(@etching_terms_offset_end_arr::INT[]), -- nullable (need patch)
  unnest(@etching_turbo_arr::BOOLEAN[]), -- nullable (need patch)
  unnest(@edicts_arr::JSONB[]),
  unnest(@mint_arr::TEXT[]), -- nullable (need patch)
  unnest(@pointer_arr::INT[]), -- nullable (need patch)
  unnest(@cenotaph_arr::BOOLEAN[]),
  unnest(@flaws_arr::INT[]);
|
||||
|
||||
-- name: BatchCreateRuneTransactions :exec
-- Bulk-inserts rune transactions from parallel arrays. SELECT + unnest() is
-- used instead of VALUES(unnest(...)), which PostgreSQL rejects
-- (set-returning functions are not allowed in VALUES).
INSERT INTO runes_transactions ("hash", "block_height", "index", "timestamp", "inputs", "outputs", "mints", "burns", "rune_etched")
SELECT
  unnest(@hash_arr::TEXT[]),
  unnest(@block_height_arr::INT[]),
  unnest(@index_arr::INT[]),
  unnest(@timestamp_arr::TIMESTAMP[]),
  unnest(@inputs_arr::JSONB[]),
  unnest(@outputs_arr::JSONB[]),
  unnest(@mints_arr::JSONB[]),
  unnest(@burns_arr::JSONB[]),
  unnest(@rune_etched_arr::BOOLEAN[]);
|
||||
@@ -2,22 +2,48 @@
|
||||
WITH balances AS (
|
||||
SELECT DISTINCT ON (rune_id) * FROM runes_balances WHERE pkscript = $1 AND block_height <= $2 ORDER BY rune_id, block_height DESC
|
||||
)
|
||||
SELECT * FROM balances WHERE amount > 0;
|
||||
SELECT * FROM balances WHERE amount > 0 ORDER BY amount DESC, rune_id LIMIT $3 OFFSET $4;
|
||||
|
||||
-- name: GetBalancesByRuneId :many
|
||||
WITH balances AS (
|
||||
SELECT DISTINCT ON (pkscript) * FROM runes_balances WHERE rune_id = $1 AND block_height <= $2 ORDER BY pkscript, block_height DESC
|
||||
)
|
||||
SELECT * FROM balances WHERE amount > 0;
|
||||
SELECT * FROM balances WHERE amount > 0 ORDER BY amount DESC, pkscript LIMIT $3 OFFSET $4;
|
||||
|
||||
-- name: GetBalanceByPkScriptAndRuneId :one
|
||||
SELECT * FROM runes_balances WHERE pkscript = $1 AND rune_id = $2 AND block_height <= $3 ORDER BY block_height DESC LIMIT 1;
|
||||
|
||||
-- name: GetTotalHoldersByRuneIds :many
|
||||
WITH balances AS (
|
||||
SELECT DISTINCT ON (rune_id, pkscript) * FROM runes_balances WHERE rune_id = ANY(@rune_ids::TEXT[]) AND block_height <= @block_height ORDER BY rune_id, pkscript, block_height DESC
|
||||
)
|
||||
SELECT rune_id, COUNT(DISTINCT pkscript) FROM balances WHERE amount > 0 GROUP BY rune_id;
|
||||
|
||||
-- name: GetOutPointBalancesAtOutPoint :many
|
||||
SELECT * FROM runes_outpoint_balances WHERE tx_hash = $1 AND tx_idx = $2;
|
||||
|
||||
-- name: GetUnspentOutPointBalancesByPkScript :many
|
||||
SELECT * FROM runes_outpoint_balances WHERE pkscript = @pkScript AND block_height <= @block_height AND (spent_height IS NULL OR spent_height > @block_height);
|
||||
-- name: GetRunesUTXOsByPkScript :many
|
||||
SELECT tx_hash, tx_idx, max("pkscript") as pkscript, array_agg("rune_id") as rune_ids, array_agg("amount") as amounts
|
||||
FROM runes_outpoint_balances
|
||||
WHERE
|
||||
pkscript = @pkScript AND
|
||||
block_height <= @block_height AND
|
||||
(spent_height IS NULL OR spent_height > @block_height)
|
||||
GROUP BY tx_hash, tx_idx
|
||||
ORDER BY tx_hash, tx_idx
|
||||
LIMIT $1 OFFSET $2;
|
||||
|
||||
-- name: GetRunesUTXOsByRuneIdAndPkScript :many
|
||||
SELECT tx_hash, tx_idx, max("pkscript") as pkscript, array_agg("rune_id") as rune_ids, array_agg("amount") as amounts
|
||||
FROM runes_outpoint_balances
|
||||
WHERE
|
||||
pkscript = @pkScript AND
|
||||
block_height <= @block_height AND
|
||||
(spent_height IS NULL OR spent_height > @block_height)
|
||||
GROUP BY tx_hash, tx_idx
|
||||
HAVING array_agg("rune_id") @> @rune_ids::text[]
|
||||
ORDER BY tx_hash, tx_idx
|
||||
LIMIT $1 OFFSET $2;
|
||||
|
||||
-- name: GetRuneEntriesByRuneIds :many
|
||||
WITH states AS (
|
||||
@@ -37,6 +63,49 @@ SELECT * FROM runes_entries
|
||||
LEFT JOIN states ON runes_entries.rune_id = states.rune_id
|
||||
WHERE runes_entries.rune_id = ANY(@rune_ids::text[]) AND etching_block <= @height;
|
||||
|
||||
-- name: GetRuneEntries :many
|
||||
WITH states AS (
|
||||
-- select latest state
|
||||
SELECT DISTINCT ON (rune_id) * FROM runes_entry_states WHERE block_height <= @height ORDER BY rune_id, block_height DESC
|
||||
)
|
||||
SELECT * FROM runes_entries
|
||||
LEFT JOIN states ON runes_entries.rune_id = states.rune_id
|
||||
WHERE (
|
||||
@search = '' OR
|
||||
runes_entries.rune ILIKE @search || '%'
|
||||
)
|
||||
ORDER BY runes_entries.number
|
||||
LIMIT @_limit OFFSET @_offset;
|
||||
|
||||
-- name: GetOngoingRuneEntries :many
|
||||
WITH states AS (
|
||||
-- select latest state
|
||||
SELECT DISTINCT ON (rune_id) * FROM runes_entry_states WHERE block_height <= @height::integer ORDER BY rune_id, block_height DESC
|
||||
)
|
||||
SELECT * FROM runes_entries
|
||||
LEFT JOIN states ON runes_entries.rune_id = states.rune_id
|
||||
WHERE (
|
||||
runes_entries.terms = TRUE AND
|
||||
COALESCE(runes_entries.terms_amount, 0) != 0 AND
|
||||
COALESCE(runes_entries.terms_cap, 0) != 0 AND
|
||||
states.mints < runes_entries.terms_cap AND
|
||||
(
|
||||
runes_entries.terms_height_start IS NULL OR runes_entries.terms_height_start <= @height::integer
|
||||
) AND (
|
||||
runes_entries.terms_height_end IS NULL OR @height::integer <= runes_entries.terms_height_end
|
||||
) AND (
|
||||
runes_entries.terms_offset_start IS NULL OR runes_entries.terms_offset_start + runes_entries.etching_block <= @height::integer
|
||||
) AND (
|
||||
runes_entries.terms_offset_end IS NULL OR @height::integer <= runes_entries.terms_offset_start + runes_entries.etching_block
|
||||
)
|
||||
|
||||
) AND (
|
||||
@search::text = '' OR
|
||||
runes_entries.rune ILIKE '%' || @search::text || '%'
|
||||
)
|
||||
ORDER BY states.mints DESC
|
||||
LIMIT @_limit OFFSET @_offset;
|
||||
|
||||
-- name: GetRuneIdFromRune :one
|
||||
SELECT rune_id FROM runes_entries WHERE rune = $1;
|
||||
|
||||
@@ -57,7 +126,12 @@ SELECT * FROM runes_transactions
|
||||
) AND (
|
||||
@from_block <= runes_transactions.block_height AND runes_transactions.block_height <= @to_block
|
||||
)
|
||||
ORDER BY runes_transactions.block_height DESC LIMIT 10000;
|
||||
ORDER BY runes_transactions.block_height DESC, runes_transactions.index DESC LIMIT $1 OFFSET $2;
|
||||
|
||||
-- name: GetRuneTransaction :one
|
||||
SELECT * FROM runes_transactions
|
||||
LEFT JOIN runes_runestones ON runes_transactions.hash = runes_runestones.tx_hash
|
||||
WHERE hash = $1 LIMIT 1;
|
||||
|
||||
-- name: CountRuneEntries :one
|
||||
SELECT COUNT(*) FROM runes_entries;
|
||||
@@ -76,13 +150,13 @@ INSERT INTO runes_transactions (hash, block_height, index, timestamp, inputs, ou
|
||||
INSERT INTO runes_runestones (tx_hash, block_height, etching, etching_divisibility, etching_premine, etching_rune, etching_spacers, etching_symbol, etching_terms, etching_terms_amount, etching_terms_cap, etching_terms_height_start, etching_terms_height_end, etching_terms_offset_start, etching_terms_offset_end, etching_turbo, edicts, mint, pointer, cenotaph, flaws)
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21);
|
||||
|
||||
-- name: CreateOutPointBalances :batchexec
|
||||
-- name: CreateOutPointBalance :exec
|
||||
INSERT INTO runes_outpoint_balances (rune_id, pkscript, tx_hash, tx_idx, amount, block_height, spent_height) VALUES ($1, $2, $3, $4, $5, $6, $7);
|
||||
|
||||
-- name: SpendOutPointBalances :exec
|
||||
-- name: SpendOutPointBalance :exec
|
||||
UPDATE runes_outpoint_balances SET spent_height = $1 WHERE tx_hash = $2 AND tx_idx = $3;
|
||||
|
||||
-- name: CreateRuneBalanceAtBlock :batchexec
|
||||
-- name: CreateRuneBalance :exec
|
||||
INSERT INTO runes_balances (pkscript, block_height, rune_id, amount) VALUES ($1, $2, $3, $4);
|
||||
|
||||
-- name: GetLatestIndexedBlock :one
|
||||
|
||||
@@ -3,11 +3,11 @@ package datagateway
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/gaze-network/indexer-network/core/types"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/runes"
|
||||
"github.com/gaze-network/uint128"
|
||||
)
|
||||
|
||||
type RunesDataGateway interface {
|
||||
@@ -27,10 +27,12 @@ type RunesReaderDataGateway interface {
|
||||
GetLatestBlock(ctx context.Context) (types.BlockHeader, error)
|
||||
GetIndexedBlockByHeight(ctx context.Context, height int64) (*entity.IndexedBlock, error)
|
||||
// GetRuneTransactions returns the runes transactions, filterable by pkScript, runeId and height. If pkScript, runeId or height is zero value, that filter is ignored.
|
||||
GetRuneTransactions(ctx context.Context, pkScript []byte, runeId runes.RuneId, fromBlock, toBlock uint64) ([]*entity.RuneTransaction, error)
|
||||
GetRuneTransactions(ctx context.Context, pkScript []byte, runeId runes.RuneId, fromBlock, toBlock uint64, limit int32, offset int32) ([]*entity.RuneTransaction, error)
|
||||
GetRuneTransaction(ctx context.Context, txHash chainhash.Hash) (*entity.RuneTransaction, error)
|
||||
|
||||
GetRunesBalancesAtOutPoint(ctx context.Context, outPoint wire.OutPoint) (map[runes.RuneId]*entity.OutPointBalance, error)
|
||||
GetUnspentOutPointBalancesByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64) ([]*entity.OutPointBalance, error)
|
||||
GetRunesUTXOsByRuneIdAndPkScript(ctx context.Context, runeId runes.RuneId, pkScript []byte, blockHeight uint64, limit int32, offset int32) ([]*entity.RunesUTXO, error)
|
||||
GetRunesUTXOsByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64, limit int32, offset int32) ([]*entity.RunesUTXO, error)
|
||||
// GetRuneIdFromRune returns the RuneId for the given rune. Returns errs.NotFound if the rune entry is not found.
|
||||
GetRuneIdFromRune(ctx context.Context, rune runes.Rune) (runes.RuneId, error)
|
||||
// GetRuneEntryByRuneId returns the RuneEntry for the given runeId. Returns errs.NotFound if the rune entry is not found.
|
||||
@@ -41,25 +43,33 @@ type RunesReaderDataGateway interface {
|
||||
GetRuneEntryByRuneIdAndHeight(ctx context.Context, runeId runes.RuneId, blockHeight uint64) (*runes.RuneEntry, error)
|
||||
// GetRuneEntryByRuneIdAndHeightBatch returns the RuneEntries for the given runeIds and block height.
|
||||
GetRuneEntryByRuneIdAndHeightBatch(ctx context.Context, runeIds []runes.RuneId, blockHeight uint64) (map[runes.RuneId]*runes.RuneEntry, error)
|
||||
// GetRuneEntries returns a list of rune entries, sorted by etching order. If search is not empty, it will filter the results by rune name (prefix).
|
||||
GetRuneEntries(ctx context.Context, search string, blockHeight uint64, limit int32, offset int32) ([]*runes.RuneEntry, error)
|
||||
// GetOngoingRuneEntries returns a list of ongoing rune entries (can still mint), sorted by mint progress percent. If search is not empty, it will filter the results by rune name (prefix).
|
||||
GetOngoingRuneEntries(ctx context.Context, search string, blockHeight uint64, limit int32, offset int32) ([]*runes.RuneEntry, error)
|
||||
// CountRuneEntries returns the number of existing rune entries.
|
||||
CountRuneEntries(ctx context.Context) (uint64, error)
|
||||
|
||||
// GetBalancesByPkScript returns the balances for the given pkScript at the given blockHeight.
|
||||
GetBalancesByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64) (map[runes.RuneId]*entity.Balance, error)
|
||||
// Use limit = -1 as no limit.
|
||||
GetBalancesByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64, limit int32, offset int32) ([]*entity.Balance, error)
|
||||
// GetBalancesByRuneId returns the balances for the given runeId at the given blockHeight.
|
||||
// Cannot use []byte as map key, so we're returning as slice.
|
||||
GetBalancesByRuneId(ctx context.Context, runeId runes.RuneId, blockHeight uint64) ([]*entity.Balance, error)
|
||||
// Use limit = -1 as no limit.
|
||||
GetBalancesByRuneId(ctx context.Context, runeId runes.RuneId, blockHeight uint64, limit int32, offset int32) ([]*entity.Balance, error)
|
||||
// GetBalancesByPkScriptAndRuneId returns the balance for the given pkScript and runeId at the given blockHeight.
|
||||
GetBalanceByPkScriptAndRuneId(ctx context.Context, pkScript []byte, runeId runes.RuneId, blockHeight uint64) (*entity.Balance, error)
|
||||
// GetTotalHoldersByRuneIds returns the total holders of each the given runeIds.
|
||||
GetTotalHoldersByRuneIds(ctx context.Context, runeIds []runes.RuneId, blockHeight uint64) (map[runes.RuneId]int64, error)
|
||||
}
|
||||
|
||||
type RunesWriterDataGateway interface {
|
||||
CreateRuneEntry(ctx context.Context, entry *runes.RuneEntry, blockHeight uint64) error
|
||||
CreateRuneEntryState(ctx context.Context, entry *runes.RuneEntry, blockHeight uint64) error
|
||||
CreateRuneEntries(ctx context.Context, entries []*runes.RuneEntry) error
|
||||
CreateRuneEntryStates(ctx context.Context, entries []*runes.RuneEntry, blockHeight uint64) error
|
||||
CreateOutPointBalances(ctx context.Context, outPointBalances []*entity.OutPointBalance) error
|
||||
SpendOutPointBalances(ctx context.Context, outPoint wire.OutPoint, blockHeight uint64) error
|
||||
CreateRuneBalances(ctx context.Context, params []CreateRuneBalancesParams) error
|
||||
CreateRuneTransaction(ctx context.Context, tx *entity.RuneTransaction) error
|
||||
SpendOutPointBalancesBatch(ctx context.Context, outPoints []wire.OutPoint, blockHeight uint64) error
|
||||
CreateRuneBalances(ctx context.Context, params []*entity.Balance) error
|
||||
CreateRuneTransactions(ctx context.Context, txs []*entity.RuneTransaction) error
|
||||
CreateIndexedBlock(ctx context.Context, block *entity.IndexedBlock) error
|
||||
|
||||
// TODO: collapse these into a single function (ResetStateToHeight)?
|
||||
@@ -72,10 +82,3 @@ type RunesWriterDataGateway interface {
|
||||
UnspendOutPointBalancesSinceHeight(ctx context.Context, height uint64) error
|
||||
DeleteRuneBalancesSinceHeight(ctx context.Context, height uint64) error
|
||||
}
|
||||
|
||||
type CreateRuneBalancesParams struct {
|
||||
PkScript []byte
|
||||
RuneId runes.RuneId
|
||||
Balance uint128.Uint128
|
||||
BlockHeight uint64
|
||||
}
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/core/types"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/constants"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/runes"
|
||||
"github.com/gaze-network/uint128"
|
||||
@@ -28,7 +29,7 @@ func (p *Processor) calculateEventHash(header types.BlockHeader) (chainhash.Hash
|
||||
|
||||
func (p *Processor) getHashPayload(header types.BlockHeader) ([]byte, error) {
|
||||
var sb strings.Builder
|
||||
sb.WriteString("payload:v" + strconv.Itoa(EventHashVersion) + ":")
|
||||
sb.WriteString("payload:v" + strconv.Itoa(constants.EventHashVersion) + ":")
|
||||
sb.WriteString("blockHash:")
|
||||
sb.Write(header.Hash[:])
|
||||
|
||||
|
||||
23
modules/runes/internal/entity/runes_utxo.go
Normal file
23
modules/runes/internal/entity/runes_utxo.go
Normal file
@@ -0,0 +1,23 @@
|
||||
package entity
|
||||
|
||||
import (
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/runes"
|
||||
"github.com/gaze-network/uint128"
|
||||
)
|
||||
|
||||
type RunesUTXOBalance struct {
|
||||
RuneId runes.RuneId
|
||||
Amount uint128.Uint128
|
||||
}
|
||||
|
||||
type RunesUTXO struct {
|
||||
PkScript []byte
|
||||
OutPoint wire.OutPoint
|
||||
RuneBalances []RunesUTXOBalance
|
||||
}
|
||||
|
||||
type RunesUTXOWithSats struct {
|
||||
RunesUTXO
|
||||
Sats int64
|
||||
}
|
||||
@@ -4,13 +4,13 @@ import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/core/indexer"
|
||||
"github.com/gaze-network/indexer-network/core/types"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/constants"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/datagateway"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/runes"
|
||||
@@ -19,7 +19,6 @@ import (
|
||||
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
|
||||
"github.com/gaze-network/indexer-network/pkg/reportingclient"
|
||||
"github.com/gaze-network/uint128"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
// Make sure to implement the Bitcoin Processor interface
|
||||
@@ -68,8 +67,8 @@ func (p *Processor) VerifyStates(ctx context.Context) error {
|
||||
if err := p.ensureValidState(ctx); err != nil {
|
||||
return errors.Wrap(err, "error during ensureValidState")
|
||||
}
|
||||
if p.network == common.NetworkMainnet {
|
||||
if err := p.ensureGenesisRune(ctx); err != nil {
|
||||
if constants.NetworkHasGenesisRune(p.network) {
|
||||
if err := p.ensureGenesisRune(ctx, p.network); err != nil {
|
||||
return errors.Wrap(err, "error during ensureGenesisRune")
|
||||
}
|
||||
}
|
||||
@@ -89,17 +88,17 @@ func (p *Processor) ensureValidState(ctx context.Context) error {
|
||||
// if not found, set indexer state
|
||||
if errors.Is(err, errs.NotFound) {
|
||||
if err := p.indexerInfoDg.SetIndexerState(ctx, entity.IndexerState{
|
||||
DBVersion: DBVersion,
|
||||
EventHashVersion: EventHashVersion,
|
||||
DBVersion: constants.DBVersion,
|
||||
EventHashVersion: constants.EventHashVersion,
|
||||
}); err != nil {
|
||||
return errors.Wrap(err, "failed to set indexer state")
|
||||
}
|
||||
} else {
|
||||
if indexerState.DBVersion != DBVersion {
|
||||
return errors.Wrapf(errs.ConflictSetting, "db version mismatch: current version is %d. Please upgrade to version %d", indexerState.DBVersion, DBVersion)
|
||||
if indexerState.DBVersion != constants.DBVersion {
|
||||
return errors.Wrapf(errs.ConflictSetting, "db version mismatch: current version is %d. Please upgrade to version %d", indexerState.DBVersion, constants.DBVersion)
|
||||
}
|
||||
if indexerState.EventHashVersion != EventHashVersion {
|
||||
return errors.Wrapf(errs.ConflictSetting, "event version mismatch: current version is %d. Please reset rune's db first.", indexerState.EventHashVersion, EventHashVersion)
|
||||
if indexerState.EventHashVersion != constants.EventHashVersion {
|
||||
return errors.Wrapf(errs.ConflictSetting, "event version mismatch: current version is %d. Please reset rune's db first.", indexerState.EventHashVersion, constants.EventHashVersion)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -119,39 +118,37 @@ func (p *Processor) ensureValidState(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
var genesisRuneId = runes.RuneId{BlockHeight: 1, TxIndex: 0}
|
||||
|
||||
func (p *Processor) ensureGenesisRune(ctx context.Context) error {
|
||||
_, err := p.runesDg.GetRuneEntryByRuneId(ctx, genesisRuneId)
|
||||
func (p *Processor) ensureGenesisRune(ctx context.Context, network common.Network) error {
|
||||
genesisRuneConfig, ok := constants.GenesisRuneConfigMap[network]
|
||||
if !ok {
|
||||
logger.Panic("genesis rune config not found", slogx.Stringer("network", network))
|
||||
}
|
||||
_, err := p.runesDg.GetRuneEntryByRuneId(ctx, genesisRuneConfig.RuneId)
|
||||
if err != nil && !errors.Is(err, errs.NotFound) {
|
||||
return errors.Wrap(err, "failed to get genesis rune entry")
|
||||
}
|
||||
if errors.Is(err, errs.NotFound) {
|
||||
runeEntry := &runes.RuneEntry{
|
||||
RuneId: genesisRuneId,
|
||||
Number: 0,
|
||||
Divisibility: 0,
|
||||
Premine: uint128.Zero,
|
||||
SpacedRune: runes.NewSpacedRune(runes.NewRune(2055900680524219742), 0b10000000),
|
||||
Symbol: '\u29c9',
|
||||
Terms: &runes.Terms{
|
||||
Amount: lo.ToPtr(uint128.From64(1)),
|
||||
Cap: &uint128.Max,
|
||||
HeightStart: lo.ToPtr(uint64(common.HalvingInterval * 4)),
|
||||
HeightEnd: lo.ToPtr(uint64(common.HalvingInterval * 5)),
|
||||
OffsetStart: nil,
|
||||
OffsetEnd: nil,
|
||||
},
|
||||
Turbo: true,
|
||||
RuneId: genesisRuneConfig.RuneId,
|
||||
Number: genesisRuneConfig.Number,
|
||||
Divisibility: genesisRuneConfig.Divisibility,
|
||||
Premine: genesisRuneConfig.Premine,
|
||||
SpacedRune: genesisRuneConfig.SpacedRune,
|
||||
Symbol: genesisRuneConfig.Symbol,
|
||||
Terms: genesisRuneConfig.Terms,
|
||||
Turbo: genesisRuneConfig.Turbo,
|
||||
Mints: uint128.Zero,
|
||||
BurnedAmount: uint128.Zero,
|
||||
CompletedAt: time.Time{},
|
||||
CompletedAtHeight: nil,
|
||||
EtchingBlock: 1,
|
||||
EtchingTxHash: chainhash.Hash{},
|
||||
EtchedAt: time.Time{},
|
||||
EtchingBlock: genesisRuneConfig.RuneId.BlockHeight,
|
||||
EtchingTxHash: genesisRuneConfig.EtchingTxHash,
|
||||
EtchedAt: genesisRuneConfig.EtchedAt,
|
||||
}
|
||||
if err := p.runesDg.CreateRuneEntry(ctx, runeEntry, genesisRuneId.BlockHeight); err != nil {
|
||||
if err := p.runesDg.CreateRuneEntries(ctx, []*runes.RuneEntry{runeEntry}); err != nil {
|
||||
return errors.Wrap(err, "failed to create genesis rune entry")
|
||||
}
|
||||
if err := p.runesDg.CreateRuneEntryStates(ctx, []*runes.RuneEntry{runeEntry}, genesisRuneConfig.RuneId.BlockHeight); err != nil {
|
||||
return errors.Wrap(err, "failed to create genesis rune entry")
|
||||
}
|
||||
}
|
||||
@@ -166,7 +163,7 @@ func (p *Processor) CurrentBlock(ctx context.Context) (types.BlockHeader, error)
|
||||
blockHeader, err := p.runesDg.GetLatestBlock(ctx)
|
||||
if err != nil {
|
||||
if errors.Is(err, errs.NotFound) {
|
||||
return startingBlockHeader[p.network], nil
|
||||
return constants.StartingBlockHeader[p.network], nil
|
||||
}
|
||||
return types.BlockHeader{}, errors.Wrap(err, "failed to get latest block")
|
||||
}
|
||||
|
||||
@@ -13,7 +13,7 @@ import (
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/core/types"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/datagateway"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/constants"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/runes"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger"
|
||||
@@ -26,19 +26,26 @@ import (
|
||||
func (p *Processor) Process(ctx context.Context, blocks []*types.Block) error {
|
||||
for _, block := range blocks {
|
||||
ctx := logger.WithContext(ctx, slog.Int64("height", block.Header.Height))
|
||||
logger.DebugContext(ctx, "Processing new block", slog.Int("txs", len(block.Transactions)))
|
||||
logger.InfoContext(ctx, "Processing new block",
|
||||
slogx.String("event", "runes_processor_processing_block"),
|
||||
slog.Int("txs", len(block.Transactions)),
|
||||
)
|
||||
|
||||
start := time.Now()
|
||||
for _, tx := range block.Transactions {
|
||||
if err := p.processTx(ctx, tx, block.Header); err != nil {
|
||||
return errors.Wrap(err, "failed to process tx")
|
||||
}
|
||||
}
|
||||
timeTakenToProcess := time.Since(start)
|
||||
logger.InfoContext(ctx, "Processed block",
|
||||
slogx.String("event", "runes_processor_processed_block"),
|
||||
slog.Duration("time_taken", timeTakenToProcess),
|
||||
)
|
||||
|
||||
if err := p.flushBlock(ctx, block.Header); err != nil {
|
||||
return errors.Wrap(err, "failed to flush block")
|
||||
}
|
||||
|
||||
logger.DebugContext(ctx, "Inserted new block")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -146,7 +153,7 @@ func (p *Processor) processTx(ctx context.Context, tx *types.Transaction, blockH
|
||||
// find all non-OP_RETURN outputs
|
||||
var destinations []int
|
||||
for i, txOut := range tx.TxOut {
|
||||
if txOut.IsOpReturn() {
|
||||
if !txOut.IsOpReturn() {
|
||||
destinations = append(destinations, i)
|
||||
}
|
||||
}
|
||||
@@ -466,7 +473,7 @@ func (p *Processor) txCommitsToRune(ctx context.Context, tx *types.Transaction,
|
||||
// It is impossible to verify that input utxo is a P2TR output with just the input.
|
||||
// Need to verify with utxo's pk script.
|
||||
|
||||
prevTx, err := p.bitcoinClient.GetTransactionByHash(ctx, txIn.PreviousOutTxHash)
|
||||
prevTx, blockHeight, err := p.bitcoinClient.GetRawTransactionAndHeightByTxHash(ctx, txIn.PreviousOutTxHash)
|
||||
if err != nil && errors.Is(err, errs.NotFound) {
|
||||
continue
|
||||
}
|
||||
@@ -479,7 +486,7 @@ func (p *Processor) txCommitsToRune(ctx context.Context, tx *types.Transaction,
|
||||
break
|
||||
}
|
||||
// input must be mature enough
|
||||
confirmations := tx.BlockHeight - prevTx.BlockHeight + 1
|
||||
confirmations := tx.BlockHeight - blockHeight + 1
|
||||
if confirmations < runes.RUNE_COMMIT_BLOCKS {
|
||||
continue
|
||||
}
|
||||
@@ -668,6 +675,7 @@ func (p *Processor) getRunesBalancesAtOutPoint(ctx context.Context, outPoint wir
|
||||
}
|
||||
|
||||
func (p *Processor) flushBlock(ctx context.Context, blockHeader types.BlockHeader) error {
|
||||
start := time.Now()
|
||||
runesDgTx, err := p.runesDg.BeginRunesTx(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to begin runes tx")
|
||||
@@ -687,10 +695,10 @@ func (p *Processor) flushBlock(ctx context.Context, blockHeader types.BlockHeade
|
||||
return errors.Wrap(err, "failed to calculate event hash")
|
||||
}
|
||||
prevIndexedBlock, err := runesDgTx.GetIndexedBlockByHeight(ctx, blockHeader.Height-1)
|
||||
if err != nil && errors.Is(err, errs.NotFound) && blockHeader.Height-1 == startingBlockHeader[p.network].Height {
|
||||
if err != nil && errors.Is(err, errs.NotFound) && blockHeader.Height-1 == constants.StartingBlockHeader[p.network].Height {
|
||||
prevIndexedBlock = &entity.IndexedBlock{
|
||||
Height: startingBlockHeader[p.network].Height,
|
||||
Hash: startingBlockHeader[p.network].Hash,
|
||||
Height: constants.StartingBlockHeader[p.network].Height,
|
||||
Hash: chainhash.Hash{},
|
||||
EventHash: chainhash.Hash{},
|
||||
CumulativeEventHash: chainhash.Hash{},
|
||||
}
|
||||
@@ -714,86 +722,90 @@ func (p *Processor) flushBlock(ctx context.Context, blockHeader types.BlockHeade
|
||||
return errors.Wrap(err, "failed to create indexed block")
|
||||
}
|
||||
// flush new rune entries
|
||||
{
|
||||
for _, runeEntry := range p.newRuneEntries {
|
||||
if err := runesDgTx.CreateRuneEntry(ctx, runeEntry, uint64(blockHeader.Height)); err != nil {
|
||||
return errors.Wrap(err, "failed to create rune entry")
|
||||
}
|
||||
}
|
||||
p.newRuneEntries = make(map[runes.RuneId]*runes.RuneEntry)
|
||||
newRuneEntries := lo.Values(p.newRuneEntries)
|
||||
if err := runesDgTx.CreateRuneEntries(ctx, newRuneEntries); err != nil {
|
||||
return errors.Wrap(err, "failed to create rune entry")
|
||||
}
|
||||
p.newRuneEntries = make(map[runes.RuneId]*runes.RuneEntry)
|
||||
|
||||
// flush new rune entry states
|
||||
{
|
||||
for _, runeEntry := range p.newRuneEntryStates {
|
||||
if err := runesDgTx.CreateRuneEntryState(ctx, runeEntry, uint64(blockHeader.Height)); err != nil {
|
||||
return errors.Wrap(err, "failed to create rune entry state")
|
||||
}
|
||||
}
|
||||
p.newRuneEntryStates = make(map[runes.RuneId]*runes.RuneEntry)
|
||||
newRuneEntryStates := lo.Values(p.newRuneEntryStates)
|
||||
if err := runesDgTx.CreateRuneEntryStates(ctx, newRuneEntryStates, uint64(blockHeader.Height)); err != nil {
|
||||
return errors.Wrap(err, "failed to create rune entry state")
|
||||
}
|
||||
p.newRuneEntryStates = make(map[runes.RuneId]*runes.RuneEntry)
|
||||
|
||||
// flush new outpoint balances
|
||||
{
|
||||
newBalances := make([]*entity.OutPointBalance, 0)
|
||||
for _, balances := range p.newOutPointBalances {
|
||||
newBalances = append(newBalances, balances...)
|
||||
}
|
||||
if err := runesDgTx.CreateOutPointBalances(ctx, newBalances); err != nil {
|
||||
return errors.Wrap(err, "failed to create outpoint balances")
|
||||
}
|
||||
p.newOutPointBalances = make(map[wire.OutPoint][]*entity.OutPointBalance)
|
||||
newOutpointBalances := make([]*entity.OutPointBalance, 0)
|
||||
for _, balances := range p.newOutPointBalances {
|
||||
newOutpointBalances = append(newOutpointBalances, balances...)
|
||||
}
|
||||
if err := runesDgTx.CreateOutPointBalances(ctx, newOutpointBalances); err != nil {
|
||||
return errors.Wrap(err, "failed to create outpoint balances")
|
||||
}
|
||||
p.newOutPointBalances = make(map[wire.OutPoint][]*entity.OutPointBalance)
|
||||
|
||||
// flush new spend outpoints
|
||||
{
|
||||
for _, outPoint := range p.newSpendOutPoints {
|
||||
if err := runesDgTx.SpendOutPointBalances(ctx, outPoint, uint64(blockHeader.Height)); err != nil {
|
||||
return errors.Wrap(err, "failed to create spend outpoint")
|
||||
}
|
||||
}
|
||||
p.newSpendOutPoints = make([]wire.OutPoint, 0)
|
||||
newSpendOutPoints := p.newSpendOutPoints
|
||||
if err := runesDgTx.SpendOutPointBalancesBatch(ctx, newSpendOutPoints, uint64(blockHeader.Height)); err != nil {
|
||||
return errors.Wrap(err, "failed to create spend outpoint")
|
||||
}
|
||||
// flush new balances
|
||||
{
|
||||
params := make([]datagateway.CreateRuneBalancesParams, 0)
|
||||
for pkScriptStr, balances := range p.newBalances {
|
||||
pkScript, err := hex.DecodeString(pkScriptStr)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to decode pk script")
|
||||
}
|
||||
for runeId, balance := range balances {
|
||||
params = append(params, datagateway.CreateRuneBalancesParams{
|
||||
PkScript: pkScript,
|
||||
RuneId: runeId,
|
||||
Balance: balance,
|
||||
BlockHeight: uint64(blockHeader.Height),
|
||||
})
|
||||
}
|
||||
p.newSpendOutPoints = make([]wire.OutPoint, 0)
|
||||
|
||||
// flush new newBalances
|
||||
newBalances := make([]*entity.Balance, 0)
|
||||
for pkScriptStr, balances := range p.newBalances {
|
||||
pkScript, err := hex.DecodeString(pkScriptStr)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to decode pk script")
|
||||
}
|
||||
if err := runesDgTx.CreateRuneBalances(ctx, params); err != nil {
|
||||
return errors.Wrap(err, "failed to create balances at block")
|
||||
for runeId, balance := range balances {
|
||||
newBalances = append(newBalances, &entity.Balance{
|
||||
PkScript: pkScript,
|
||||
RuneId: runeId,
|
||||
Amount: balance,
|
||||
BlockHeight: uint64(blockHeader.Height),
|
||||
})
|
||||
}
|
||||
p.newBalances = make(map[string]map[runes.RuneId]uint128.Uint128)
|
||||
}
|
||||
if err := runesDgTx.CreateRuneBalances(ctx, newBalances); err != nil {
|
||||
return errors.Wrap(err, "failed to create balances at block")
|
||||
}
|
||||
p.newBalances = make(map[string]map[runes.RuneId]uint128.Uint128)
|
||||
|
||||
// flush new rune transactions
|
||||
{
|
||||
for _, runeTx := range p.newRuneTxs {
|
||||
if err := runesDgTx.CreateRuneTransaction(ctx, runeTx); err != nil {
|
||||
return errors.Wrap(err, "failed to create rune transaction")
|
||||
}
|
||||
}
|
||||
p.newRuneTxs = make([]*entity.RuneTransaction, 0)
|
||||
newRuneTxs := p.newRuneTxs
|
||||
if err := runesDgTx.CreateRuneTransactions(ctx, newRuneTxs); err != nil {
|
||||
return errors.Wrap(err, "failed to create rune transaction")
|
||||
}
|
||||
p.newRuneTxs = make([]*entity.RuneTransaction, 0)
|
||||
|
||||
if err := runesDgTx.Commit(ctx); err != nil {
|
||||
return errors.Wrap(err, "failed to commit runes tx")
|
||||
}
|
||||
timeTaken := time.Since(start)
|
||||
logger.InfoContext(ctx, "Flushed block",
|
||||
slogx.String("event", "runes_processor_flushed_block"),
|
||||
slog.Int64("height", blockHeader.Height),
|
||||
slog.String("hash", blockHeader.Hash.String()),
|
||||
slog.String("event_hash", hex.EncodeToString(eventHash[:])),
|
||||
slog.String("cumulative_event_hash", hex.EncodeToString(cumulativeEventHash[:])),
|
||||
slog.Int("new_rune_entries", len(newRuneEntries)),
|
||||
slog.Int("new_rune_entry_states", len(newRuneEntryStates)),
|
||||
slog.Int("new_outpoint_balances", len(newOutpointBalances)),
|
||||
slog.Int("new_spend_outpoints", len(newSpendOutPoints)),
|
||||
slog.Int("new_balances", len(newBalances)),
|
||||
slog.Int("new_rune_txs", len(newRuneTxs)),
|
||||
slogx.Duration("time_taken", timeTaken),
|
||||
)
|
||||
|
||||
// submit event to reporting system
|
||||
if p.reportingClient != nil {
|
||||
if err := p.reportingClient.SubmitBlockReport(ctx, reportingclient.SubmitBlockReportPayload{
|
||||
Type: "runes",
|
||||
ClientVersion: Version,
|
||||
DBVersion: DBVersion,
|
||||
EventHashVersion: EventHashVersion,
|
||||
ClientVersion: constants.Version,
|
||||
DBVersion: constants.DBVersion,
|
||||
EventHashVersion: constants.EventHashVersion,
|
||||
Network: p.network,
|
||||
BlockHeight: uint64(blockHeader.Height),
|
||||
BlockHash: blockHeader.Hash,
|
||||
|
||||
@@ -1,130 +0,0 @@
|
||||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.26.0
|
||||
// source: batch.go
|
||||
|
||||
package gen
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/jackc/pgx/v5/pgtype"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrBatchAlreadyClosed = errors.New("batch already closed")
|
||||
)
|
||||
|
||||
const createOutPointBalances = `-- name: CreateOutPointBalances :batchexec
|
||||
INSERT INTO runes_outpoint_balances (rune_id, pkscript, tx_hash, tx_idx, amount, block_height, spent_height) VALUES ($1, $2, $3, $4, $5, $6, $7)
|
||||
`
|
||||
|
||||
type CreateOutPointBalancesBatchResults struct {
|
||||
br pgx.BatchResults
|
||||
tot int
|
||||
closed bool
|
||||
}
|
||||
|
||||
type CreateOutPointBalancesParams struct {
|
||||
RuneID string
|
||||
Pkscript string
|
||||
TxHash string
|
||||
TxIdx int32
|
||||
Amount pgtype.Numeric
|
||||
BlockHeight int32
|
||||
SpentHeight pgtype.Int4
|
||||
}
|
||||
|
||||
func (q *Queries) CreateOutPointBalances(ctx context.Context, arg []CreateOutPointBalancesParams) *CreateOutPointBalancesBatchResults {
|
||||
batch := &pgx.Batch{}
|
||||
for _, a := range arg {
|
||||
vals := []interface{}{
|
||||
a.RuneID,
|
||||
a.Pkscript,
|
||||
a.TxHash,
|
||||
a.TxIdx,
|
||||
a.Amount,
|
||||
a.BlockHeight,
|
||||
a.SpentHeight,
|
||||
}
|
||||
batch.Queue(createOutPointBalances, vals...)
|
||||
}
|
||||
br := q.db.SendBatch(ctx, batch)
|
||||
return &CreateOutPointBalancesBatchResults{br, len(arg), false}
|
||||
}
|
||||
|
||||
func (b *CreateOutPointBalancesBatchResults) Exec(f func(int, error)) {
|
||||
defer b.br.Close()
|
||||
for t := 0; t < b.tot; t++ {
|
||||
if b.closed {
|
||||
if f != nil {
|
||||
f(t, ErrBatchAlreadyClosed)
|
||||
}
|
||||
continue
|
||||
}
|
||||
_, err := b.br.Exec()
|
||||
if f != nil {
|
||||
f(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *CreateOutPointBalancesBatchResults) Close() error {
|
||||
b.closed = true
|
||||
return b.br.Close()
|
||||
}
|
||||
|
||||
const createRuneBalanceAtBlock = `-- name: CreateRuneBalanceAtBlock :batchexec
|
||||
INSERT INTO runes_balances (pkscript, block_height, rune_id, amount) VALUES ($1, $2, $3, $4)
|
||||
`
|
||||
|
||||
type CreateRuneBalanceAtBlockBatchResults struct {
|
||||
br pgx.BatchResults
|
||||
tot int
|
||||
closed bool
|
||||
}
|
||||
|
||||
type CreateRuneBalanceAtBlockParams struct {
|
||||
Pkscript string
|
||||
BlockHeight int32
|
||||
RuneID string
|
||||
Amount pgtype.Numeric
|
||||
}
|
||||
|
||||
func (q *Queries) CreateRuneBalanceAtBlock(ctx context.Context, arg []CreateRuneBalanceAtBlockParams) *CreateRuneBalanceAtBlockBatchResults {
|
||||
batch := &pgx.Batch{}
|
||||
for _, a := range arg {
|
||||
vals := []interface{}{
|
||||
a.Pkscript,
|
||||
a.BlockHeight,
|
||||
a.RuneID,
|
||||
a.Amount,
|
||||
}
|
||||
batch.Queue(createRuneBalanceAtBlock, vals...)
|
||||
}
|
||||
br := q.db.SendBatch(ctx, batch)
|
||||
return &CreateRuneBalanceAtBlockBatchResults{br, len(arg), false}
|
||||
}
|
||||
|
||||
func (b *CreateRuneBalanceAtBlockBatchResults) Exec(f func(int, error)) {
|
||||
defer b.br.Close()
|
||||
for t := 0; t < b.tot; t++ {
|
||||
if b.closed {
|
||||
if f != nil {
|
||||
f(t, ErrBatchAlreadyClosed)
|
||||
}
|
||||
continue
|
||||
}
|
||||
_, err := b.br.Exec()
|
||||
if f != nil {
|
||||
f(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *CreateRuneBalanceAtBlockBatchResults) Close() error {
|
||||
b.closed = true
|
||||
return b.br.Close()
|
||||
}
|
||||
319
modules/runes/repository/postgres/gen/batch.sql.go
Normal file
319
modules/runes/repository/postgres/gen/batch.sql.go
Normal file
@@ -0,0 +1,319 @@
|
||||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.27.0
|
||||
// source: batch.sql
|
||||
|
||||
package gen
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/jackc/pgx/v5/pgtype"
|
||||
)
|
||||
|
||||
const batchCreateRuneEntries = `-- name: BatchCreateRuneEntries :exec
|
||||
INSERT INTO runes_entries ("rune_id", "rune", "number", "spacers", "premine", "symbol", "divisibility", "terms", "terms_amount", "terms_cap", "terms_height_start", "terms_height_end", "terms_offset_start", "terms_offset_end", "turbo", "etching_block", "etching_tx_hash", "etched_at")
|
||||
VALUES(
|
||||
unnest($1::TEXT[]),
|
||||
unnest($2::TEXT[]),
|
||||
unnest($3::BIGINT[]),
|
||||
unnest($4::INT[]),
|
||||
unnest($5::DECIMAL[]),
|
||||
unnest($6::INT[]),
|
||||
unnest($7::SMALLINT[]),
|
||||
unnest($8::BOOLEAN[]),
|
||||
unnest($9::DECIMAL[]),
|
||||
unnest($10::DECIMAL[]),
|
||||
unnest($11::INT[]), -- nullable (need patch)
|
||||
unnest($12::INT[]), -- nullable (need patch)
|
||||
unnest($13::INT[]), -- nullable (need patch)
|
||||
unnest($14::INT[]), -- nullable (need patch)
|
||||
unnest($15::BOOLEAN[]),
|
||||
unnest($16::INT[]),
|
||||
unnest($17::TEXT[]),
|
||||
unnest($18::TIMESTAMP[])
|
||||
)
|
||||
`
|
||||
|
||||
type BatchCreateRuneEntriesParams struct {
|
||||
RuneIDArr []string
|
||||
RuneArr []string
|
||||
NumberArr []int64
|
||||
SpacersArr []int32
|
||||
PremineArr []pgtype.Numeric
|
||||
SymbolArr []int32
|
||||
DivisibilityArr []int16
|
||||
TermsArr []bool
|
||||
TermsAmountArr []pgtype.Numeric
|
||||
TermsCapArr []pgtype.Numeric
|
||||
TermsHeightStartArr []int32
|
||||
TermsHeightEndArr []int32
|
||||
TermsOffsetStartArr []int32
|
||||
TermsOffsetEndArr []int32
|
||||
TurboArr []bool
|
||||
EtchingBlockArr []int32
|
||||
EtchingTxHashArr []string
|
||||
EtchedAtArr []pgtype.Timestamp
|
||||
}
|
||||
|
||||
func (q *Queries) BatchCreateRuneEntries(ctx context.Context, arg BatchCreateRuneEntriesParams) error {
|
||||
_, err := q.db.Exec(ctx, batchCreateRuneEntries,
|
||||
arg.RuneIDArr,
|
||||
arg.RuneArr,
|
||||
arg.NumberArr,
|
||||
arg.SpacersArr,
|
||||
arg.PremineArr,
|
||||
arg.SymbolArr,
|
||||
arg.DivisibilityArr,
|
||||
arg.TermsArr,
|
||||
arg.TermsAmountArr,
|
||||
arg.TermsCapArr,
|
||||
arg.TermsHeightStartArr,
|
||||
arg.TermsHeightEndArr,
|
||||
arg.TermsOffsetStartArr,
|
||||
arg.TermsOffsetEndArr,
|
||||
arg.TurboArr,
|
||||
arg.EtchingBlockArr,
|
||||
arg.EtchingTxHashArr,
|
||||
arg.EtchedAtArr,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
const batchCreateRuneEntryStates = `-- name: BatchCreateRuneEntryStates :exec
|
||||
INSERT INTO runes_entry_states ("rune_id", "block_height", "mints", "burned_amount", "completed_at", "completed_at_height")
|
||||
VALUES(
|
||||
unnest($1::TEXT[]),
|
||||
unnest($2::INT[]),
|
||||
unnest($3::DECIMAL[]),
|
||||
unnest($4::DECIMAL[]),
|
||||
unnest($5::TIMESTAMP[]),
|
||||
unnest($6::INT[]) -- nullable (need patch)
|
||||
)
|
||||
`
|
||||
|
||||
type BatchCreateRuneEntryStatesParams struct {
|
||||
RuneIDArr []string
|
||||
BlockHeightArr []int32
|
||||
MintsArr []pgtype.Numeric
|
||||
BurnedAmountArr []pgtype.Numeric
|
||||
CompletedAtArr []pgtype.Timestamp
|
||||
CompletedAtHeightArr []int32
|
||||
}
|
||||
|
||||
func (q *Queries) BatchCreateRuneEntryStates(ctx context.Context, arg BatchCreateRuneEntryStatesParams) error {
|
||||
_, err := q.db.Exec(ctx, batchCreateRuneEntryStates,
|
||||
arg.RuneIDArr,
|
||||
arg.BlockHeightArr,
|
||||
arg.MintsArr,
|
||||
arg.BurnedAmountArr,
|
||||
arg.CompletedAtArr,
|
||||
arg.CompletedAtHeightArr,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
const batchCreateRuneTransactions = `-- name: BatchCreateRuneTransactions :exec
|
||||
INSERT INTO runes_transactions ("hash", "block_height", "index", "timestamp", "inputs", "outputs", "mints", "burns", "rune_etched")
|
||||
VALUES (
|
||||
unnest($1::TEXT[]),
|
||||
unnest($2::INT[]),
|
||||
unnest($3::INT[]),
|
||||
unnest($4::TIMESTAMP[]),
|
||||
unnest($5::JSONB[]),
|
||||
unnest($6::JSONB[]),
|
||||
unnest($7::JSONB[]),
|
||||
unnest($8::JSONB[]),
|
||||
unnest($9::BOOLEAN[])
|
||||
)
|
||||
`
|
||||
|
||||
type BatchCreateRuneTransactionsParams struct {
|
||||
HashArr []string
|
||||
BlockHeightArr []int32
|
||||
IndexArr []int32
|
||||
TimestampArr []pgtype.Timestamp
|
||||
InputsArr [][]byte
|
||||
OutputsArr [][]byte
|
||||
MintsArr [][]byte
|
||||
BurnsArr [][]byte
|
||||
RuneEtchedArr []bool
|
||||
}
|
||||
|
||||
func (q *Queries) BatchCreateRuneTransactions(ctx context.Context, arg BatchCreateRuneTransactionsParams) error {
|
||||
_, err := q.db.Exec(ctx, batchCreateRuneTransactions,
|
||||
arg.HashArr,
|
||||
arg.BlockHeightArr,
|
||||
arg.IndexArr,
|
||||
arg.TimestampArr,
|
||||
arg.InputsArr,
|
||||
arg.OutputsArr,
|
||||
arg.MintsArr,
|
||||
arg.BurnsArr,
|
||||
arg.RuneEtchedArr,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
const batchCreateRunesBalances = `-- name: BatchCreateRunesBalances :exec
|
||||
INSERT INTO runes_balances ("pkscript", "block_height", "rune_id", "amount")
|
||||
VALUES(
|
||||
unnest($1::TEXT[]),
|
||||
unnest($2::INT[]),
|
||||
unnest($3::TEXT[]),
|
||||
unnest($4::DECIMAL[])
|
||||
)
|
||||
`
|
||||
|
||||
type BatchCreateRunesBalancesParams struct {
|
||||
PkscriptArr []string
|
||||
BlockHeightArr []int32
|
||||
RuneIDArr []string
|
||||
AmountArr []pgtype.Numeric
|
||||
}
|
||||
|
||||
func (q *Queries) BatchCreateRunesBalances(ctx context.Context, arg BatchCreateRunesBalancesParams) error {
|
||||
_, err := q.db.Exec(ctx, batchCreateRunesBalances,
|
||||
arg.PkscriptArr,
|
||||
arg.BlockHeightArr,
|
||||
arg.RuneIDArr,
|
||||
arg.AmountArr,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
const batchCreateRunesOutpointBalances = `-- name: BatchCreateRunesOutpointBalances :exec
|
||||
INSERT INTO runes_outpoint_balances ("rune_id", "pkscript", "tx_hash", "tx_idx", "amount", "block_height", "spent_height")
|
||||
VALUES(
|
||||
unnest($1::TEXT[]),
|
||||
unnest($2::TEXT[]),
|
||||
unnest($3::TEXT[]),
|
||||
unnest($4::INT[]),
|
||||
unnest($5::DECIMAL[]),
|
||||
unnest($6::INT[]),
|
||||
unnest($7::INT[]) -- nullable (need patch)
|
||||
)
|
||||
`
|
||||
|
||||
type BatchCreateRunesOutpointBalancesParams struct {
|
||||
RuneIDArr []string
|
||||
PkscriptArr []string
|
||||
TxHashArr []string
|
||||
TxIdxArr []int32
|
||||
AmountArr []pgtype.Numeric
|
||||
BlockHeightArr []int32
|
||||
SpentHeightArr []int32
|
||||
}
|
||||
|
||||
func (q *Queries) BatchCreateRunesOutpointBalances(ctx context.Context, arg BatchCreateRunesOutpointBalancesParams) error {
|
||||
_, err := q.db.Exec(ctx, batchCreateRunesOutpointBalances,
|
||||
arg.RuneIDArr,
|
||||
arg.PkscriptArr,
|
||||
arg.TxHashArr,
|
||||
arg.TxIdxArr,
|
||||
arg.AmountArr,
|
||||
arg.BlockHeightArr,
|
||||
arg.SpentHeightArr,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
const batchCreateRunestones = `-- name: BatchCreateRunestones :exec
|
||||
INSERT INTO runes_runestones ("tx_hash", "block_height", "etching", "etching_divisibility", "etching_premine", "etching_rune", "etching_spacers", "etching_symbol", "etching_terms", "etching_terms_amount", "etching_terms_cap", "etching_terms_height_start", "etching_terms_height_end", "etching_terms_offset_start", "etching_terms_offset_end", "etching_turbo", "edicts", "mint", "pointer", "cenotaph", "flaws")
|
||||
VALUES(
|
||||
unnest($1::TEXT[]),
|
||||
unnest($2::INT[]),
|
||||
unnest($3::BOOLEAN[]),
|
||||
unnest($4::SMALLINT[]), -- nullable (need patch)
|
||||
unnest($5::DECIMAL[]),
|
||||
unnest($6::TEXT[]), -- nullable (need patch)
|
||||
unnest($7::INT[]), -- nullable (need patch)
|
||||
unnest($8::INT[]), -- nullable (need patch)
|
||||
unnest($9::BOOLEAN[]), -- nullable (need patch)
|
||||
unnest($10::DECIMAL[]),
|
||||
unnest($11::DECIMAL[]),
|
||||
unnest($12::INT[]), -- nullable (need patch)
|
||||
unnest($13::INT[]), -- nullable (need patch)
|
||||
unnest($14::INT[]), -- nullable (need patch)
|
||||
unnest($15::INT[]), -- nullable (need patch)
|
||||
unnest($16::BOOLEAN[]), -- nullable (need patch)
|
||||
unnest($17::JSONB[]),
|
||||
unnest($18::TEXT[]), -- nullable (need patch)
|
||||
unnest($19::INT[]), -- nullable (need patch)
|
||||
unnest($20::BOOLEAN[]),
|
||||
unnest($21::INT[])
|
||||
)
|
||||
`
|
||||
|
||||
type BatchCreateRunestonesParams struct {
|
||||
TxHashArr []string
|
||||
BlockHeightArr []int32
|
||||
EtchingArr []bool
|
||||
EtchingDivisibilityArr []int16
|
||||
EtchingPremineArr []pgtype.Numeric
|
||||
EtchingRuneArr []string
|
||||
EtchingSpacersArr []int32
|
||||
EtchingSymbolArr []int32
|
||||
EtchingTermsArr []bool
|
||||
EtchingTermsAmountArr []pgtype.Numeric
|
||||
EtchingTermsCapArr []pgtype.Numeric
|
||||
EtchingTermsHeightStartArr []int32
|
||||
EtchingTermsHeightEndArr []int32
|
||||
EtchingTermsOffsetStartArr []int32
|
||||
EtchingTermsOffsetEndArr []int32
|
||||
EtchingTurboArr []bool
|
||||
EdictsArr [][]byte
|
||||
MintArr []string
|
||||
PointerArr []int32
|
||||
CenotaphArr []bool
|
||||
FlawsArr []int32
|
||||
}
|
||||
|
||||
func (q *Queries) BatchCreateRunestones(ctx context.Context, arg BatchCreateRunestonesParams) error {
|
||||
_, err := q.db.Exec(ctx, batchCreateRunestones,
|
||||
arg.TxHashArr,
|
||||
arg.BlockHeightArr,
|
||||
arg.EtchingArr,
|
||||
arg.EtchingDivisibilityArr,
|
||||
arg.EtchingPremineArr,
|
||||
arg.EtchingRuneArr,
|
||||
arg.EtchingSpacersArr,
|
||||
arg.EtchingSymbolArr,
|
||||
arg.EtchingTermsArr,
|
||||
arg.EtchingTermsAmountArr,
|
||||
arg.EtchingTermsCapArr,
|
||||
arg.EtchingTermsHeightStartArr,
|
||||
arg.EtchingTermsHeightEndArr,
|
||||
arg.EtchingTermsOffsetStartArr,
|
||||
arg.EtchingTermsOffsetEndArr,
|
||||
arg.EtchingTurboArr,
|
||||
arg.EdictsArr,
|
||||
arg.MintArr,
|
||||
arg.PointerArr,
|
||||
arg.CenotaphArr,
|
||||
arg.FlawsArr,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
const batchSpendOutpointBalances = `-- name: BatchSpendOutpointBalances :exec
|
||||
UPDATE runes_outpoint_balances
|
||||
SET "spent_height" = $1::INT
|
||||
FROM (
|
||||
SELECT
|
||||
unnest($2::TEXT[]) AS tx_hash,
|
||||
unnest($3::INT[]) AS tx_idx
|
||||
) AS input
|
||||
WHERE "runes_outpoint_balances"."tx_hash" = "input"."tx_hash" AND "runes_outpoint_balances"."tx_idx" = "input"."tx_idx"
|
||||
`
|
||||
|
||||
type BatchSpendOutpointBalancesParams struct {
|
||||
SpentHeight int32
|
||||
TxHashArr []string
|
||||
TxIdxArr []int32
|
||||
}
|
||||
|
||||
func (q *Queries) BatchSpendOutpointBalances(ctx context.Context, arg BatchSpendOutpointBalancesParams) error {
|
||||
_, err := q.db.Exec(ctx, batchSpendOutpointBalances, arg.SpentHeight, arg.TxHashArr, arg.TxIdxArr)
|
||||
return err
|
||||
}
|
||||
118
modules/runes/repository/postgres/gen/batch.sql.patch.go
Normal file
118
modules/runes/repository/postgres/gen/batch.sql.patch.go
Normal file
@@ -0,0 +1,118 @@
|
||||
package gen
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/jackc/pgx/v5/pgtype"
|
||||
)
|
||||
|
||||
type BatchCreateRuneEntriesPatchedParams struct {
|
||||
BatchCreateRuneEntriesParams
|
||||
TermsHeightStartArr []pgtype.Int4
|
||||
TermsHeightEndArr []pgtype.Int4
|
||||
TermsOffsetStartArr []pgtype.Int4
|
||||
TermsOffsetEndArr []pgtype.Int4
|
||||
}
|
||||
|
||||
func (q *Queries) BatchCreateRuneEntriesPatched(ctx context.Context, arg BatchCreateRuneEntriesPatchedParams) error {
|
||||
_, err := q.db.Exec(ctx, batchCreateRuneEntries,
|
||||
arg.RuneIDArr,
|
||||
arg.RuneArr,
|
||||
arg.NumberArr,
|
||||
arg.SpacersArr,
|
||||
arg.PremineArr,
|
||||
arg.SymbolArr,
|
||||
arg.DivisibilityArr,
|
||||
arg.TermsArr,
|
||||
arg.TermsAmountArr,
|
||||
arg.TermsCapArr,
|
||||
arg.TermsHeightStartArr,
|
||||
arg.TermsHeightEndArr,
|
||||
arg.TermsOffsetStartArr,
|
||||
arg.TermsOffsetEndArr,
|
||||
arg.TurboArr,
|
||||
arg.EtchingBlockArr,
|
||||
arg.EtchingTxHashArr,
|
||||
arg.EtchedAtArr,
|
||||
)
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
type BatchCreateRuneEntryStatesPatchedParams struct {
|
||||
BatchCreateRuneEntryStatesParams
|
||||
CompletedAtHeightArr []pgtype.Int4
|
||||
}
|
||||
|
||||
func (q *Queries) BatchCreateRuneEntryStatesPatched(ctx context.Context, arg BatchCreateRuneEntryStatesPatchedParams) error {
|
||||
_, err := q.db.Exec(ctx, batchCreateRuneEntryStates,
|
||||
arg.RuneIDArr,
|
||||
arg.BlockHeightArr,
|
||||
arg.MintsArr,
|
||||
arg.BurnedAmountArr,
|
||||
arg.CompletedAtArr,
|
||||
arg.CompletedAtHeightArr,
|
||||
)
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
type BatchCreateRunesOutpointBalancesPatchedParams struct {
|
||||
BatchCreateRunesOutpointBalancesParams
|
||||
SpentHeightArr []pgtype.Int4
|
||||
}
|
||||
|
||||
func (q *Queries) BatchCreateRunesOutpointBalancesPatched(ctx context.Context, arg BatchCreateRunesOutpointBalancesPatchedParams) error {
|
||||
_, err := q.db.Exec(ctx, batchCreateRunesOutpointBalances,
|
||||
arg.RuneIDArr,
|
||||
arg.PkscriptArr,
|
||||
arg.TxHashArr,
|
||||
arg.TxIdxArr,
|
||||
arg.AmountArr,
|
||||
arg.BlockHeightArr,
|
||||
arg.SpentHeightArr,
|
||||
)
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
type BatchCreateRunestonesPatchedParams struct {
|
||||
BatchCreateRunestonesParams
|
||||
EtchingDivisibilityArr []pgtype.Int2
|
||||
EtchingRuneArr []pgtype.Text
|
||||
EtchingSpacersArr []pgtype.Int4
|
||||
EtchingSymbolArr []pgtype.Int4
|
||||
EtchingTermsArr []pgtype.Bool
|
||||
EtchingTermsHeightStartArr []pgtype.Int4
|
||||
EtchingTermsHeightEndArr []pgtype.Int4
|
||||
EtchingTermsOffsetStartArr []pgtype.Int4
|
||||
EtchingTermsOffsetEndArr []pgtype.Int4
|
||||
EtchingTurboArr []pgtype.Bool
|
||||
MintArr []pgtype.Text
|
||||
PointerArr []pgtype.Int4
|
||||
}
|
||||
|
||||
func (q *Queries) BatchCreateRunestonesPatched(ctx context.Context, arg BatchCreateRunestonesPatchedParams) error {
|
||||
_, err := q.db.Exec(ctx, batchCreateRunestones,
|
||||
arg.TxHashArr,
|
||||
arg.BlockHeightArr,
|
||||
arg.EtchingArr,
|
||||
arg.EtchingDivisibilityArr,
|
||||
arg.EtchingPremineArr,
|
||||
arg.EtchingRuneArr,
|
||||
arg.EtchingSpacersArr,
|
||||
arg.EtchingSymbolArr,
|
||||
arg.EtchingTermsArr,
|
||||
arg.EtchingTermsAmountArr,
|
||||
arg.EtchingTermsCapArr,
|
||||
arg.EtchingTermsHeightStartArr,
|
||||
arg.EtchingTermsHeightEndArr,
|
||||
arg.EtchingTermsOffsetStartArr,
|
||||
arg.EtchingTermsOffsetEndArr,
|
||||
arg.EtchingTurboArr,
|
||||
arg.EdictsArr,
|
||||
arg.MintArr,
|
||||
arg.PointerArr,
|
||||
arg.CenotaphArr,
|
||||
arg.FlawsArr,
|
||||
)
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.26.0
|
||||
// sqlc v1.27.0
|
||||
// source: data.sql
|
||||
|
||||
package gen
|
||||
@@ -45,6 +45,54 @@ func (q *Queries) CreateIndexedBlock(ctx context.Context, arg CreateIndexedBlock
|
||||
return err
|
||||
}
|
||||
|
||||
const createOutPointBalance = `-- name: CreateOutPointBalance :exec
|
||||
INSERT INTO runes_outpoint_balances (rune_id, pkscript, tx_hash, tx_idx, amount, block_height, spent_height) VALUES ($1, $2, $3, $4, $5, $6, $7)
|
||||
`
|
||||
|
||||
type CreateOutPointBalanceParams struct {
|
||||
RuneID string
|
||||
Pkscript string
|
||||
TxHash string
|
||||
TxIdx int32
|
||||
Amount pgtype.Numeric
|
||||
BlockHeight int32
|
||||
SpentHeight pgtype.Int4
|
||||
}
|
||||
|
||||
func (q *Queries) CreateOutPointBalance(ctx context.Context, arg CreateOutPointBalanceParams) error {
|
||||
_, err := q.db.Exec(ctx, createOutPointBalance,
|
||||
arg.RuneID,
|
||||
arg.Pkscript,
|
||||
arg.TxHash,
|
||||
arg.TxIdx,
|
||||
arg.Amount,
|
||||
arg.BlockHeight,
|
||||
arg.SpentHeight,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
const createRuneBalance = `-- name: CreateRuneBalance :exec
|
||||
INSERT INTO runes_balances (pkscript, block_height, rune_id, amount) VALUES ($1, $2, $3, $4)
|
||||
`
|
||||
|
||||
type CreateRuneBalanceParams struct {
|
||||
Pkscript string
|
||||
BlockHeight int32
|
||||
RuneID string
|
||||
Amount pgtype.Numeric
|
||||
}
|
||||
|
||||
func (q *Queries) CreateRuneBalance(ctx context.Context, arg CreateRuneBalanceParams) error {
|
||||
_, err := q.db.Exec(ctx, createRuneBalance,
|
||||
arg.Pkscript,
|
||||
arg.BlockHeight,
|
||||
arg.RuneID,
|
||||
arg.Amount,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
const createRuneEntry = `-- name: CreateRuneEntry :exec
|
||||
INSERT INTO runes_entries (rune_id, rune, number, spacers, premine, symbol, divisibility, terms, terms_amount, terms_cap, terms_height_start, terms_height_end, terms_offset_start, terms_offset_end, turbo, etching_block, etching_tx_hash, etched_at)
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18)
|
||||
@@ -296,12 +344,14 @@ const getBalancesByPkScript = `-- name: GetBalancesByPkScript :many
|
||||
WITH balances AS (
|
||||
SELECT DISTINCT ON (rune_id) pkscript, block_height, rune_id, amount FROM runes_balances WHERE pkscript = $1 AND block_height <= $2 ORDER BY rune_id, block_height DESC
|
||||
)
|
||||
SELECT pkscript, block_height, rune_id, amount FROM balances WHERE amount > 0
|
||||
SELECT pkscript, block_height, rune_id, amount FROM balances WHERE amount > 0 ORDER BY amount DESC, rune_id LIMIT $3 OFFSET $4
|
||||
`
|
||||
|
||||
type GetBalancesByPkScriptParams struct {
|
||||
Pkscript string
|
||||
BlockHeight int32
|
||||
Limit int32
|
||||
Offset int32
|
||||
}
|
||||
|
||||
type GetBalancesByPkScriptRow struct {
|
||||
@@ -312,7 +362,12 @@ type GetBalancesByPkScriptRow struct {
|
||||
}
|
||||
|
||||
func (q *Queries) GetBalancesByPkScript(ctx context.Context, arg GetBalancesByPkScriptParams) ([]GetBalancesByPkScriptRow, error) {
|
||||
rows, err := q.db.Query(ctx, getBalancesByPkScript, arg.Pkscript, arg.BlockHeight)
|
||||
rows, err := q.db.Query(ctx, getBalancesByPkScript,
|
||||
arg.Pkscript,
|
||||
arg.BlockHeight,
|
||||
arg.Limit,
|
||||
arg.Offset,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -340,12 +395,14 @@ const getBalancesByRuneId = `-- name: GetBalancesByRuneId :many
|
||||
WITH balances AS (
|
||||
SELECT DISTINCT ON (pkscript) pkscript, block_height, rune_id, amount FROM runes_balances WHERE rune_id = $1 AND block_height <= $2 ORDER BY pkscript, block_height DESC
|
||||
)
|
||||
SELECT pkscript, block_height, rune_id, amount FROM balances WHERE amount > 0
|
||||
SELECT pkscript, block_height, rune_id, amount FROM balances WHERE amount > 0 ORDER BY amount DESC, pkscript LIMIT $3 OFFSET $4
|
||||
`
|
||||
|
||||
type GetBalancesByRuneIdParams struct {
|
||||
RuneID string
|
||||
BlockHeight int32
|
||||
Limit int32
|
||||
Offset int32
|
||||
}
|
||||
|
||||
type GetBalancesByRuneIdRow struct {
|
||||
@@ -356,7 +413,12 @@ type GetBalancesByRuneIdRow struct {
|
||||
}
|
||||
|
||||
func (q *Queries) GetBalancesByRuneId(ctx context.Context, arg GetBalancesByRuneIdParams) ([]GetBalancesByRuneIdRow, error) {
|
||||
rows, err := q.db.Query(ctx, getBalancesByRuneId, arg.RuneID, arg.BlockHeight)
|
||||
rows, err := q.db.Query(ctx, getBalancesByRuneId,
|
||||
arg.RuneID,
|
||||
arg.BlockHeight,
|
||||
arg.Limit,
|
||||
arg.Offset,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -414,6 +476,120 @@ func (q *Queries) GetLatestIndexedBlock(ctx context.Context) (RunesIndexedBlock,
|
||||
return i, err
|
||||
}
|
||||
|
||||
const getOngoingRuneEntries = `-- name: GetOngoingRuneEntries :many
|
||||
WITH states AS (
|
||||
-- select latest state
|
||||
SELECT DISTINCT ON (rune_id) rune_id, block_height, mints, burned_amount, completed_at, completed_at_height FROM runes_entry_states WHERE block_height <= $1::integer ORDER BY rune_id, block_height DESC
|
||||
)
|
||||
SELECT runes_entries.rune_id, number, rune, spacers, premine, symbol, divisibility, terms, terms_amount, terms_cap, terms_height_start, terms_height_end, terms_offset_start, terms_offset_end, turbo, etching_block, etching_tx_hash, etched_at, states.rune_id, block_height, mints, burned_amount, completed_at, completed_at_height FROM runes_entries
|
||||
LEFT JOIN states ON runes_entries.rune_id = states.rune_id
|
||||
WHERE (
|
||||
runes_entries.terms = TRUE AND
|
||||
COALESCE(runes_entries.terms_amount, 0) != 0 AND
|
||||
COALESCE(runes_entries.terms_cap, 0) != 0 AND
|
||||
states.mints < runes_entries.terms_cap AND
|
||||
(
|
||||
runes_entries.terms_height_start IS NULL OR runes_entries.terms_height_start <= $1::integer
|
||||
) AND (
|
||||
runes_entries.terms_height_end IS NULL OR $1::integer <= runes_entries.terms_height_end
|
||||
) AND (
|
||||
runes_entries.terms_offset_start IS NULL OR runes_entries.terms_offset_start + runes_entries.etching_block <= $1::integer
|
||||
) AND (
|
||||
runes_entries.terms_offset_end IS NULL OR $1::integer <= runes_entries.terms_offset_start + runes_entries.etching_block
|
||||
)
|
||||
|
||||
) AND (
|
||||
$2::text = '' OR
|
||||
runes_entries.rune ILIKE '%' || $2::text || '%'
|
||||
)
|
||||
ORDER BY states.mints DESC
|
||||
LIMIT $4 OFFSET $3
|
||||
`
|
||||
|
||||
type GetOngoingRuneEntriesParams struct {
|
||||
Height int32
|
||||
Search string
|
||||
Offset int32
|
||||
Limit int32
|
||||
}
|
||||
|
||||
type GetOngoingRuneEntriesRow struct {
|
||||
RuneID string
|
||||
Number int64
|
||||
Rune string
|
||||
Spacers int32
|
||||
Premine pgtype.Numeric
|
||||
Symbol int32
|
||||
Divisibility int16
|
||||
Terms bool
|
||||
TermsAmount pgtype.Numeric
|
||||
TermsCap pgtype.Numeric
|
||||
TermsHeightStart pgtype.Int4
|
||||
TermsHeightEnd pgtype.Int4
|
||||
TermsOffsetStart pgtype.Int4
|
||||
TermsOffsetEnd pgtype.Int4
|
||||
Turbo bool
|
||||
EtchingBlock int32
|
||||
EtchingTxHash string
|
||||
EtchedAt pgtype.Timestamp
|
||||
RuneID_2 pgtype.Text
|
||||
BlockHeight pgtype.Int4
|
||||
Mints pgtype.Numeric
|
||||
BurnedAmount pgtype.Numeric
|
||||
CompletedAt pgtype.Timestamp
|
||||
CompletedAtHeight pgtype.Int4
|
||||
}
|
||||
|
||||
func (q *Queries) GetOngoingRuneEntries(ctx context.Context, arg GetOngoingRuneEntriesParams) ([]GetOngoingRuneEntriesRow, error) {
|
||||
rows, err := q.db.Query(ctx, getOngoingRuneEntries,
|
||||
arg.Height,
|
||||
arg.Search,
|
||||
arg.Offset,
|
||||
arg.Limit,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []GetOngoingRuneEntriesRow
|
||||
for rows.Next() {
|
||||
var i GetOngoingRuneEntriesRow
|
||||
if err := rows.Scan(
|
||||
&i.RuneID,
|
||||
&i.Number,
|
||||
&i.Rune,
|
||||
&i.Spacers,
|
||||
&i.Premine,
|
||||
&i.Symbol,
|
||||
&i.Divisibility,
|
||||
&i.Terms,
|
||||
&i.TermsAmount,
|
||||
&i.TermsCap,
|
||||
&i.TermsHeightStart,
|
||||
&i.TermsHeightEnd,
|
||||
&i.TermsOffsetStart,
|
||||
&i.TermsOffsetEnd,
|
||||
&i.Turbo,
|
||||
&i.EtchingBlock,
|
||||
&i.EtchingTxHash,
|
||||
&i.EtchedAt,
|
||||
&i.RuneID_2,
|
||||
&i.BlockHeight,
|
||||
&i.Mints,
|
||||
&i.BurnedAmount,
|
||||
&i.CompletedAt,
|
||||
&i.CompletedAtHeight,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const getOutPointBalancesAtOutPoint = `-- name: GetOutPointBalancesAtOutPoint :many
|
||||
SELECT rune_id, pkscript, tx_hash, tx_idx, amount, block_height, spent_height FROM runes_outpoint_balances WHERE tx_hash = $1 AND tx_idx = $2
|
||||
`
|
||||
@@ -451,6 +627,105 @@ func (q *Queries) GetOutPointBalancesAtOutPoint(ctx context.Context, arg GetOutP
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const getRuneEntries = `-- name: GetRuneEntries :many
|
||||
WITH states AS (
|
||||
-- select latest state
|
||||
SELECT DISTINCT ON (rune_id) rune_id, block_height, mints, burned_amount, completed_at, completed_at_height FROM runes_entry_states WHERE block_height <= $4 ORDER BY rune_id, block_height DESC
|
||||
)
|
||||
SELECT runes_entries.rune_id, number, rune, spacers, premine, symbol, divisibility, terms, terms_amount, terms_cap, terms_height_start, terms_height_end, terms_offset_start, terms_offset_end, turbo, etching_block, etching_tx_hash, etched_at, states.rune_id, block_height, mints, burned_amount, completed_at, completed_at_height FROM runes_entries
|
||||
LEFT JOIN states ON runes_entries.rune_id = states.rune_id
|
||||
WHERE (
|
||||
$1 = '' OR
|
||||
runes_entries.rune ILIKE $1 || '%'
|
||||
)
|
||||
ORDER BY runes_entries.number
|
||||
LIMIT $3 OFFSET $2
|
||||
`
|
||||
|
||||
type GetRuneEntriesParams struct {
|
||||
Search interface{}
|
||||
Offset int32
|
||||
Limit int32
|
||||
Height int32
|
||||
}
|
||||
|
||||
type GetRuneEntriesRow struct {
|
||||
RuneID string
|
||||
Number int64
|
||||
Rune string
|
||||
Spacers int32
|
||||
Premine pgtype.Numeric
|
||||
Symbol int32
|
||||
Divisibility int16
|
||||
Terms bool
|
||||
TermsAmount pgtype.Numeric
|
||||
TermsCap pgtype.Numeric
|
||||
TermsHeightStart pgtype.Int4
|
||||
TermsHeightEnd pgtype.Int4
|
||||
TermsOffsetStart pgtype.Int4
|
||||
TermsOffsetEnd pgtype.Int4
|
||||
Turbo bool
|
||||
EtchingBlock int32
|
||||
EtchingTxHash string
|
||||
EtchedAt pgtype.Timestamp
|
||||
RuneID_2 pgtype.Text
|
||||
BlockHeight pgtype.Int4
|
||||
Mints pgtype.Numeric
|
||||
BurnedAmount pgtype.Numeric
|
||||
CompletedAt pgtype.Timestamp
|
||||
CompletedAtHeight pgtype.Int4
|
||||
}
|
||||
|
||||
func (q *Queries) GetRuneEntries(ctx context.Context, arg GetRuneEntriesParams) ([]GetRuneEntriesRow, error) {
|
||||
rows, err := q.db.Query(ctx, getRuneEntries,
|
||||
arg.Search,
|
||||
arg.Offset,
|
||||
arg.Limit,
|
||||
arg.Height,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []GetRuneEntriesRow
|
||||
for rows.Next() {
|
||||
var i GetRuneEntriesRow
|
||||
if err := rows.Scan(
|
||||
&i.RuneID,
|
||||
&i.Number,
|
||||
&i.Rune,
|
||||
&i.Spacers,
|
||||
&i.Premine,
|
||||
&i.Symbol,
|
||||
&i.Divisibility,
|
||||
&i.Terms,
|
||||
&i.TermsAmount,
|
||||
&i.TermsCap,
|
||||
&i.TermsHeightStart,
|
||||
&i.TermsHeightEnd,
|
||||
&i.TermsOffsetStart,
|
||||
&i.TermsOffsetEnd,
|
||||
&i.Turbo,
|
||||
&i.EtchingBlock,
|
||||
&i.EtchingTxHash,
|
||||
&i.EtchedAt,
|
||||
&i.RuneID_2,
|
||||
&i.BlockHeight,
|
||||
&i.Mints,
|
||||
&i.BurnedAmount,
|
||||
&i.CompletedAt,
|
||||
&i.CompletedAtHeight,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const getRuneEntriesByRuneIds = `-- name: GetRuneEntriesByRuneIds :many
|
||||
WITH states AS (
|
||||
-- select latest state
|
||||
@@ -631,27 +906,106 @@ func (q *Queries) GetRuneIdFromRune(ctx context.Context, rune string) (string, e
|
||||
return rune_id, err
|
||||
}
|
||||
|
||||
const getRuneTransaction = `-- name: GetRuneTransaction :one
|
||||
SELECT hash, runes_transactions.block_height, index, timestamp, inputs, outputs, mints, burns, rune_etched, tx_hash, runes_runestones.block_height, etching, etching_divisibility, etching_premine, etching_rune, etching_spacers, etching_symbol, etching_terms, etching_terms_amount, etching_terms_cap, etching_terms_height_start, etching_terms_height_end, etching_terms_offset_start, etching_terms_offset_end, etching_turbo, edicts, mint, pointer, cenotaph, flaws FROM runes_transactions
|
||||
LEFT JOIN runes_runestones ON runes_transactions.hash = runes_runestones.tx_hash
|
||||
WHERE hash = $1 LIMIT 1
|
||||
`
|
||||
|
||||
type GetRuneTransactionRow struct {
|
||||
Hash string
|
||||
BlockHeight int32
|
||||
Index int32
|
||||
Timestamp pgtype.Timestamp
|
||||
Inputs []byte
|
||||
Outputs []byte
|
||||
Mints []byte
|
||||
Burns []byte
|
||||
RuneEtched bool
|
||||
TxHash pgtype.Text
|
||||
BlockHeight_2 pgtype.Int4
|
||||
Etching pgtype.Bool
|
||||
EtchingDivisibility pgtype.Int2
|
||||
EtchingPremine pgtype.Numeric
|
||||
EtchingRune pgtype.Text
|
||||
EtchingSpacers pgtype.Int4
|
||||
EtchingSymbol pgtype.Int4
|
||||
EtchingTerms pgtype.Bool
|
||||
EtchingTermsAmount pgtype.Numeric
|
||||
EtchingTermsCap pgtype.Numeric
|
||||
EtchingTermsHeightStart pgtype.Int4
|
||||
EtchingTermsHeightEnd pgtype.Int4
|
||||
EtchingTermsOffsetStart pgtype.Int4
|
||||
EtchingTermsOffsetEnd pgtype.Int4
|
||||
EtchingTurbo pgtype.Bool
|
||||
Edicts []byte
|
||||
Mint pgtype.Text
|
||||
Pointer pgtype.Int4
|
||||
Cenotaph pgtype.Bool
|
||||
Flaws pgtype.Int4
|
||||
}
|
||||
|
||||
func (q *Queries) GetRuneTransaction(ctx context.Context, hash string) (GetRuneTransactionRow, error) {
|
||||
row := q.db.QueryRow(ctx, getRuneTransaction, hash)
|
||||
var i GetRuneTransactionRow
|
||||
err := row.Scan(
|
||||
&i.Hash,
|
||||
&i.BlockHeight,
|
||||
&i.Index,
|
||||
&i.Timestamp,
|
||||
&i.Inputs,
|
||||
&i.Outputs,
|
||||
&i.Mints,
|
||||
&i.Burns,
|
||||
&i.RuneEtched,
|
||||
&i.TxHash,
|
||||
&i.BlockHeight_2,
|
||||
&i.Etching,
|
||||
&i.EtchingDivisibility,
|
||||
&i.EtchingPremine,
|
||||
&i.EtchingRune,
|
||||
&i.EtchingSpacers,
|
||||
&i.EtchingSymbol,
|
||||
&i.EtchingTerms,
|
||||
&i.EtchingTermsAmount,
|
||||
&i.EtchingTermsCap,
|
||||
&i.EtchingTermsHeightStart,
|
||||
&i.EtchingTermsHeightEnd,
|
||||
&i.EtchingTermsOffsetStart,
|
||||
&i.EtchingTermsOffsetEnd,
|
||||
&i.EtchingTurbo,
|
||||
&i.Edicts,
|
||||
&i.Mint,
|
||||
&i.Pointer,
|
||||
&i.Cenotaph,
|
||||
&i.Flaws,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const getRuneTransactions = `-- name: GetRuneTransactions :many
|
||||
SELECT hash, runes_transactions.block_height, index, timestamp, inputs, outputs, mints, burns, rune_etched, tx_hash, runes_runestones.block_height, etching, etching_divisibility, etching_premine, etching_rune, etching_spacers, etching_symbol, etching_terms, etching_terms_amount, etching_terms_cap, etching_terms_height_start, etching_terms_height_end, etching_terms_offset_start, etching_terms_offset_end, etching_turbo, edicts, mint, pointer, cenotaph, flaws FROM runes_transactions
|
||||
LEFT JOIN runes_runestones ON runes_transactions.hash = runes_runestones.tx_hash
|
||||
WHERE (
|
||||
$1::BOOLEAN = FALSE -- if @filter_pk_script is TRUE, apply pk_script filter
|
||||
OR runes_transactions.outputs @> $2::JSONB
|
||||
OR runes_transactions.inputs @> $2::JSONB
|
||||
) AND (
|
||||
$3::BOOLEAN = FALSE -- if @filter_rune_id is TRUE, apply rune_id filter
|
||||
$3::BOOLEAN = FALSE -- if @filter_pk_script is TRUE, apply pk_script filter
|
||||
OR runes_transactions.outputs @> $4::JSONB
|
||||
OR runes_transactions.inputs @> $4::JSONB
|
||||
OR runes_transactions.mints ? $5
|
||||
OR runes_transactions.burns ? $5
|
||||
OR (runes_transactions.rune_etched = TRUE AND runes_transactions.block_height = $6 AND runes_transactions.index = $7)
|
||||
OR runes_transactions.inputs @> $4::JSONB
|
||||
) AND (
|
||||
$8 <= runes_transactions.block_height AND runes_transactions.block_height <= $9
|
||||
$5::BOOLEAN = FALSE -- if @filter_rune_id is TRUE, apply rune_id filter
|
||||
OR runes_transactions.outputs @> $6::JSONB
|
||||
OR runes_transactions.inputs @> $6::JSONB
|
||||
OR runes_transactions.mints ? $7
|
||||
OR runes_transactions.burns ? $7
|
||||
OR (runes_transactions.rune_etched = TRUE AND runes_transactions.block_height = $8 AND runes_transactions.index = $9)
|
||||
) AND (
|
||||
$10 <= runes_transactions.block_height AND runes_transactions.block_height <= $11
|
||||
)
|
||||
ORDER BY runes_transactions.block_height DESC LIMIT 10000
|
||||
ORDER BY runes_transactions.block_height DESC, runes_transactions.index DESC LIMIT $1 OFFSET $2
|
||||
`
|
||||
|
||||
type GetRuneTransactionsParams struct {
|
||||
Limit int32
|
||||
Offset int32
|
||||
FilterPkScript bool
|
||||
PkScriptParam []byte
|
||||
FilterRuneID bool
|
||||
@@ -698,6 +1052,8 @@ type GetRuneTransactionsRow struct {
|
||||
|
||||
func (q *Queries) GetRuneTransactions(ctx context.Context, arg GetRuneTransactionsParams) ([]GetRuneTransactionsRow, error) {
|
||||
rows, err := q.db.Query(ctx, getRuneTransactions,
|
||||
arg.Limit,
|
||||
arg.Offset,
|
||||
arg.FilterPkScript,
|
||||
arg.PkScriptParam,
|
||||
arg.FilterRuneID,
|
||||
@@ -757,32 +1113,53 @@ func (q *Queries) GetRuneTransactions(ctx context.Context, arg GetRuneTransactio
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const getUnspentOutPointBalancesByPkScript = `-- name: GetUnspentOutPointBalancesByPkScript :many
|
||||
SELECT rune_id, pkscript, tx_hash, tx_idx, amount, block_height, spent_height FROM runes_outpoint_balances WHERE pkscript = $1 AND block_height <= $2 AND (spent_height IS NULL OR spent_height > $2)
|
||||
const getRunesUTXOsByPkScript = `-- name: GetRunesUTXOsByPkScript :many
|
||||
SELECT tx_hash, tx_idx, max("pkscript") as pkscript, array_agg("rune_id") as rune_ids, array_agg("amount") as amounts
|
||||
FROM runes_outpoint_balances
|
||||
WHERE
|
||||
pkscript = $3 AND
|
||||
block_height <= $4 AND
|
||||
(spent_height IS NULL OR spent_height > $4)
|
||||
GROUP BY tx_hash, tx_idx
|
||||
ORDER BY tx_hash, tx_idx
|
||||
LIMIT $1 OFFSET $2
|
||||
`
|
||||
|
||||
type GetUnspentOutPointBalancesByPkScriptParams struct {
|
||||
type GetRunesUTXOsByPkScriptParams struct {
|
||||
Limit int32
|
||||
Offset int32
|
||||
Pkscript string
|
||||
BlockHeight int32
|
||||
}
|
||||
|
||||
func (q *Queries) GetUnspentOutPointBalancesByPkScript(ctx context.Context, arg GetUnspentOutPointBalancesByPkScriptParams) ([]RunesOutpointBalance, error) {
|
||||
rows, err := q.db.Query(ctx, getUnspentOutPointBalancesByPkScript, arg.Pkscript, arg.BlockHeight)
|
||||
type GetRunesUTXOsByPkScriptRow struct {
|
||||
TxHash string
|
||||
TxIdx int32
|
||||
Pkscript interface{}
|
||||
RuneIds interface{}
|
||||
Amounts interface{}
|
||||
}
|
||||
|
||||
func (q *Queries) GetRunesUTXOsByPkScript(ctx context.Context, arg GetRunesUTXOsByPkScriptParams) ([]GetRunesUTXOsByPkScriptRow, error) {
|
||||
rows, err := q.db.Query(ctx, getRunesUTXOsByPkScript,
|
||||
arg.Limit,
|
||||
arg.Offset,
|
||||
arg.Pkscript,
|
||||
arg.BlockHeight,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []RunesOutpointBalance
|
||||
var items []GetRunesUTXOsByPkScriptRow
|
||||
for rows.Next() {
|
||||
var i RunesOutpointBalance
|
||||
var i GetRunesUTXOsByPkScriptRow
|
||||
if err := rows.Scan(
|
||||
&i.RuneID,
|
||||
&i.Pkscript,
|
||||
&i.TxHash,
|
||||
&i.TxIdx,
|
||||
&i.Amount,
|
||||
&i.BlockHeight,
|
||||
&i.SpentHeight,
|
||||
&i.Pkscript,
|
||||
&i.RuneIds,
|
||||
&i.Amounts,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -794,18 +1171,116 @@ func (q *Queries) GetUnspentOutPointBalancesByPkScript(ctx context.Context, arg
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const spendOutPointBalances = `-- name: SpendOutPointBalances :exec
|
||||
const getRunesUTXOsByRuneIdAndPkScript = `-- name: GetRunesUTXOsByRuneIdAndPkScript :many
|
||||
SELECT tx_hash, tx_idx, max("pkscript") as pkscript, array_agg("rune_id") as rune_ids, array_agg("amount") as amounts
|
||||
FROM runes_outpoint_balances
|
||||
WHERE
|
||||
pkscript = $3 AND
|
||||
block_height <= $4 AND
|
||||
(spent_height IS NULL OR spent_height > $4)
|
||||
GROUP BY tx_hash, tx_idx
|
||||
HAVING array_agg("rune_id") @> $5::text[]
|
||||
ORDER BY tx_hash, tx_idx
|
||||
LIMIT $1 OFFSET $2
|
||||
`
|
||||
|
||||
type GetRunesUTXOsByRuneIdAndPkScriptParams struct {
|
||||
Limit int32
|
||||
Offset int32
|
||||
Pkscript string
|
||||
BlockHeight int32
|
||||
RuneIds []string
|
||||
}
|
||||
|
||||
type GetRunesUTXOsByRuneIdAndPkScriptRow struct {
|
||||
TxHash string
|
||||
TxIdx int32
|
||||
Pkscript interface{}
|
||||
RuneIds interface{}
|
||||
Amounts interface{}
|
||||
}
|
||||
|
||||
func (q *Queries) GetRunesUTXOsByRuneIdAndPkScript(ctx context.Context, arg GetRunesUTXOsByRuneIdAndPkScriptParams) ([]GetRunesUTXOsByRuneIdAndPkScriptRow, error) {
|
||||
rows, err := q.db.Query(ctx, getRunesUTXOsByRuneIdAndPkScript,
|
||||
arg.Limit,
|
||||
arg.Offset,
|
||||
arg.Pkscript,
|
||||
arg.BlockHeight,
|
||||
arg.RuneIds,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []GetRunesUTXOsByRuneIdAndPkScriptRow
|
||||
for rows.Next() {
|
||||
var i GetRunesUTXOsByRuneIdAndPkScriptRow
|
||||
if err := rows.Scan(
|
||||
&i.TxHash,
|
||||
&i.TxIdx,
|
||||
&i.Pkscript,
|
||||
&i.RuneIds,
|
||||
&i.Amounts,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const getTotalHoldersByRuneIds = `-- name: GetTotalHoldersByRuneIds :many
|
||||
WITH balances AS (
|
||||
SELECT DISTINCT ON (rune_id, pkscript) pkscript, block_height, rune_id, amount FROM runes_balances WHERE rune_id = ANY($1::TEXT[]) AND block_height <= $2 ORDER BY rune_id, pkscript, block_height DESC
|
||||
)
|
||||
SELECT rune_id, COUNT(DISTINCT pkscript) FROM balances WHERE amount > 0 GROUP BY rune_id
|
||||
`
|
||||
|
||||
type GetTotalHoldersByRuneIdsParams struct {
|
||||
RuneIds []string
|
||||
BlockHeight int32
|
||||
}
|
||||
|
||||
type GetTotalHoldersByRuneIdsRow struct {
|
||||
RuneID string
|
||||
Count int64
|
||||
}
|
||||
|
||||
func (q *Queries) GetTotalHoldersByRuneIds(ctx context.Context, arg GetTotalHoldersByRuneIdsParams) ([]GetTotalHoldersByRuneIdsRow, error) {
|
||||
rows, err := q.db.Query(ctx, getTotalHoldersByRuneIds, arg.RuneIds, arg.BlockHeight)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []GetTotalHoldersByRuneIdsRow
|
||||
for rows.Next() {
|
||||
var i GetTotalHoldersByRuneIdsRow
|
||||
if err := rows.Scan(&i.RuneID, &i.Count); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const spendOutPointBalance = `-- name: SpendOutPointBalance :exec
|
||||
UPDATE runes_outpoint_balances SET spent_height = $1 WHERE tx_hash = $2 AND tx_idx = $3
|
||||
`
|
||||
|
||||
type SpendOutPointBalancesParams struct {
|
||||
type SpendOutPointBalanceParams struct {
|
||||
SpentHeight pgtype.Int4
|
||||
TxHash string
|
||||
TxIdx int32
|
||||
}
|
||||
|
||||
func (q *Queries) SpendOutPointBalances(ctx context.Context, arg SpendOutPointBalancesParams) error {
|
||||
_, err := q.db.Exec(ctx, spendOutPointBalances, arg.SpentHeight, arg.TxHash, arg.TxIdx)
|
||||
func (q *Queries) SpendOutPointBalance(ctx context.Context, arg SpendOutPointBalanceParams) error {
|
||||
_, err := q.db.Exec(ctx, spendOutPointBalance, arg.SpentHeight, arg.TxHash, arg.TxIdx)
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.26.0
|
||||
// sqlc v1.27.0
|
||||
|
||||
package gen
|
||||
|
||||
@@ -15,7 +15,6 @@ type DBTX interface {
|
||||
Exec(context.Context, string, ...interface{}) (pgconn.CommandTag, error)
|
||||
Query(context.Context, string, ...interface{}) (pgx.Rows, error)
|
||||
QueryRow(context.Context, string, ...interface{}) pgx.Row
|
||||
SendBatch(context.Context, *pgx.Batch) pgx.BatchResults
|
||||
}
|
||||
|
||||
func New(db DBTX) *Queries {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.26.0
|
||||
// sqlc v1.27.0
|
||||
// source: info.sql
|
||||
|
||||
package gen
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.26.0
|
||||
// sqlc v1.27.0
|
||||
|
||||
package gen
|
||||
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/repository/postgres/gen"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/runes"
|
||||
|
||||
"github.com/gaze-network/uint128"
|
||||
"github.com/jackc/pgx/v5/pgtype"
|
||||
"github.com/samber/lo"
|
||||
@@ -47,7 +48,7 @@ func numericFromUint128(src *uint128.Uint128) (pgtype.Numeric, error) {
|
||||
func mapIndexerStateModelToType(src gen.RunesIndexerState) entity.IndexerState {
|
||||
var createdAt time.Time
|
||||
if src.CreatedAt.Valid {
|
||||
createdAt = src.CreatedAt.Time
|
||||
createdAt = src.CreatedAt.Time.UTC()
|
||||
}
|
||||
return entity.IndexerState{
|
||||
DBVersion: src.DbVersion,
|
||||
@@ -63,7 +64,7 @@ func mapIndexerStateTypeToParams(src entity.IndexerState) gen.SetIndexerStatePar
|
||||
}
|
||||
}
|
||||
|
||||
func mapRuneEntryModelToType(src gen.GetRuneEntriesByRuneIdsRow) (runes.RuneEntry, error) {
|
||||
func mapRuneEntryModelToType(src gen.GetRuneEntriesRow) (runes.RuneEntry, error) {
|
||||
runeId, err := runes.NewRuneIdFromString(src.RuneID)
|
||||
if err != nil {
|
||||
return runes.RuneEntry{}, errors.Wrap(err, "failed to parse rune id")
|
||||
@@ -86,7 +87,7 @@ func mapRuneEntryModelToType(src gen.GetRuneEntriesByRuneIdsRow) (runes.RuneEntr
|
||||
}
|
||||
var completedAt time.Time
|
||||
if src.CompletedAt.Valid {
|
||||
completedAt = src.CompletedAt.Time
|
||||
completedAt = src.CompletedAt.Time.UTC()
|
||||
}
|
||||
var completedAtHeight *uint64
|
||||
if src.CompletedAtHeight.Valid {
|
||||
@@ -132,7 +133,7 @@ func mapRuneEntryModelToType(src gen.GetRuneEntriesByRuneIdsRow) (runes.RuneEntr
|
||||
}
|
||||
var etchedAt time.Time
|
||||
if src.EtchedAt.Valid {
|
||||
etchedAt = src.EtchedAt.Time
|
||||
etchedAt = src.EtchedAt.Time.UTC()
|
||||
}
|
||||
return runes.RuneEntry{
|
||||
RuneId: runeId,
|
||||
@@ -153,31 +154,13 @@ func mapRuneEntryModelToType(src gen.GetRuneEntriesByRuneIdsRow) (runes.RuneEntr
|
||||
}, nil
|
||||
}
|
||||
|
||||
func mapRuneEntryTypeToParams(src runes.RuneEntry, blockHeight uint64) (gen.CreateRuneEntryParams, gen.CreateRuneEntryStateParams, error) {
|
||||
func mapRuneEntryTypeToParams(src runes.RuneEntry) (gen.CreateRuneEntryParams, error) {
|
||||
runeId := src.RuneId.String()
|
||||
rune := src.SpacedRune.Rune.String()
|
||||
spacers := int32(src.SpacedRune.Spacers)
|
||||
mints, err := numericFromUint128(&src.Mints)
|
||||
if err != nil {
|
||||
return gen.CreateRuneEntryParams{}, gen.CreateRuneEntryStateParams{}, errors.Wrap(err, "failed to parse mints")
|
||||
}
|
||||
burnedAmount, err := numericFromUint128(&src.BurnedAmount)
|
||||
if err != nil {
|
||||
return gen.CreateRuneEntryParams{}, gen.CreateRuneEntryStateParams{}, errors.Wrap(err, "failed to parse burned amount")
|
||||
}
|
||||
premine, err := numericFromUint128(&src.Premine)
|
||||
if err != nil {
|
||||
return gen.CreateRuneEntryParams{}, gen.CreateRuneEntryStateParams{}, errors.Wrap(err, "failed to parse premine")
|
||||
}
|
||||
var completedAt pgtype.Timestamp
|
||||
if !src.CompletedAt.IsZero() {
|
||||
completedAt.Time = src.CompletedAt
|
||||
completedAt.Valid = true
|
||||
}
|
||||
var completedAtHeight pgtype.Int4
|
||||
if src.CompletedAtHeight != nil {
|
||||
completedAtHeight.Int32 = int32(*src.CompletedAtHeight)
|
||||
completedAtHeight.Valid = true
|
||||
return gen.CreateRuneEntryParams{}, errors.Wrap(err, "failed to parse premine")
|
||||
}
|
||||
var terms bool
|
||||
var termsAmount, termsCap pgtype.Numeric
|
||||
@@ -187,13 +170,13 @@ func mapRuneEntryTypeToParams(src runes.RuneEntry, blockHeight uint64) (gen.Crea
|
||||
if src.Terms.Amount != nil {
|
||||
termsAmount, err = numericFromUint128(src.Terms.Amount)
|
||||
if err != nil {
|
||||
return gen.CreateRuneEntryParams{}, gen.CreateRuneEntryStateParams{}, errors.Wrap(err, "failed to parse terms amount")
|
||||
return gen.CreateRuneEntryParams{}, errors.Wrap(err, "failed to parse terms amount")
|
||||
}
|
||||
}
|
||||
if src.Terms.Cap != nil {
|
||||
termsCap, err = numericFromUint128(src.Terms.Cap)
|
||||
if err != nil {
|
||||
return gen.CreateRuneEntryParams{}, gen.CreateRuneEntryStateParams{}, errors.Wrap(err, "failed to parse terms cap")
|
||||
return gen.CreateRuneEntryParams{}, errors.Wrap(err, "failed to parse terms cap")
|
||||
}
|
||||
}
|
||||
if src.Terms.HeightStart != nil {
|
||||
@@ -221,51 +204,150 @@ func mapRuneEntryTypeToParams(src runes.RuneEntry, blockHeight uint64) (gen.Crea
|
||||
}
|
||||
}
|
||||
}
|
||||
etchedAt := pgtype.Timestamp{Time: time.Time{}, Valid: true}
|
||||
etchedAt := pgtype.Timestamp{Time: src.EtchedAt.UTC(), Valid: true}
|
||||
|
||||
return gen.CreateRuneEntryParams{
|
||||
RuneID: runeId,
|
||||
Rune: rune,
|
||||
Number: int64(src.Number),
|
||||
Spacers: spacers,
|
||||
Premine: premine,
|
||||
Symbol: src.Symbol,
|
||||
Divisibility: int16(src.Divisibility),
|
||||
Terms: terms,
|
||||
TermsAmount: termsAmount,
|
||||
TermsCap: termsCap,
|
||||
TermsHeightStart: termsHeightStart,
|
||||
TermsHeightEnd: termsHeightEnd,
|
||||
TermsOffsetStart: termsOffsetStart,
|
||||
TermsOffsetEnd: termsOffsetEnd,
|
||||
Turbo: src.Turbo,
|
||||
EtchingBlock: int32(src.EtchingBlock),
|
||||
EtchingTxHash: src.EtchingTxHash.String(),
|
||||
EtchedAt: etchedAt,
|
||||
}, gen.CreateRuneEntryStateParams{
|
||||
BlockHeight: int32(blockHeight),
|
||||
RuneID: runeId,
|
||||
Mints: mints,
|
||||
BurnedAmount: burnedAmount,
|
||||
CompletedAt: completedAt,
|
||||
CompletedAtHeight: completedAtHeight,
|
||||
}, nil
|
||||
RuneID: runeId,
|
||||
Rune: rune,
|
||||
Number: int64(src.Number),
|
||||
Spacers: spacers,
|
||||
Premine: premine,
|
||||
Symbol: src.Symbol,
|
||||
Divisibility: int16(src.Divisibility),
|
||||
Terms: terms,
|
||||
TermsAmount: termsAmount,
|
||||
TermsCap: termsCap,
|
||||
TermsHeightStart: termsHeightStart,
|
||||
TermsHeightEnd: termsHeightEnd,
|
||||
TermsOffsetStart: termsOffsetStart,
|
||||
TermsOffsetEnd: termsOffsetEnd,
|
||||
Turbo: src.Turbo,
|
||||
EtchingBlock: int32(src.EtchingBlock),
|
||||
EtchingTxHash: src.EtchingTxHash.String(),
|
||||
EtchedAt: etchedAt,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// mapRuneTransactionModelToType returns params for creating a new rune transaction and (optionally) a runestone.
|
||||
func mapRuneTransactionTypeToParams(src entity.RuneTransaction) (gen.CreateRuneTransactionParams, *gen.CreateRunestoneParams, error) {
|
||||
func mapRuneEntryStatesTypeToParams(src runes.RuneEntry, blockHeight uint64) (gen.CreateRuneEntryStateParams, error) {
|
||||
runeId := src.RuneId.String()
|
||||
mints, err := numericFromUint128(&src.Mints)
|
||||
if err != nil {
|
||||
return gen.CreateRuneEntryStateParams{}, errors.Wrap(err, "failed to parse mints")
|
||||
}
|
||||
burnedAmount, err := numericFromUint128(&src.BurnedAmount)
|
||||
if err != nil {
|
||||
return gen.CreateRuneEntryStateParams{}, errors.Wrap(err, "failed to parse burned amount")
|
||||
}
|
||||
var completedAt pgtype.Timestamp
|
||||
if !src.CompletedAt.IsZero() {
|
||||
completedAt.Time = src.CompletedAt.UTC()
|
||||
completedAt.Valid = true
|
||||
}
|
||||
var completedAtHeight pgtype.Int4
|
||||
if src.CompletedAtHeight != nil {
|
||||
completedAtHeight.Int32 = int32(*src.CompletedAtHeight)
|
||||
completedAtHeight.Valid = true
|
||||
}
|
||||
|
||||
return gen.CreateRuneEntryStateParams{
|
||||
BlockHeight: int32(blockHeight),
|
||||
RuneID: runeId,
|
||||
Mints: mints,
|
||||
BurnedAmount: burnedAmount,
|
||||
CompletedAt: completedAt,
|
||||
CompletedAtHeight: completedAtHeight,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func mapRuneEntryTypeToParamsBatch(srcs []*runes.RuneEntry) (gen.BatchCreateRuneEntriesPatchedParams, error) {
|
||||
var batchParams gen.BatchCreateRuneEntriesPatchedParams
|
||||
batchParams.RuneIDArr = make([]string, 0, len(srcs))
|
||||
batchParams.RuneArr = make([]string, 0, len(srcs))
|
||||
batchParams.NumberArr = make([]int64, 0, len(srcs))
|
||||
batchParams.SpacersArr = make([]int32, 0, len(srcs))
|
||||
batchParams.PremineArr = make([]pgtype.Numeric, 0, len(srcs))
|
||||
batchParams.SymbolArr = make([]int32, 0, len(srcs))
|
||||
batchParams.DivisibilityArr = make([]int16, 0, len(srcs))
|
||||
batchParams.TermsArr = make([]bool, 0, len(srcs))
|
||||
batchParams.TermsAmountArr = make([]pgtype.Numeric, 0, len(srcs))
|
||||
batchParams.TermsCapArr = make([]pgtype.Numeric, 0, len(srcs))
|
||||
batchParams.TermsHeightStartArr = make([]pgtype.Int4, 0, len(srcs))
|
||||
batchParams.TermsHeightEndArr = make([]pgtype.Int4, 0, len(srcs))
|
||||
batchParams.TermsOffsetStartArr = make([]pgtype.Int4, 0, len(srcs))
|
||||
batchParams.TermsOffsetEndArr = make([]pgtype.Int4, 0, len(srcs))
|
||||
batchParams.TurboArr = make([]bool, 0, len(srcs))
|
||||
batchParams.EtchingBlockArr = make([]int32, 0, len(srcs))
|
||||
batchParams.EtchingTxHashArr = make([]string, 0, len(srcs))
|
||||
batchParams.EtchedAtArr = make([]pgtype.Timestamp, 0, len(srcs))
|
||||
|
||||
for i, src := range srcs {
|
||||
param, err := mapRuneEntryTypeToParams(*src)
|
||||
if err != nil {
|
||||
return gen.BatchCreateRuneEntriesPatchedParams{}, errors.Wrapf(err, "failed to map rune entry to params batch at index %d", i)
|
||||
}
|
||||
|
||||
batchParams.RuneIDArr = append(batchParams.RuneIDArr, param.RuneID)
|
||||
batchParams.RuneArr = append(batchParams.RuneArr, param.Rune)
|
||||
batchParams.NumberArr = append(batchParams.NumberArr, param.Number)
|
||||
batchParams.SpacersArr = append(batchParams.SpacersArr, param.Spacers)
|
||||
batchParams.PremineArr = append(batchParams.PremineArr, param.Premine)
|
||||
batchParams.SymbolArr = append(batchParams.SymbolArr, param.Symbol)
|
||||
batchParams.DivisibilityArr = append(batchParams.DivisibilityArr, param.Divisibility)
|
||||
batchParams.TermsArr = append(batchParams.TermsArr, param.Terms)
|
||||
batchParams.TermsAmountArr = append(batchParams.TermsAmountArr, param.TermsAmount)
|
||||
batchParams.TermsCapArr = append(batchParams.TermsCapArr, param.TermsCap)
|
||||
batchParams.TermsHeightStartArr = append(batchParams.TermsHeightStartArr, param.TermsHeightStart)
|
||||
batchParams.TermsHeightEndArr = append(batchParams.TermsHeightEndArr, param.TermsHeightEnd)
|
||||
batchParams.TermsOffsetStartArr = append(batchParams.TermsOffsetStartArr, param.TermsOffsetStart)
|
||||
batchParams.TermsOffsetEndArr = append(batchParams.TermsOffsetEndArr, param.TermsOffsetEnd)
|
||||
batchParams.TurboArr = append(batchParams.TurboArr, param.Turbo)
|
||||
batchParams.EtchingBlockArr = append(batchParams.EtchingBlockArr, param.EtchingBlock)
|
||||
batchParams.EtchingTxHashArr = append(batchParams.EtchingTxHashArr, param.EtchingTxHash)
|
||||
batchParams.EtchedAtArr = append(batchParams.EtchedAtArr, param.EtchedAt)
|
||||
}
|
||||
|
||||
return batchParams, nil
|
||||
}
|
||||
|
||||
func mapRuneEntryStatesTypeToParamsBatch(srcs []*runes.RuneEntry, blockHeight uint64) (gen.BatchCreateRuneEntryStatesPatchedParams, error) {
|
||||
var batchParams gen.BatchCreateRuneEntryStatesPatchedParams
|
||||
batchParams.RuneIDArr = make([]string, 0, len(srcs))
|
||||
batchParams.BlockHeightArr = make([]int32, 0, len(srcs))
|
||||
batchParams.MintsArr = make([]pgtype.Numeric, 0, len(srcs))
|
||||
batchParams.BurnedAmountArr = make([]pgtype.Numeric, 0, len(srcs))
|
||||
batchParams.CompletedAtArr = make([]pgtype.Timestamp, 0, len(srcs))
|
||||
batchParams.CompletedAtHeightArr = make([]pgtype.Int4, 0, len(srcs))
|
||||
|
||||
for i, src := range srcs {
|
||||
param, err := mapRuneEntryStatesTypeToParams(*src, blockHeight)
|
||||
if err != nil {
|
||||
return gen.BatchCreateRuneEntryStatesPatchedParams{}, errors.Wrapf(err, "failed to map rune entry states to params batch at index %d", i)
|
||||
}
|
||||
|
||||
batchParams.RuneIDArr = append(batchParams.RuneIDArr, param.RuneID)
|
||||
batchParams.BlockHeightArr = append(batchParams.BlockHeightArr, param.BlockHeight)
|
||||
batchParams.MintsArr = append(batchParams.MintsArr, param.Mints)
|
||||
batchParams.BurnedAmountArr = append(batchParams.BurnedAmountArr, param.BurnedAmount)
|
||||
batchParams.CompletedAtArr = append(batchParams.CompletedAtArr, param.CompletedAt)
|
||||
batchParams.CompletedAtHeightArr = append(batchParams.CompletedAtHeightArr, param.CompletedAtHeight)
|
||||
}
|
||||
|
||||
return batchParams, nil
|
||||
}
|
||||
|
||||
func mapRuneTransactionTypeToParams(src entity.RuneTransaction) (gen.CreateRuneTransactionParams, error) {
|
||||
var timestamp pgtype.Timestamp
|
||||
if !src.Timestamp.IsZero() {
|
||||
timestamp.Time = src.Timestamp
|
||||
timestamp.Time = src.Timestamp.UTC()
|
||||
timestamp.Valid = true
|
||||
}
|
||||
inputsBytes, err := json.Marshal(src.Inputs)
|
||||
if err != nil {
|
||||
return gen.CreateRuneTransactionParams{}, nil, errors.Wrap(err, "failed to marshal inputs")
|
||||
return gen.CreateRuneTransactionParams{}, errors.Wrap(err, "failed to marshal inputs")
|
||||
}
|
||||
outputsBytes, err := json.Marshal(src.Outputs)
|
||||
if err != nil {
|
||||
return gen.CreateRuneTransactionParams{}, nil, errors.Wrap(err, "failed to marshal outputs")
|
||||
return gen.CreateRuneTransactionParams{}, errors.Wrap(err, "failed to marshal outputs")
|
||||
}
|
||||
mints := make(map[string]uint128.Uint128)
|
||||
for key, value := range src.Mints {
|
||||
@@ -273,7 +355,7 @@ func mapRuneTransactionTypeToParams(src entity.RuneTransaction) (gen.CreateRuneT
|
||||
}
|
||||
mintsBytes, err := json.Marshal(mints)
|
||||
if err != nil {
|
||||
return gen.CreateRuneTransactionParams{}, nil, errors.Wrap(err, "failed to marshal mints")
|
||||
return gen.CreateRuneTransactionParams{}, errors.Wrap(err, "failed to marshal mints")
|
||||
}
|
||||
burns := make(map[string]uint128.Uint128)
|
||||
for key, value := range src.Burns {
|
||||
@@ -281,16 +363,7 @@ func mapRuneTransactionTypeToParams(src entity.RuneTransaction) (gen.CreateRuneT
|
||||
}
|
||||
burnsBytes, err := json.Marshal(burns)
|
||||
if err != nil {
|
||||
return gen.CreateRuneTransactionParams{}, nil, errors.Wrap(err, "failed to marshal burns")
|
||||
}
|
||||
|
||||
var runestoneParams *gen.CreateRunestoneParams
|
||||
if src.Runestone != nil {
|
||||
params, err := mapRunestoneTypeToParams(*src.Runestone, src.Hash, src.BlockHeight)
|
||||
if err != nil {
|
||||
return gen.CreateRuneTransactionParams{}, nil, errors.Wrap(err, "failed to map runestone to params")
|
||||
}
|
||||
runestoneParams = ¶ms
|
||||
return gen.CreateRuneTransactionParams{}, errors.Wrap(err, "failed to marshal burns")
|
||||
}
|
||||
|
||||
return gen.CreateRuneTransactionParams{
|
||||
@@ -303,7 +376,46 @@ func mapRuneTransactionTypeToParams(src entity.RuneTransaction) (gen.CreateRuneT
|
||||
Mints: mintsBytes,
|
||||
Burns: burnsBytes,
|
||||
RuneEtched: src.RuneEtched,
|
||||
}, runestoneParams, nil
|
||||
}, nil
|
||||
}
|
||||
|
||||
func mapRuneTransactionTypeToParamsBatch(srcs []*entity.RuneTransaction) (gen.BatchCreateRuneTransactionsParams, error) {
|
||||
batchParams := gen.BatchCreateRuneTransactionsParams{
|
||||
HashArr: make([]string, 0, len(srcs)),
|
||||
BlockHeightArr: make([]int32, 0, len(srcs)),
|
||||
IndexArr: make([]int32, 0, len(srcs)),
|
||||
TimestampArr: make([]pgtype.Timestamp, 0, len(srcs)),
|
||||
RuneEtchedArr: make([]bool, 0, len(srcs)),
|
||||
}
|
||||
inputsArr := make([][]byte, 0, len(srcs))
|
||||
outputsArr := make([][]byte, 0, len(srcs))
|
||||
mintsArr := make([][]byte, 0, len(srcs))
|
||||
burnsArr := make([][]byte, 0, len(srcs))
|
||||
|
||||
for i, src := range srcs {
|
||||
param, err := mapRuneTransactionTypeToParams(*src)
|
||||
if err != nil {
|
||||
return gen.BatchCreateRuneTransactionsParams{}, errors.Wrapf(err, "failed to map rune transaction to params batch at index %d", i)
|
||||
}
|
||||
|
||||
batchParams.HashArr = append(batchParams.HashArr, param.Hash)
|
||||
batchParams.BlockHeightArr = append(batchParams.BlockHeightArr, param.BlockHeight)
|
||||
batchParams.IndexArr = append(batchParams.IndexArr, param.Index)
|
||||
batchParams.TimestampArr = append(batchParams.TimestampArr, param.Timestamp)
|
||||
batchParams.RuneEtchedArr = append(batchParams.RuneEtchedArr, param.RuneEtched)
|
||||
|
||||
inputsArr = append(inputsArr, param.Inputs)
|
||||
outputsArr = append(outputsArr, param.Outputs)
|
||||
mintsArr = append(mintsArr, param.Mints)
|
||||
burnsArr = append(burnsArr, param.Burns)
|
||||
}
|
||||
|
||||
batchParams.InputsArr = inputsArr
|
||||
batchParams.OutputsArr = outputsArr
|
||||
batchParams.MintsArr = mintsArr
|
||||
batchParams.BurnsArr = burnsArr
|
||||
|
||||
return batchParams, nil
|
||||
}
|
||||
|
||||
func extractModelRuneTxAndRunestone(src gen.GetRuneTransactionsRow) (gen.RunesTransaction, *gen.RunesRunestone, error) {
|
||||
@@ -488,6 +600,65 @@ func mapRunestoneTypeToParams(src runes.Runestone, txHash chainhash.Hash, blockH
|
||||
return runestoneParams, nil
|
||||
}
|
||||
|
||||
func mapRunestoneTypeToParamsBatch(srcs []*entity.RuneTransaction) (gen.BatchCreateRunestonesPatchedParams, error) {
|
||||
var batchParams gen.BatchCreateRunestonesPatchedParams
|
||||
batchParams.TxHashArr = make([]string, 0, len(srcs))
|
||||
batchParams.BlockHeightArr = make([]int32, 0, len(srcs))
|
||||
batchParams.EtchingArr = make([]bool, 0, len(srcs))
|
||||
batchParams.EtchingDivisibilityArr = make([]pgtype.Int2, 0, len(srcs))
|
||||
batchParams.EtchingPremineArr = make([]pgtype.Numeric, 0, len(srcs))
|
||||
batchParams.EtchingRuneArr = make([]pgtype.Text, 0, len(srcs))
|
||||
batchParams.EtchingSpacersArr = make([]pgtype.Int4, 0, len(srcs))
|
||||
batchParams.EtchingSymbolArr = make([]pgtype.Int4, 0, len(srcs))
|
||||
batchParams.EtchingTermsArr = make([]pgtype.Bool, 0, len(srcs))
|
||||
batchParams.EtchingTermsAmountArr = make([]pgtype.Numeric, 0, len(srcs))
|
||||
batchParams.EtchingTermsCapArr = make([]pgtype.Numeric, 0, len(srcs))
|
||||
batchParams.EtchingTermsHeightStartArr = make([]pgtype.Int4, 0, len(srcs))
|
||||
batchParams.EtchingTermsHeightEndArr = make([]pgtype.Int4, 0, len(srcs))
|
||||
batchParams.EtchingTermsOffsetStartArr = make([]pgtype.Int4, 0, len(srcs))
|
||||
batchParams.EtchingTermsOffsetEndArr = make([]pgtype.Int4, 0, len(srcs))
|
||||
batchParams.EtchingTurboArr = make([]pgtype.Bool, 0, len(srcs))
|
||||
batchParams.EdictsArr = make([][]byte, 0, len(srcs))
|
||||
batchParams.MintArr = make([]pgtype.Text, 0, len(srcs))
|
||||
batchParams.PointerArr = make([]pgtype.Int4, 0, len(srcs))
|
||||
batchParams.CenotaphArr = make([]bool, 0, len(srcs))
|
||||
batchParams.FlawsArr = make([]int32, 0, len(srcs))
|
||||
|
||||
for i, src := range srcs {
|
||||
if src.Runestone == nil {
|
||||
continue
|
||||
}
|
||||
param, err := mapRunestoneTypeToParams(*src.Runestone, src.Hash, src.BlockHeight)
|
||||
if err != nil {
|
||||
return gen.BatchCreateRunestonesPatchedParams{}, errors.Wrapf(err, "failed to map runestone to params batch at index %d", i)
|
||||
}
|
||||
|
||||
batchParams.TxHashArr = append(batchParams.TxHashArr, param.TxHash)
|
||||
batchParams.BlockHeightArr = append(batchParams.BlockHeightArr, param.BlockHeight)
|
||||
batchParams.EtchingArr = append(batchParams.EtchingArr, param.Etching)
|
||||
batchParams.EtchingDivisibilityArr = append(batchParams.EtchingDivisibilityArr, param.EtchingDivisibility)
|
||||
batchParams.EtchingPremineArr = append(batchParams.EtchingPremineArr, param.EtchingPremine)
|
||||
batchParams.EtchingRuneArr = append(batchParams.EtchingRuneArr, param.EtchingRune)
|
||||
batchParams.EtchingSpacersArr = append(batchParams.EtchingSpacersArr, param.EtchingSpacers)
|
||||
batchParams.EtchingSymbolArr = append(batchParams.EtchingSymbolArr, param.EtchingSymbol)
|
||||
batchParams.EtchingTermsArr = append(batchParams.EtchingTermsArr, param.EtchingTerms)
|
||||
batchParams.EtchingTermsAmountArr = append(batchParams.EtchingTermsAmountArr, param.EtchingTermsAmount)
|
||||
batchParams.EtchingTermsCapArr = append(batchParams.EtchingTermsCapArr, param.EtchingTermsCap)
|
||||
batchParams.EtchingTermsHeightStartArr = append(batchParams.EtchingTermsHeightStartArr, param.EtchingTermsHeightStart)
|
||||
batchParams.EtchingTermsHeightEndArr = append(batchParams.EtchingTermsHeightEndArr, param.EtchingTermsHeightEnd)
|
||||
batchParams.EtchingTermsOffsetStartArr = append(batchParams.EtchingTermsOffsetStartArr, param.EtchingTermsOffsetStart)
|
||||
batchParams.EtchingTermsOffsetEndArr = append(batchParams.EtchingTermsOffsetEndArr, param.EtchingTermsOffsetEnd)
|
||||
batchParams.EtchingTurboArr = append(batchParams.EtchingTurboArr, param.EtchingTurbo)
|
||||
batchParams.EdictsArr = append(batchParams.EdictsArr, param.Edicts)
|
||||
batchParams.MintArr = append(batchParams.MintArr, param.Mint)
|
||||
batchParams.PointerArr = append(batchParams.PointerArr, param.Pointer)
|
||||
batchParams.CenotaphArr = append(batchParams.CenotaphArr, param.Cenotaph)
|
||||
batchParams.FlawsArr = append(batchParams.FlawsArr, param.Flaws)
|
||||
}
|
||||
|
||||
return batchParams, nil
|
||||
}
|
||||
|
||||
func mapRunestoneModelToType(src gen.RunesRunestone) (runes.Runestone, error) {
|
||||
runestone := runes.Runestone{
|
||||
Cenotaph: src.Cenotaph,
|
||||
@@ -602,6 +773,42 @@ func mapBalanceModelToType(src gen.RunesBalance) (*entity.Balance, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
func mapBalanceTypeToParams(src entity.Balance) (gen.CreateRuneBalanceParams, error) {
|
||||
amount, err := numericFromUint128(&src.Amount)
|
||||
if err != nil {
|
||||
return gen.CreateRuneBalanceParams{}, errors.Wrap(err, "failed to parse amount")
|
||||
}
|
||||
return gen.CreateRuneBalanceParams{
|
||||
RuneID: src.RuneId.String(),
|
||||
Amount: amount,
|
||||
Pkscript: hex.EncodeToString(src.PkScript),
|
||||
BlockHeight: int32(src.BlockHeight),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func mapBalanceTypeToParamsBatch(srcs []*entity.Balance) (gen.BatchCreateRunesBalancesParams, error) {
|
||||
batchParams := gen.BatchCreateRunesBalancesParams{
|
||||
RuneIDArr: make([]string, 0, len(srcs)),
|
||||
AmountArr: make([]pgtype.Numeric, 0, len(srcs)),
|
||||
PkscriptArr: make([]string, 0, len(srcs)),
|
||||
BlockHeightArr: make([]int32, 0, len(srcs)),
|
||||
}
|
||||
|
||||
for i, src := range srcs {
|
||||
param, err := mapBalanceTypeToParams(*src)
|
||||
if err != nil {
|
||||
return gen.BatchCreateRunesBalancesParams{}, errors.Wrapf(err, "failed to map balance to params batch at index %d", i)
|
||||
}
|
||||
|
||||
batchParams.RuneIDArr = append(batchParams.RuneIDArr, param.RuneID)
|
||||
batchParams.AmountArr = append(batchParams.AmountArr, param.Amount)
|
||||
batchParams.PkscriptArr = append(batchParams.PkscriptArr, param.Pkscript)
|
||||
batchParams.BlockHeightArr = append(batchParams.BlockHeightArr, param.BlockHeight)
|
||||
}
|
||||
|
||||
return batchParams, nil
|
||||
}
|
||||
|
||||
func mapIndexedBlockModelToType(src gen.RunesIndexedBlock) (*entity.IndexedBlock, error) {
|
||||
hash, err := chainhash.NewHashFromStr(src.Hash)
|
||||
if err != nil {
|
||||
@@ -638,6 +845,72 @@ func mapIndexedBlockTypeToParams(src entity.IndexedBlock) (gen.CreateIndexedBloc
|
||||
}, nil
|
||||
}
|
||||
|
||||
func mapRunesUTXOModelToType(src gen.GetRunesUTXOsByPkScriptRow) (entity.RunesUTXO, error) {
|
||||
pkScriptRaw, ok := src.Pkscript.(string)
|
||||
if !ok {
|
||||
return entity.RunesUTXO{}, errors.New("pkscript from database is not string")
|
||||
}
|
||||
pkScript, err := hex.DecodeString(pkScriptRaw)
|
||||
if err != nil {
|
||||
return entity.RunesUTXO{}, errors.Wrap(err, "failed to parse pkscript")
|
||||
}
|
||||
txHash, err := chainhash.NewHashFromStr(src.TxHash)
|
||||
if err != nil {
|
||||
return entity.RunesUTXO{}, errors.Wrap(err, "failed to parse tx hash")
|
||||
}
|
||||
runeIdsRaw, ok := src.RuneIds.([]interface{})
|
||||
if !ok {
|
||||
return entity.RunesUTXO{}, errors.New("src.RuneIds is not a slice")
|
||||
}
|
||||
runeIds := make([]string, 0, len(runeIdsRaw))
|
||||
for i, raw := range runeIdsRaw {
|
||||
runeId, ok := raw.(string)
|
||||
if !ok {
|
||||
return entity.RunesUTXO{}, errors.Errorf("src.RuneIds[%d] is not a string", i)
|
||||
}
|
||||
runeIds = append(runeIds, runeId)
|
||||
}
|
||||
amountsRaw, ok := src.Amounts.([]interface{})
|
||||
if !ok {
|
||||
return entity.RunesUTXO{}, errors.New("amounts from database is not a slice")
|
||||
}
|
||||
amounts := make([]pgtype.Numeric, 0, len(amountsRaw))
|
||||
for i, raw := range amountsRaw {
|
||||
amount, ok := raw.(pgtype.Numeric)
|
||||
if !ok {
|
||||
return entity.RunesUTXO{}, errors.Errorf("src.Amounts[%d] is not pgtype.Numeric", i)
|
||||
}
|
||||
amounts = append(amounts, amount)
|
||||
}
|
||||
if len(runeIds) != len(amounts) {
|
||||
return entity.RunesUTXO{}, errors.New("rune ids and amounts have different lengths")
|
||||
}
|
||||
|
||||
runesBalances := make([]entity.RunesUTXOBalance, 0, len(runeIds))
|
||||
for i := range runeIds {
|
||||
runeId, err := runes.NewRuneIdFromString(runeIds[i])
|
||||
if err != nil {
|
||||
return entity.RunesUTXO{}, errors.Wrap(err, "failed to parse rune id")
|
||||
}
|
||||
amount, err := uint128FromNumeric(amounts[i])
|
||||
if err != nil {
|
||||
return entity.RunesUTXO{}, errors.Wrap(err, "failed to parse amount")
|
||||
}
|
||||
runesBalances = append(runesBalances, entity.RunesUTXOBalance{
|
||||
RuneId: runeId,
|
||||
Amount: lo.FromPtr(amount),
|
||||
})
|
||||
}
|
||||
return entity.RunesUTXO{
|
||||
PkScript: pkScript,
|
||||
OutPoint: wire.OutPoint{
|
||||
Hash: *txHash,
|
||||
Index: uint32(src.TxIdx),
|
||||
},
|
||||
RuneBalances: runesBalances,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func mapOutPointBalanceModelToType(src gen.RunesOutpointBalance) (entity.OutPointBalance, error) {
|
||||
runeId, err := runes.NewRuneIdFromString(src.RuneID)
|
||||
if err != nil {
|
||||
@@ -672,16 +945,16 @@ func mapOutPointBalanceModelToType(src gen.RunesOutpointBalance) (entity.OutPoin
|
||||
}, nil
|
||||
}
|
||||
|
||||
func mapOutPointBalanceTypeToParams(src entity.OutPointBalance) (gen.CreateOutPointBalancesParams, error) {
|
||||
func mapOutPointBalanceTypeToParams(src entity.OutPointBalance) (gen.CreateOutPointBalanceParams, error) {
|
||||
amount, err := numericFromUint128(&src.Amount)
|
||||
if err != nil {
|
||||
return gen.CreateOutPointBalancesParams{}, errors.Wrap(err, "failed to parse amount")
|
||||
return gen.CreateOutPointBalanceParams{}, errors.Wrap(err, "failed to parse amount")
|
||||
}
|
||||
var spentHeight pgtype.Int4
|
||||
if src.SpentHeight != nil {
|
||||
spentHeight = pgtype.Int4{Int32: int32(*src.SpentHeight), Valid: true}
|
||||
}
|
||||
return gen.CreateOutPointBalancesParams{
|
||||
return gen.CreateOutPointBalanceParams{
|
||||
TxHash: src.OutPoint.Hash.String(),
|
||||
TxIdx: int32(src.OutPoint.Index),
|
||||
Pkscript: hex.EncodeToString(src.PkScript),
|
||||
@@ -691,3 +964,31 @@ func mapOutPointBalanceTypeToParams(src entity.OutPointBalance) (gen.CreateOutPo
|
||||
SpentHeight: spentHeight,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func mapOutPointBalanceTypeToParamsBatch(srcs []*entity.OutPointBalance) (gen.BatchCreateRunesOutpointBalancesPatchedParams, error) {
|
||||
var batchParams gen.BatchCreateRunesOutpointBalancesPatchedParams
|
||||
batchParams.TxHashArr = make([]string, 0, len(srcs))
|
||||
batchParams.TxIdxArr = make([]int32, 0, len(srcs))
|
||||
batchParams.PkscriptArr = make([]string, 0, len(srcs))
|
||||
batchParams.RuneIDArr = make([]string, 0, len(srcs))
|
||||
batchParams.AmountArr = make([]pgtype.Numeric, 0, len(srcs))
|
||||
batchParams.BlockHeightArr = make([]int32, 0, len(srcs))
|
||||
batchParams.SpentHeightArr = make([]pgtype.Int4, 0, len(srcs))
|
||||
|
||||
for i, src := range srcs {
|
||||
param, err := mapOutPointBalanceTypeToParams(*src)
|
||||
if err != nil {
|
||||
return gen.BatchCreateRunesOutpointBalancesPatchedParams{}, errors.Wrapf(err, "failed to map outpoint balance to params batch at index %d", i)
|
||||
}
|
||||
|
||||
batchParams.TxHashArr = append(batchParams.TxHashArr, param.TxHash)
|
||||
batchParams.TxIdxArr = append(batchParams.TxIdxArr, param.TxIdx)
|
||||
batchParams.PkscriptArr = append(batchParams.PkscriptArr, param.Pkscript)
|
||||
batchParams.RuneIDArr = append(batchParams.RuneIDArr, param.RuneID)
|
||||
batchParams.AmountArr = append(batchParams.AmountArr, param.Amount)
|
||||
batchParams.BlockHeightArr = append(batchParams.BlockHeightArr, param.BlockHeight)
|
||||
batchParams.SpentHeightArr = append(batchParams.SpentHeightArr, param.SpentHeight)
|
||||
}
|
||||
|
||||
return batchParams, nil
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
@@ -62,7 +63,18 @@ func (r *Repository) GetIndexedBlockByHeight(ctx context.Context, height int64)
|
||||
return indexedBlock, nil
|
||||
}
|
||||
|
||||
func (r *Repository) GetRuneTransactions(ctx context.Context, pkScript []byte, runeId runes.RuneId, fromBlock, toBlock uint64) ([]*entity.RuneTransaction, error) {
|
||||
const maxRuneTransactionsLimit = 10000 // temporary limit to prevent large queries from overwhelming the database
|
||||
|
||||
func (r *Repository) GetRuneTransactions(ctx context.Context, pkScript []byte, runeId runes.RuneId, fromBlock, toBlock uint64, limit int32, offset int32) ([]*entity.RuneTransaction, error) {
|
||||
if limit == -1 {
|
||||
limit = maxRuneTransactionsLimit
|
||||
}
|
||||
if limit < 0 {
|
||||
return nil, errors.Wrap(errs.InvalidArgument, "limit must be -1 or non-negative")
|
||||
}
|
||||
if limit > maxRuneTransactionsLimit {
|
||||
return nil, errors.Wrapf(errs.InvalidArgument, "limit cannot exceed %d", maxRuneTransactionsLimit)
|
||||
}
|
||||
pkScriptParam := []byte(fmt.Sprintf(`[{"pkScript":"%s"}]`, hex.EncodeToString(pkScript)))
|
||||
runeIdParam := []byte(fmt.Sprintf(`[{"runeId":"%s"}]`, runeId.String()))
|
||||
rows, err := r.queries.GetRuneTransactions(ctx, gen.GetRuneTransactionsParams{
|
||||
@@ -77,6 +89,9 @@ func (r *Repository) GetRuneTransactions(ctx context.Context, pkScript []byte, r
|
||||
|
||||
FromBlock: int32(fromBlock),
|
||||
ToBlock: int32(toBlock),
|
||||
|
||||
Limit: limit,
|
||||
Offset: offset,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error during query")
|
||||
@@ -105,6 +120,36 @@ func (r *Repository) GetRuneTransactions(ctx context.Context, pkScript []byte, r
|
||||
return runeTxs, nil
|
||||
}
|
||||
|
||||
func (r *Repository) GetRuneTransaction(ctx context.Context, txHash chainhash.Hash) (*entity.RuneTransaction, error) {
|
||||
row, err := r.queries.GetRuneTransaction(ctx, txHash.String())
|
||||
if err != nil {
|
||||
if errors.Is(err, pgx.ErrNoRows) {
|
||||
return nil, errors.WithStack(errs.NotFound)
|
||||
}
|
||||
return nil, errors.Wrap(err, "error during query")
|
||||
}
|
||||
|
||||
runeTxModel, runestoneModel, err := extractModelRuneTxAndRunestone(gen.GetRuneTransactionsRow(row))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to extract rune transaction and runestone from row")
|
||||
}
|
||||
|
||||
runeTx, err := mapRuneTransactionModelToType(runeTxModel)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to parse rune transaction model")
|
||||
}
|
||||
|
||||
if runestoneModel != nil {
|
||||
runestone, err := mapRunestoneModelToType(*runestoneModel)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to parse runestone model")
|
||||
}
|
||||
runeTx.Runestone = &runestone
|
||||
}
|
||||
|
||||
return &runeTx, nil
|
||||
}
|
||||
|
||||
func (r *Repository) GetRunesBalancesAtOutPoint(ctx context.Context, outPoint wire.OutPoint) (map[runes.RuneId]*entity.OutPointBalance, error) {
|
||||
balances, err := r.queries.GetOutPointBalancesAtOutPoint(ctx, gen.GetOutPointBalancesAtOutPointParams{
|
||||
TxHash: outPoint.Hash.String(),
|
||||
@@ -125,22 +170,59 @@ func (r *Repository) GetRunesBalancesAtOutPoint(ctx context.Context, outPoint wi
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (r *Repository) GetUnspentOutPointBalancesByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64) ([]*entity.OutPointBalance, error) {
|
||||
balances, err := r.queries.GetUnspentOutPointBalancesByPkScript(ctx, gen.GetUnspentOutPointBalancesByPkScriptParams{
|
||||
func (r *Repository) GetRunesUTXOsByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64, limit int32, offset int32) ([]*entity.RunesUTXO, error) {
|
||||
if limit == -1 {
|
||||
limit = math.MaxInt32
|
||||
}
|
||||
if limit < 0 {
|
||||
return nil, errors.Wrap(errs.InvalidArgument, "limit must be -1 or non-negative")
|
||||
}
|
||||
rows, err := r.queries.GetRunesUTXOsByPkScript(ctx, gen.GetRunesUTXOsByPkScriptParams{
|
||||
Pkscript: hex.EncodeToString(pkScript),
|
||||
BlockHeight: int32(blockHeight),
|
||||
Limit: limit,
|
||||
Offset: offset,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error during query")
|
||||
}
|
||||
|
||||
result := make([]*entity.OutPointBalance, 0, len(balances))
|
||||
for _, balanceModel := range balances {
|
||||
balance, err := mapOutPointBalanceModelToType(balanceModel)
|
||||
result := make([]*entity.RunesUTXO, 0, len(rows))
|
||||
for _, row := range rows {
|
||||
utxo, err := mapRunesUTXOModelToType(row)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to parse balance model")
|
||||
return nil, errors.Wrap(err, "failed to parse row model")
|
||||
}
|
||||
result = append(result, &balance)
|
||||
result = append(result, &utxo)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (r *Repository) GetRunesUTXOsByRuneIdAndPkScript(ctx context.Context, runeId runes.RuneId, pkScript []byte, blockHeight uint64, limit int32, offset int32) ([]*entity.RunesUTXO, error) {
|
||||
if limit == -1 {
|
||||
limit = math.MaxInt32
|
||||
}
|
||||
if limit < 0 {
|
||||
return nil, errors.Wrap(errs.InvalidArgument, "limit must be -1 or non-negative")
|
||||
}
|
||||
rows, err := r.queries.GetRunesUTXOsByRuneIdAndPkScript(ctx, gen.GetRunesUTXOsByRuneIdAndPkScriptParams{
|
||||
Pkscript: hex.EncodeToString(pkScript),
|
||||
BlockHeight: int32(blockHeight),
|
||||
RuneIds: []string{runeId.String()},
|
||||
Limit: limit,
|
||||
Offset: offset,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error during query")
|
||||
}
|
||||
|
||||
result := make([]*entity.RunesUTXO, 0, len(rows))
|
||||
for _, row := range rows {
|
||||
utxo, err := mapRunesUTXOModelToType(gen.GetRunesUTXOsByPkScriptRow(row))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to parse row")
|
||||
}
|
||||
result = append(result, &utxo)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
@@ -183,7 +265,7 @@ func (r *Repository) GetRuneEntryByRuneIdBatch(ctx context.Context, runeIds []ru
|
||||
runeEntries := make(map[runes.RuneId]*runes.RuneEntry, len(rows))
|
||||
var errs []error
|
||||
for i, runeEntryModel := range rows {
|
||||
runeEntry, err := mapRuneEntryModelToType(runeEntryModel)
|
||||
runeEntry, err := mapRuneEntryModelToType(gen.GetRuneEntriesRow(runeEntryModel))
|
||||
if err != nil {
|
||||
errs = append(errs, errors.Wrapf(err, "failed to parse rune entry model index %d", i))
|
||||
continue
|
||||
@@ -223,7 +305,7 @@ func (r *Repository) GetRuneEntryByRuneIdAndHeightBatch(ctx context.Context, run
|
||||
runeEntries := make(map[runes.RuneId]*runes.RuneEntry, len(rows))
|
||||
var errs []error
|
||||
for i, runeEntryModel := range rows {
|
||||
runeEntry, err := mapRuneEntryModelToType(gen.GetRuneEntriesByRuneIdsRow(runeEntryModel))
|
||||
runeEntry, err := mapRuneEntryModelToType(gen.GetRuneEntriesRow(runeEntryModel))
|
||||
if err != nil {
|
||||
errs = append(errs, errors.Wrapf(err, "failed to parse rune entry model index %d", i))
|
||||
continue
|
||||
@@ -237,6 +319,62 @@ func (r *Repository) GetRuneEntryByRuneIdAndHeightBatch(ctx context.Context, run
|
||||
return runeEntries, nil
|
||||
}
|
||||
|
||||
func (r *Repository) GetRuneEntries(ctx context.Context, search string, blockHeight uint64, limit int32, offset int32) ([]*runes.RuneEntry, error) {
|
||||
rows, err := r.queries.GetRuneEntries(ctx, gen.GetRuneEntriesParams{
|
||||
Search: search,
|
||||
Height: int32(blockHeight),
|
||||
Limit: limit,
|
||||
Offset: offset,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error during query")
|
||||
}
|
||||
|
||||
runeEntries := make([]*runes.RuneEntry, 0, len(rows))
|
||||
var errs []error
|
||||
for i, model := range rows {
|
||||
runeEntry, err := mapRuneEntryModelToType(model)
|
||||
if err != nil {
|
||||
errs = append(errs, errors.Wrapf(err, "failed to parse rune entry model index %d", i))
|
||||
continue
|
||||
}
|
||||
runeEntries = append(runeEntries, &runeEntry)
|
||||
}
|
||||
if len(errs) > 0 {
|
||||
return nil, errors.Join(errs...)
|
||||
}
|
||||
|
||||
return runeEntries, nil
|
||||
}
|
||||
|
||||
func (r *Repository) GetOngoingRuneEntries(ctx context.Context, search string, blockHeight uint64, limit int32, offset int32) ([]*runes.RuneEntry, error) {
|
||||
rows, err := r.queries.GetOngoingRuneEntries(ctx, gen.GetOngoingRuneEntriesParams{
|
||||
Search: search,
|
||||
Height: int32(blockHeight),
|
||||
Limit: limit,
|
||||
Offset: offset,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error during query")
|
||||
}
|
||||
|
||||
runeEntries := make([]*runes.RuneEntry, 0, len(rows))
|
||||
var errs []error
|
||||
for i, model := range rows {
|
||||
runeEntry, err := mapRuneEntryModelToType(gen.GetRuneEntriesRow(model))
|
||||
if err != nil {
|
||||
errs = append(errs, errors.Wrapf(err, "failed to parse rune entry model index %d", i))
|
||||
continue
|
||||
}
|
||||
runeEntries = append(runeEntries, &runeEntry)
|
||||
}
|
||||
if len(errs) > 0 {
|
||||
return nil, errors.Join(errs...)
|
||||
}
|
||||
|
||||
return runeEntries, nil
|
||||
}
|
||||
|
||||
func (r *Repository) CountRuneEntries(ctx context.Context) (uint64, error) {
|
||||
count, err := r.queries.CountRuneEntries(ctx)
|
||||
if err != nil {
|
||||
@@ -245,30 +383,46 @@ func (r *Repository) CountRuneEntries(ctx context.Context) (uint64, error) {
|
||||
return uint64(count), nil
|
||||
}
|
||||
|
||||
func (r *Repository) GetBalancesByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64) (map[runes.RuneId]*entity.Balance, error) {
|
||||
func (r *Repository) GetBalancesByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64, limit int32, offset int32) ([]*entity.Balance, error) {
|
||||
if limit == -1 {
|
||||
limit = math.MaxInt32
|
||||
}
|
||||
if limit < 0 {
|
||||
return nil, errors.Wrap(errs.InvalidArgument, "limit must be -1 or non-negative")
|
||||
}
|
||||
balances, err := r.queries.GetBalancesByPkScript(ctx, gen.GetBalancesByPkScriptParams{
|
||||
Pkscript: hex.EncodeToString(pkScript),
|
||||
BlockHeight: int32(blockHeight),
|
||||
Limit: limit,
|
||||
Offset: offset,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error during query")
|
||||
}
|
||||
|
||||
result := make(map[runes.RuneId]*entity.Balance, len(balances))
|
||||
result := make([]*entity.Balance, 0, len(balances))
|
||||
for _, balanceModel := range balances {
|
||||
balance, err := mapBalanceModelToType(gen.RunesBalance(balanceModel))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to parse balance model")
|
||||
}
|
||||
result[balance.RuneId] = balance
|
||||
result = append(result, balance)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (r *Repository) GetBalancesByRuneId(ctx context.Context, runeId runes.RuneId, blockHeight uint64) ([]*entity.Balance, error) {
|
||||
func (r *Repository) GetBalancesByRuneId(ctx context.Context, runeId runes.RuneId, blockHeight uint64, limit int32, offset int32) ([]*entity.Balance, error) {
|
||||
if limit == -1 {
|
||||
limit = math.MaxInt32
|
||||
}
|
||||
if limit < 0 {
|
||||
return nil, errors.Wrap(errs.InvalidArgument, "limit must be -1 or non-negative")
|
||||
}
|
||||
balances, err := r.queries.GetBalancesByRuneId(ctx, gen.GetBalancesByRuneIdParams{
|
||||
RuneID: runeId.String(),
|
||||
BlockHeight: int32(blockHeight),
|
||||
Limit: limit,
|
||||
Offset: offset,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error during query")
|
||||
@@ -305,111 +459,135 @@ func (r *Repository) GetBalanceByPkScriptAndRuneId(ctx context.Context, pkScript
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (r *Repository) CreateRuneTransaction(ctx context.Context, tx *entity.RuneTransaction) error {
|
||||
if tx == nil {
|
||||
return nil
|
||||
}
|
||||
txParams, runestoneParams, err := mapRuneTransactionTypeToParams(*tx)
|
||||
func (r *Repository) GetTotalHoldersByRuneIds(ctx context.Context, runeIds []runes.RuneId, blockHeight uint64) (map[runes.RuneId]int64, error) {
|
||||
rows, err := r.queries.GetTotalHoldersByRuneIds(ctx, gen.GetTotalHoldersByRuneIdsParams{
|
||||
RuneIds: lo.Map(runeIds, func(runeId runes.RuneId, _ int) string { return runeId.String() }),
|
||||
BlockHeight: int32(blockHeight),
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to map rune transaction to params")
|
||||
return nil, errors.Wrap(err, "error during query")
|
||||
}
|
||||
if err = r.queries.CreateRuneTransaction(ctx, txParams); err != nil {
|
||||
return errors.Wrap(err, "error during exec CreateRuneTransaction")
|
||||
}
|
||||
if runestoneParams != nil {
|
||||
if err = r.queries.CreateRunestone(ctx, *runestoneParams); err != nil {
|
||||
return errors.Wrap(err, "error during exec CreateRunestone")
|
||||
holders := make(map[runes.RuneId]int64, len(rows))
|
||||
for _, row := range rows {
|
||||
runeId, err := runes.NewRuneIdFromString(row.RuneID)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to parse RuneId")
|
||||
}
|
||||
holders[runeId] = row.Count
|
||||
}
|
||||
return holders, nil
|
||||
}
|
||||
|
||||
func (r *Repository) CreateRuneTransactions(ctx context.Context, txs []*entity.RuneTransaction) error {
|
||||
if len(txs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
txParams, err := mapRuneTransactionTypeToParamsBatch(txs)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to map rune transactions to params")
|
||||
}
|
||||
if err := r.queries.BatchCreateRuneTransactions(ctx, txParams); err != nil {
|
||||
return errors.Wrap(err, "error during exec BatchCreateRuneTransactions")
|
||||
}
|
||||
|
||||
runestoneParams, err := mapRunestoneTypeToParamsBatch(txs)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to map runestones to params")
|
||||
}
|
||||
if err := r.queries.BatchCreateRunestonesPatched(ctx, runestoneParams); err != nil {
|
||||
return errors.Wrap(err, "error during exec BatchCreateRunestones")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Repository) CreateRuneEntry(ctx context.Context, entry *runes.RuneEntry, blockHeight uint64) error {
|
||||
if entry == nil {
|
||||
func (r *Repository) CreateRuneEntries(ctx context.Context, entries []*runes.RuneEntry) error {
|
||||
if len(entries) == 0 {
|
||||
return nil
|
||||
}
|
||||
createParams, _, err := mapRuneEntryTypeToParams(*entry, blockHeight)
|
||||
|
||||
params, err := mapRuneEntryTypeToParamsBatch(entries)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to map rune entry to params")
|
||||
return errors.Wrap(err, "failed to map rune entries to params")
|
||||
}
|
||||
if err = r.queries.CreateRuneEntry(ctx, createParams); err != nil {
|
||||
return errors.Wrap(err, "error during exec CreateRuneEntry")
|
||||
|
||||
if err := r.queries.BatchCreateRuneEntriesPatched(ctx, params); err != nil {
|
||||
return errors.Wrap(err, "error during exec")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Repository) CreateRuneEntryState(ctx context.Context, entry *runes.RuneEntry, blockHeight uint64) error {
|
||||
if entry == nil {
|
||||
func (r *Repository) CreateRuneEntryStates(ctx context.Context, entries []*runes.RuneEntry, blockHeight uint64) error {
|
||||
if len(entries) == 0 {
|
||||
return nil
|
||||
}
|
||||
_, createStateParams, err := mapRuneEntryTypeToParams(*entry, blockHeight)
|
||||
|
||||
params, err := mapRuneEntryStatesTypeToParamsBatch(entries, blockHeight)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to map rune entry to params")
|
||||
return errors.Wrap(err, "failed to map rune entry states to params")
|
||||
}
|
||||
if err = r.queries.CreateRuneEntryState(ctx, createStateParams); err != nil {
|
||||
return errors.Wrap(err, "error during exec CreateRuneEntryState")
|
||||
|
||||
if err := r.queries.BatchCreateRuneEntryStatesPatched(ctx, params); err != nil {
|
||||
return errors.Wrap(err, "error during exec")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Repository) CreateOutPointBalances(ctx context.Context, outPointBalances []*entity.OutPointBalance) error {
|
||||
params := make([]gen.CreateOutPointBalancesParams, 0, len(outPointBalances))
|
||||
for _, balance := range outPointBalances {
|
||||
param, err := mapOutPointBalanceTypeToParams(*balance)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to map outpoint balance to params")
|
||||
}
|
||||
params = append(params, param)
|
||||
if len(outPointBalances) == 0 {
|
||||
return nil
|
||||
}
|
||||
result := r.queries.CreateOutPointBalances(ctx, params)
|
||||
var execErrors []error
|
||||
result.Exec(func(i int, err error) {
|
||||
if err != nil {
|
||||
execErrors = append(execErrors, err)
|
||||
}
|
||||
})
|
||||
if len(execErrors) > 0 {
|
||||
return errors.Wrap(errors.Join(execErrors...), "error during exec")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Repository) SpendOutPointBalances(ctx context.Context, outPoint wire.OutPoint, blockHeight uint64) error {
|
||||
if err := r.queries.SpendOutPointBalances(ctx, gen.SpendOutPointBalancesParams{
|
||||
TxHash: outPoint.Hash.String(),
|
||||
TxIdx: int32(outPoint.Index),
|
||||
SpentHeight: pgtype.Int4{Int32: int32(blockHeight), Valid: true},
|
||||
}); err != nil {
|
||||
params, err := mapOutPointBalanceTypeToParamsBatch(outPointBalances)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to map outpoint balances to params")
|
||||
}
|
||||
|
||||
if err := r.queries.BatchCreateRunesOutpointBalancesPatched(ctx, params); err != nil {
|
||||
return errors.Wrap(err, "error during exec")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Repository) CreateRuneBalances(ctx context.Context, params []datagateway.CreateRuneBalancesParams) error {
|
||||
insertParams := make([]gen.CreateRuneBalanceAtBlockParams, 0, len(params))
|
||||
for _, param := range params {
|
||||
param := param
|
||||
amount, err := numericFromUint128(¶m.Balance)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to convert balance to numeric")
|
||||
}
|
||||
insertParams = append(insertParams, gen.CreateRuneBalanceAtBlockParams{
|
||||
Pkscript: hex.EncodeToString(param.PkScript),
|
||||
BlockHeight: int32(param.BlockHeight),
|
||||
RuneID: param.RuneId.String(),
|
||||
Amount: amount,
|
||||
})
|
||||
func (r *Repository) SpendOutPointBalancesBatch(ctx context.Context, outPoints []wire.OutPoint, blockHeight uint64) error {
|
||||
if len(outPoints) == 0 {
|
||||
return nil
|
||||
}
|
||||
result := r.queries.CreateRuneBalanceAtBlock(ctx, insertParams)
|
||||
var execErrors []error
|
||||
result.Exec(func(i int, err error) {
|
||||
if err != nil {
|
||||
execErrors = append(execErrors, err)
|
||||
}
|
||||
})
|
||||
if len(execErrors) > 0 {
|
||||
return errors.Wrap(errors.Join(execErrors...), "error during exec")
|
||||
|
||||
params := gen.BatchSpendOutpointBalancesParams{
|
||||
TxHashArr: make([]string, 0, len(outPoints)),
|
||||
TxIdxArr: make([]int32, 0, len(outPoints)),
|
||||
SpentHeight: int32(blockHeight),
|
||||
}
|
||||
for _, outPoint := range outPoints {
|
||||
params.TxHashArr = append(params.TxHashArr, outPoint.Hash.String())
|
||||
params.TxIdxArr = append(params.TxIdxArr, int32(outPoint.Index))
|
||||
}
|
||||
|
||||
if err := r.queries.BatchSpendOutpointBalances(ctx, params); err != nil {
|
||||
return errors.Wrap(err, "error during exec")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Repository) CreateRuneBalances(ctx context.Context, balances []*entity.Balance) error {
|
||||
if len(balances) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
params, err := mapBalanceTypeToParamsBatch(balances)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to map rune balances to params")
|
||||
}
|
||||
if err := r.queries.BatchCreateRunesBalances(ctx, params); err != nil {
|
||||
return errors.Wrap(err, "error during exec")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -67,10 +67,11 @@ func New(injector do.Injector) (indexer.IndexerWorker, error) {
|
||||
}
|
||||
|
||||
processor := NewProcessor(runesDg, indexerInfoDg, bitcoinClient, conf.Network, reportingClient, cleanupFuncs)
|
||||
if err := processor.VerifyStates(ctx); err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
if !conf.APIOnly {
|
||||
if err := processor.VerifyStates(ctx); err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Mount API
|
||||
apiHandlers := lo.Uniq(conf.Modules.Runes.APIHandlers)
|
||||
for _, handler := range apiHandlers {
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger"
|
||||
"github.com/gaze-network/uint128"
|
||||
)
|
||||
|
||||
@@ -58,7 +59,8 @@ func ParseFlags(input interface{}) (Flags, error) {
|
||||
}
|
||||
return Flags(u128), nil
|
||||
default:
|
||||
panic("invalid flags input type")
|
||||
logger.Panic("invalid flags input type")
|
||||
return Flags{}, nil
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,11 +1,13 @@
|
||||
package runes
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"slices"
|
||||
|
||||
"github.com/Cleverse/go-utilities/utils"
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger"
|
||||
"github.com/gaze-network/uint128"
|
||||
)
|
||||
|
||||
@@ -29,6 +31,10 @@ var ErrInvalidBase26 = errors.New("invalid base-26 character: must be in the ran
|
||||
func NewRuneFromString(value string) (Rune, error) {
|
||||
n := uint128.From64(0)
|
||||
for i, char := range value {
|
||||
// skip spacers
|
||||
if char == '.' || char == '•' {
|
||||
continue
|
||||
}
|
||||
if i > 0 {
|
||||
n = n.Add(uint128.From64(1))
|
||||
}
|
||||
@@ -115,20 +121,25 @@ func (r Rune) Cmp(other Rune) int {
|
||||
func FirstRuneHeight(network common.Network) uint64 {
|
||||
switch network {
|
||||
case common.NetworkMainnet:
|
||||
return common.HalvingInterval * 4
|
||||
return 840_000
|
||||
case common.NetworkTestnet:
|
||||
return common.HalvingInterval * 12
|
||||
return 2_520_000
|
||||
case common.NetworkFractalMainnet:
|
||||
return 84_000
|
||||
case common.NetworkFractalTestnet:
|
||||
return 84_000
|
||||
}
|
||||
panic("invalid network")
|
||||
logger.Panic(fmt.Sprintf("invalid network: %s", network))
|
||||
return 0
|
||||
}
|
||||
|
||||
func MinimumRuneAtHeight(network common.Network, height uint64) Rune {
|
||||
offset := height + 1
|
||||
interval := common.HalvingInterval / 12
|
||||
interval := network.HalvingInterval() / 12
|
||||
|
||||
// runes are gradually unlocked from rune activation height until the next halving
|
||||
start := FirstRuneHeight(network)
|
||||
end := start + common.HalvingInterval
|
||||
end := start + network.HalvingInterval()
|
||||
|
||||
if offset < start {
|
||||
return (Rune)(unlockSteps[12])
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user