feat: add inscription processor code

Gaze committed 2024-05-27 17:01:56 +07:00
parent bb3c24b472
commit 3bb26d012b
17 changed files with 1371 additions and 27 deletions


@@ -292,3 +292,19 @@ func (d *BitcoinNodeDatasource) GetBlockHeader(ctx context.Context, height int64
return types.ParseMsgBlockHeader(*block, height), nil
}
// GetTransactionOutputs fetches the outputs of a transaction from the Bitcoin node
func (d *BitcoinNodeDatasource) GetTransactionOutputs(ctx context.Context, txHash chainhash.Hash) ([]*types.TxOut, error) {
rawTx, err := d.btcclient.GetRawTransaction(&txHash)
if err != nil {
return nil, errors.Wrap(err, "failed to get raw transaction")
}
msgTx := rawTx.MsgTx()
txOuts := make([]*types.TxOut, 0, len(msgTx.TxOut))
for _, txOut := range msgTx.TxOut {
txOuts = append(txOuts, types.ParseTxOut(txOut))
}
return txOuts, nil
}
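A usage sketch (not part of the commit), assuming an initialized *BitcoinNodeDatasource and the imports already present in this file plus fmt:

func exampleGetOutputs(ctx context.Context, d *BitcoinNodeDatasource) error {
	// hypothetical tx hash; any confirmed transaction works
	hash, err := chainhash.NewHashFromStr("4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b")
	if err != nil {
		return err
	}
	txOuts, err := d.GetTransactionOutputs(ctx, *hash)
	if err != nil {
		return err
	}
	for i, txOut := range txOuts {
		fmt.Printf("output %d: %d sats\n", i, txOut.Value)
	}
	return nil
}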

go.mod

@@ -11,6 +11,7 @@ require (
github.com/gaze-network/uint128 v1.3.0
github.com/gofiber/fiber/v2 v2.52.4
github.com/golang-migrate/migrate/v4 v4.17.1
github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/jackc/pgx/v5 v5.5.5
github.com/mcosta74/pgx-slog v0.3.0
github.com/planxnx/concurrent-stream v0.1.5

go.sum

@@ -107,6 +107,8 @@ github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=


@@ -24,9 +24,15 @@ CREATE TABLE IF NOT EXISTS "brc20_indexed_blocks" (
"hash" TEXT NOT NULL,
"prev_hash" TEXT NOT NULL,
"event_hash" TEXT NOT NULL,
"cumulative_event_hash" TEXT NOT NULL
"cumulative_event_hash" TEXT NOT NULL,
);
CREATE TABLE IF NOT EXISTS "brc20_processor_stats" (
"block_height" INT NOT NULL PRIMARY KEY,
"cursed_inscription_count" INT NOT NULL,
"blessed_inscription_count" INT NOT NULL,
)
CREATE TABLE IF NOT EXISTS "brc20_tickers" (
"tick" TEXT NOT NULL PRIMARY KEY, -- lowercase of original_tick
"original_tick" TEXT NOT NULL,
@@ -108,30 +114,44 @@ CREATE TABLE IF NOT EXISTS "brc20_balances" (
PRIMARY KEY ("pkscript", "ticker", "block_height")
);
CREATE TABLE IF NOT EXISTS "brc20_inscriptions" (
CREATE TABLE IF NOT EXISTS "brc20_inscription_entries" (
"id" TEXT NOT NULL PRIMARY KEY,
"number" BIGINT NOT NULL,
"sequence_number" BIGINT NOT NULL,
"delegate" TEXT, -- delegate inscription id
"metadata" BYTEA,
"metaprotocol" TEXT,
"parent" TEXT, -- parent inscription id
"parents" TEXT[], -- parent inscription id, 0.14 only supports 1 parent per inscription
"pointer" BIGINT,
"content" JSONB NOT NULL, -- can use jsonb because we only track brc20 inscriptions
"content_type" TEXT NOT NULL,
"transfer_count" INT NOT NULL,
"cursed" BOOLEAN NOT NULL, -- inscriptions after jubilee are no longer cursed in 0.14, which affects inscription number
"cursed_for_brc20" BOOLEAN NOT NULL, -- however, inscriptions that would normally be cursed are still considered cursed for brc20
"created_at" TIMESTAMP NOT NULL,
"created_at_height" INT NOT NULL
);
CREATE TABLE IF NOT EXISTS "brc20_inscription_locations" (
CREATE TABLE IF NOT EXISTS "brc20_inscription_states" (
"id" TEXT NOT NULL PRIMARY KEY,
"block_height" INT NOT NULL,
"transfer_count" INT NOT NULL,
);
CREATE TABLE IF NOT EXISTS "brc20_inscription_transfers" (
"inscription_id" TEXT NOT NULL,
"block_height" INT NOT NULL,
"tx_hash" TEXT NOT NULL,
"tx_idx" INT NOT NULL, -- output index
"sat_offset" BIGINT NOT NULL,
"old_satpoint_tx_hash" TEXT NOT NULL,
"old_satpoint_out_idx" INT NOT NULL,
"old_satpoint_offset" BIGINT NOT NULL,
"new_satpoint_tx_hash" TEXT NOT NULL,
"new_satpoint_out_idx" INT NOT NULL,
"new_satpoint_offset" BIGINT NOT NULL,
"new_pkscript" TEXT NOT NULL,
"new_output_value" DECIMAL NOT NULL
"sent_as_fee" BOOLEAN NOT NULL,
PRIMARY KEY ("inscription_id", "block_height")
);
CREATE INDEX IF NOT EXISTS brc20_inscription_locations_tx_hash_tx_idx_idx ON "brc20_inscription_locations" USING BTREE ("tx_hash", "tx_idx");
CREATE INDEX IF NOT EXISTS brc20_inscription_transfers_block_height_idx ON "brc20_inscription_transfers" USING BTREE ("block_height");
CREATE INDEX IF NOT EXISTS brc20_inscription_transfers_new_satpoint_idx ON "brc20_inscription_transfers" USING BTREE ("new_satpoint_tx_hash", "new_satpoint_out_idx", "new_satpoint_offset");
COMMIT;
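A sketch of the lookup the new_satpoint index serves, using pgx/v5 (the driver this repo already depends on); conn, txHash, and outIdx are assumptions:

func findInscriptionsAtOutput(ctx context.Context, conn *pgx.Conn, txHash string, outIdx int32) error {
	rows, err := conn.Query(ctx, `
		SELECT "inscription_id", "new_satpoint_offset"
		FROM "brc20_inscription_transfers"
		WHERE "new_satpoint_tx_hash" = $1 AND "new_satpoint_out_idx" = $2`,
		txHash, outIdx)
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		var id string
		var offset int64
		if err := rows.Scan(&id, &offset); err != nil {
			return err
		}
		// each row is an inscription currently located at this output
		_, _ = id, offset
	}
	return rows.Err()
}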


@@ -2,6 +2,10 @@ package datagateway
import (
"context"
"github.com/btcsuite/btcd/wire"
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
)
type BRC20DataGateway interface {
@@ -18,9 +22,15 @@ type BRC20DataGatewayWithTx interface {
}
type BRC20ReaderDataGateway interface {
// TODO: add methods
GetProcessorStats(ctx context.Context) (*entity.ProcessorStats, error)
GetInscriptionsInOutPoint(ctx context.Context, outPoint wire.OutPoint) (map[ordinals.SatPoint]ordinals.InscriptionId, error)
GetInscriptionEntryById(ctx context.Context, id ordinals.InscriptionId) (*ordinals.InscriptionEntry, error)
}
type BRC20WriterDataGateway interface {
// TODO: add methods
CreateInscriptionEntries(ctx context.Context, blockHeight uint64, entries []*ordinals.InscriptionEntry) error
CreateInscriptionEntryStates(ctx context.Context, blockHeight uint64, entryStates []*ordinals.InscriptionEntry) error
CreateInscriptionTransfers(ctx context.Context, transfers []*entity.InscriptionTransfer) error
CreateProcessorStats(ctx context.Context, stats *entity.ProcessorStats) error
}
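A hedged sketch of consuming the reader side of this interface (dg and the fmt import are assumptions):

func printStats(ctx context.Context, dg datagateway.BRC20ReaderDataGateway) error {
	stats, err := dg.GetProcessorStats(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("indexed up to block %d (%d blessed, %d cursed inscriptions)\n",
		stats.BlockHeight, stats.BlessedInscriptionCount, stats.CursedInscriptionCount)
	return nil
}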


@@ -0,0 +1,13 @@
package entity
import "github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
type InscriptionTransfer struct {
InscriptionId ordinals.InscriptionId
BlockHeight uint64
OldSatPoint ordinals.SatPoint
NewSatPoint ordinals.SatPoint
NewPkScript []byte
NewOutputValue uint64
SentAsFee bool
}


@@ -0,0 +1,8 @@
package entity
type ProcessorStats struct {
BlockHeight uint64
CursedInscriptionCount uint64
BlessedInscriptionCount uint64
LostSats uint64
}


@@ -5,7 +5,7 @@ import (
"encoding/binary"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
"github.com/samber/lo"
@@ -22,7 +22,7 @@ type Envelope struct {
UnrecognizedEvenField bool // True if payload contains unrecognized even field
}
func ParseEnvelopesFromTx(tx *wire.MsgTx) []*Envelope {
func ParseEnvelopesFromTx(tx *types.Transaction) []*Envelope {
envelopes := make([]*Envelope, 0)
for i, txIn := range tx.TxIn {


@@ -6,12 +6,13 @@ import (
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/gaze-network/indexer-network/core/types"
"github.com/samber/lo"
"github.com/stretchr/testify/assert"
)
func TestParseEnvelopesFromTx(t *testing.T) {
testTx := func(t *testing.T, tx *wire.MsgTx, expected []*Envelope) {
testTx := func(t *testing.T, tx *types.Transaction, expected []*Envelope) {
t.Helper()
envelopes := ParseEnvelopesFromTx(tx)
@@ -20,10 +21,10 @@ func TestParseEnvelopesFromTx(t *testing.T) {
testParseWitness := func(t *testing.T, tapScript []byte, expected []*Envelope) {
t.Helper()
tx := &wire.MsgTx{
tx := &types.Transaction{
Version: 2,
LockTime: 0,
TxIn: []*wire.TxIn{
TxIn: []*types.TxIn{
{
Witness: wire.TxWitness{
tapScript,
@@ -55,19 +56,19 @@ func TestParseEnvelopesFromTx(t *testing.T) {
}
t.Run("empty_witness", func(t *testing.T) {
testTx(t, &wire.MsgTx{
testTx(t, &types.Transaction{
Version: 2,
LockTime: 0,
TxIn: []*wire.TxIn{{
TxIn: []*types.TxIn{{
Witness: wire.TxWitness{},
}},
}, []*Envelope{})
})
t.Run("ignore_key_path_spends", func(t *testing.T) {
testTx(t, &wire.MsgTx{
testTx(t, &types.Transaction{
Version: 2,
LockTime: 0,
TxIn: []*wire.TxIn{{
TxIn: []*types.TxIn{{
Witness: wire.TxWitness{
utils.Must(NewPushScriptBuilder().
AddOp(txscript.OP_FALSE).
@@ -80,10 +81,10 @@ func TestParseEnvelopesFromTx(t *testing.T) {
}, []*Envelope{})
})
t.Run("ignore_key_path_spends_with_annex", func(t *testing.T) {
testTx(t, &wire.MsgTx{
testTx(t, &types.Transaction{
Version: 2,
LockTime: 0,
TxIn: []*wire.TxIn{{
TxIn: []*types.TxIn{{
Witness: wire.TxWitness{
utils.Must(NewPushScriptBuilder().
AddOp(txscript.OP_FALSE).
@@ -462,10 +463,10 @@ func TestParseEnvelopesFromTx(t *testing.T) {
t.Run("extract_from_second_input", func(t *testing.T) {
testTx(
t,
&wire.MsgTx{
&types.Transaction{
Version: 2,
LockTime: 0,
TxIn: []*wire.TxIn{{}, {
TxIn: []*types.TxIn{{}, {
Witness: wire.TxWitness{
utils.Must(NewPushScriptBuilder().
AddOp(txscript.OP_FALSE).


@@ -0,0 +1,13 @@
package ordinals
import "github.com/gaze-network/indexer-network/common"
func GetJubileeHeight(network common.Network) uint64 {
switch network {
case common.NetworkMainnet:
return 824544
case common.NetworkTestnet:
return 2544192
}
panic("unsupported network")
}
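A small sketch of how the cutoff is applied (it mirrors the check in the processor below; the helper name is hypothetical):

func isCursedAfterJubilee(cursed bool, blockHeight uint64, network common.Network) bool {
	// after the jubilee height, curses no longer affect the inscription number,
	// but CursedForBRC20 is left untouched
	if cursed && blockHeight > GetJubileeHeight(network) {
		return false
	}
	return cursed
}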


@@ -9,7 +9,7 @@ type Inscription struct {
Delegate *InscriptionId
Metadata []byte
Metaprotocol string
Parent *InscriptionId
Parent *InscriptionId // in 0.14, inscription has only one parent
Pointer *uint64
}
@@ -18,9 +18,9 @@ type InscriptionEntry struct {
Number int64
SequenceNumber uint64
Cursed bool
Vindicated bool
CursedForBRC20 bool
CreatedAt time.Time
CreatedAtHeight uint64
TransferCount uint32
Inscription Inscription
TransferCount uint32
}


@@ -0,0 +1,29 @@
package processors
import (
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
)
type OriginOld struct {
OldSatPoint ordinals.SatPoint
}
type OriginNew struct {
Cursed bool
CursedForBRC20 bool
Fee uint64
Hidden bool
Parent *ordinals.InscriptionId
Pointer *uint64
Reinscription bool
Unbound bool
Inscription ordinals.Inscription
}
type Flotsam struct {
Offset uint64
InscriptionId ordinals.InscriptionId
Tx *types.Transaction
OriginOld *OriginOld // OriginOld and OriginNew are mutually exclusive
OriginNew *OriginNew // OriginOld and OriginNew are mutually exclusive
}
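A hypothetical helper (not in the commit) to make the mutual-exclusivity invariant concrete:

func originKind(f *Flotsam) string {
	switch {
	case f.OriginOld != nil && f.OriginNew == nil:
		return "transfer of an existing inscription"
	case f.OriginNew != nil && f.OriginOld == nil:
		return "newly revealed inscription"
	default:
		return "invalid: exactly one of OriginOld/OriginNew must be set"
	}
}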


@@ -0,0 +1,166 @@
package processors
import (
"context"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/brc20/internal/datagateway"
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
"github.com/gaze-network/indexer-network/pkg/btcclient"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
"github.com/gaze-network/indexer-network/pkg/lru"
"github.com/samber/lo"
)
type Processor struct {
brc20Dg datagateway.BRC20DataGateway
btcClient btcclient.Contract
network common.Network
transferCountLimit uint32 // number of transfers to track per inscription
// block states
flotsamsSentAsFee []*Flotsam
blockReward uint64
// processor stats
cursedInscriptionCount uint64
blessedInscriptionCount uint64
lostSats uint64
// cache
outPointValueCache *lru.Cache[wire.OutPoint, uint64]
// flush buffers
newInscriptionTransfers []*entity.InscriptionTransfer
newInscriptionEntries map[ordinals.InscriptionId]*ordinals.InscriptionEntry
newInscriptionEntryStates map[ordinals.InscriptionId]*ordinals.InscriptionEntry
}
// TODO: move this to config
const outPointValueCacheSize = 100000
func NewInscriptionProcessor(ctx context.Context, brc20Dg datagateway.BRC20DataGateway, btcClient btcclient.Contract, network common.Network, transferCountLimit uint32) (*Processor, error) {
outPointValueCache, err := lru.New[wire.OutPoint, uint64](outPointValueCacheSize)
if err != nil {
return nil, errors.Wrap(err, "failed to create outPointValueCache")
}
stats, err := brc20Dg.GetProcessorStats(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to count cursed inscriptions")
}
return &Processor{
brc20Dg: brc20Dg,
btcClient: btcClient,
network: network,
transferCountLimit: transferCountLimit,
flotsamsSentAsFee: make([]*Flotsam, 0),
blockReward: 0,
cursedInscriptionCount: stats.CursedInscriptionCount,
blessedInscriptionCount: stats.BlessedInscriptionCount,
lostSats: stats.LostSats,
outPointValueCache: outPointValueCache,
newInscriptionTransfers: make([]*entity.InscriptionTransfer, 0),
newInscriptionEntries: make(map[ordinals.InscriptionId]*ordinals.InscriptionEntry),
newInscriptionEntryStates: make(map[ordinals.InscriptionId]*ordinals.InscriptionEntry),
}, nil
}
func (p *Processor) Process(ctx context.Context, blocks []*types.Block) error {
// collect all inputs to prefetch outpoints values
inputs := make([]wire.OutPoint, 0)
for _, block := range blocks {
for _, tx := range block.Transactions {
for _, txIn := range tx.TxIn {
inputs = append(inputs, wire.OutPoint{
Hash: txIn.PreviousOutTxHash,
Index: txIn.PreviousOutIndex,
})
}
}
}
// prefetch outpoint values to cache
_, err := p.getOutPointValues(ctx, inputs)
if err != nil {
return errors.Wrap(err, "failed to prefetch outpoint values")
}
for _, block := range blocks {
p.blockReward = p.getBlockSubsidy(uint64(block.Header.Height))
p.flotsamsSentAsFee = make([]*Flotsam, 0)
for _, tx := range block.Transactions {
if err := p.processTx(ctx, tx, block.Header); err != nil {
return errors.Wrap(err, "failed to process tx")
}
}
// TODO: add brc20 processing
if err := p.flushBlock(ctx, block.Header); err != nil {
return errors.Wrap(err, "failed to flush block")
}
}
return nil
}
func (p *Processor) flushBlock(ctx context.Context, blockHeader types.BlockHeader) error {
brc20DgTx, err := p.brc20Dg.BeginBRC20Tx(ctx)
if err != nil {
return errors.Wrap(err, "failed to begin transaction")
}
defer func() {
if err := brc20DgTx.Rollback(ctx); err != nil {
logger.WarnContext(ctx, "failed to rollback transaction",
slogx.Error(err),
slogx.String("event", "rollback_brc20_insertion"),
)
}
}()
blockHeight := uint64(blockHeader.Height)
// flush new inscription entries
{
newInscriptionEntries := lo.Values(p.newInscriptionEntries)
if err := brc20DgTx.CreateInscriptionEntries(ctx, blockHeight, newInscriptionEntries); err != nil {
return errors.Wrap(err, "failed to create inscription entries")
}
p.newInscriptionEntries = make(map[ordinals.InscriptionId]*ordinals.InscriptionEntry)
}
// flush new inscription entry states
{
newInscriptionEntryStates := lo.Values(p.newInscriptionEntryStates)
if err := brc20DgTx.CreateInscriptionEntryStates(ctx, blockHeight, newInscriptionEntryStates); err != nil {
return errors.Wrap(err, "failed to create inscription entry states")
}
p.newInscriptionEntryStates = make(map[ordinals.InscriptionId]*ordinals.InscriptionEntry)
}
// flush new inscription transfers
{
if err := brc20DgTx.CreateInscriptionTransfers(ctx, p.newInscriptionTransfers); err != nil {
return errors.Wrap(err, "failed to create inscription transfers")
}
p.newInscriptionTransfers = make([]*entity.InscriptionTransfer, 0)
}
// flush processor stats
{
stats := &entity.ProcessorStats{
BlockHeight: blockHeight,
CursedInscriptionCount: p.cursedInscriptionCount,
BlessedInscriptionCount: p.blessedInscriptionCount,
LostSats: p.lostSats,
}
if err := brc20DgTx.CreateProcessorStats(ctx, stats); err != nil {
return errors.Wrap(err, "failed to create processor stats")
}
}
// commit the batched writes; without this, the deferred rollback would discard them
if err := brc20DgTx.Commit(ctx); err != nil {
return errors.Wrap(err, "failed to commit transaction")
}
return nil
}
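A hedged wiring sketch (only NewInscriptionProcessor and Process are from this commit; the datagateway, client, and block source are assumed to come from the rest of the repo):

func runProcessor(ctx context.Context, dg datagateway.BRC20DataGateway, client btcclient.Contract, blocks []*types.Block) error {
	// 100 is an arbitrary per-inscription transfer limit for this sketch
	p, err := NewInscriptionProcessor(ctx, dg, client, common.NetworkMainnet, 100)
	if err != nil {
		return err
	}
	return p.Process(ctx, blocks)
}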


@@ -0,0 +1,429 @@
package processors
import (
"context"
"encoding/json"
"slices"
"strings"
"github.com/btcsuite/btcd/blockchain"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
"github.com/samber/lo"
)
func (p *Processor) processTx(ctx context.Context, tx *types.Transaction, blockHeader types.BlockHeader) error {
floatingInscriptions := make([]*Flotsam, 0)
envelopes := ordinals.ParseEnvelopesFromTx(tx)
totalInputValue := uint64(0)
totalOutputValue := lo.SumBy(tx.TxOut, func(txOut *types.TxOut) uint64 { return uint64(txOut.Value) })
inscribeOffsets := make(map[uint64]*struct {
inscriptionId ordinals.InscriptionId
count int
})
idCounter := uint32(0)
inputValues, err := p.getOutPointValues(ctx, lo.Map(tx.TxIn, func(txIn *types.TxIn, _ int) wire.OutPoint {
return wire.OutPoint{
Hash: txIn.PreviousOutTxHash,
Index: txIn.PreviousOutIndex,
}
}))
if err != nil {
return errors.Wrap(err, "failed to get outpoint values")
}
for i, input := range tx.TxIn {
inputOutPoint := wire.OutPoint{
Hash: input.PreviousOutTxHash,
Index: input.PreviousOutIndex,
}
inputValue := inputValues[inputOutPoint]
// skip coinbase inputs since there can't be an inscription in coinbase
if input.PreviousOutTxHash.IsEqual(&chainhash.Hash{}) {
totalInputValue += p.getBlockSubsidy(uint64(tx.BlockHeight))
continue
}
inscriptions, err := p.getInscriptionsInOutPoint(ctx, inputOutPoint)
if err != nil {
return errors.Wrap(err, "failed to get inscriptions in outpoint")
}
for satPoint, inscriptionId := range inscriptions {
offset := totalInputValue + satPoint.Offset
floatingInscriptions = append(floatingInscriptions, &Flotsam{
Offset: offset,
InscriptionId: inscriptionId,
Tx: tx,
OriginOld: &OriginOld{
OldSatPoint: satPoint,
},
})
if _, ok := inscribeOffsets[offset]; !ok {
inscribeOffsets[offset] = &struct {
inscriptionId ordinals.InscriptionId
count int
}{inscriptionId, 0}
}
inscribeOffsets[offset].count++
}
// offset on output to inscribe new inscriptions from this input
offset := totalInputValue
totalInputValue += inputValue
envelopesInInput := lo.Filter(envelopes, func(envelope *ordinals.Envelope, _ int) bool {
return envelope.InputIndex == uint32(i)
})
for _, envelope := range envelopesInInput {
inscriptionId := ordinals.InscriptionId{
TxHash: tx.TxHash,
Index: idCounter,
}
var cursed, cursedForBRC20 bool
if envelope.UnrecognizedEvenField || // unrecognized even field
envelope.DuplicateField || // duplicate field
envelope.IncompleteField || // incomplete field
envelope.InputIndex != 0 || // not first input
envelope.Offset != 0 || // not first envelope in input
envelope.Inscription.Pointer != nil || // contains pointer
envelope.PushNum || // contains pushnum opcodes
envelope.Stutter { // contains stuttering curse structure
cursed = true
cursedForBRC20 = true
}
if initial, ok := inscribeOffsets[offset]; !cursed && ok {
if initial.count > 1 {
cursed = true // reinscription
cursedForBRC20 = true
} else {
initialInscriptionEntry, err := p.brc20Dg.GetInscriptionEntryById(ctx, initial.inscriptionId)
if err != nil {
return errors.Wrap(err, "failed to get inscription entry")
}
if !initialInscriptionEntry.Cursed {
cursed = true // reinscription curse if initial inscription is not cursed
}
if initialInscriptionEntry.CursedForBRC20 {
cursedForBRC20 = true
}
}
}
// inscriptions are no longer cursed after jubilee, but BRC20 still considers them as cursed
if cursed && uint64(tx.BlockHeight) > ordinals.GetJubileeHeight(p.network) {
cursed = false
}
unbound := inputValue == 0 || envelope.UnrecognizedEvenField
if envelope.Inscription.Pointer != nil && *envelope.Inscription.Pointer < totalOutputValue {
offset = *envelope.Inscription.Pointer
}
floatingInscriptions = append(floatingInscriptions, &Flotsam{
Offset: offset,
InscriptionId: inscriptionId,
Tx: tx,
OriginNew: &OriginNew{
Reinscription: inscribeOffsets[offset] != nil,
Cursed: cursed,
CursedForBRC20: cursedForBRC20,
Fee: 0,
Hidden: false, // we don't care about this field for brc20
Parent: envelope.Inscription.Parent,
Pointer: envelope.Inscription.Pointer,
Unbound: unbound,
Inscription: envelope.Inscription,
},
})
if _, ok := inscribeOffsets[offset]; !ok {
inscribeOffsets[offset] = &struct {
inscriptionId ordinals.InscriptionId
count int
}{inscriptionId, 0}
}
inscribeOffsets[offset].count++
idCounter++
}
}
// parents must exist in floatingInscriptions to be valid
potentialParents := make(map[ordinals.InscriptionId]struct{})
for _, flotsam := range floatingInscriptions {
potentialParents[flotsam.InscriptionId] = struct{}{}
}
for _, flotsam := range floatingInscriptions {
if flotsam.OriginNew != nil && flotsam.OriginNew.Parent != nil {
if _, ok := potentialParents[*flotsam.OriginNew.Parent]; !ok {
// parent not found, ignore parent
flotsam.OriginNew.Parent = nil
}
}
}
// calculate fee for each new inscription
for _, flotsam := range floatingInscriptions {
if flotsam.OriginNew != nil {
flotsam.OriginNew.Fee = (totalInputValue - totalOutputValue) / uint64(idCounter)
}
}
// if tx is coinbase, add inscriptions sent as fee to outputs of this tx
ownInscriptionCount := len(floatingInscriptions)
isCoinbase := tx.TxIn[0].PreviousOutTxHash.IsEqual(&chainhash.Hash{})
if isCoinbase {
floatingInscriptions = append(floatingInscriptions, p.flotsamsSentAsFee...)
}
slices.SortFunc(floatingInscriptions, func(i, j *Flotsam) int {
return int(i.Offset) - int(j.Offset)
})
outputValue := uint64(0)
curIncrIdx := 0
// newLocations := make(map[ordinals.SatPoint][]*Flotsam)
type location struct {
satPoint ordinals.SatPoint
flotsam *Flotsam
sentAsFee bool
}
newLocations := make([]*location, 0)
outputToSumValue := make([]uint64, 0, len(tx.TxOut))
for outIndex, txOut := range tx.TxOut {
end := outputValue + uint64(txOut.Value)
// process all inscriptions that are supposed to be inscribed in this output
for curIncrIdx < len(floatingInscriptions) && floatingInscriptions[curIncrIdx].Offset < end {
newSatPoint := ordinals.SatPoint{
OutPoint: wire.OutPoint{
Hash: tx.TxHash,
Index: uint32(outIndex),
},
Offset: floatingInscriptions[curIncrIdx].Offset - outputValue,
}
// newLocations[newSatPoint] = append(newLocations[newSatPoint], floatingInscriptions[curIncrIdx])
newLocations = append(newLocations, &location{
satPoint: newSatPoint,
flotsam: floatingInscriptions[curIncrIdx],
sentAsFee: isCoinbase && curIncrIdx >= ownInscriptionCount, // if curIncrIdx >= ownInscriptionCount, the inscription came from p.flotsamsSentAsFee
})
curIncrIdx++
}
outputValue = end
outputToSumValue = append(outputToSumValue, outputValue)
p.outPointValueCache.Add(wire.OutPoint{
Hash: tx.TxHash,
Index: uint32(outIndex),
}, uint64(txOut.Value))
}
for _, loc := range newLocations {
satPoint := loc.satPoint
flotsam := loc.flotsam
sentAsFee := loc.sentAsFee
// TODO: not sure if we still need to handle pointer here, it's already handled above.
if flotsam.OriginNew != nil && flotsam.OriginNew.Pointer != nil {
pointer := *flotsam.OriginNew.Pointer
for outIndex, outputValue := range outputToSumValue {
start := uint64(0)
if outIndex > 0 {
start = outputToSumValue[outIndex-1]
}
end := outputValue
if start <= pointer && pointer < end {
satPoint.Offset = pointer - start
break
}
}
}
if err := p.updateInscriptionLocation(ctx, satPoint, flotsam, sentAsFee, tx, blockHeader); err != nil {
return errors.Wrap(err, "failed to update inscription location")
}
}
// handle leftover flotsams (flotsams with offset beyond the total output value)
if isCoinbase {
// if there are leftover inscriptions in coinbase, they are lost permanently
for _, flotsam := range floatingInscriptions[curIncrIdx:] {
newSatPoint := ordinals.SatPoint{
OutPoint: wire.OutPoint{},
Offset: p.lostSats + flotsam.Offset - totalOutputValue,
}
if err := p.updateInscriptionLocation(ctx, newSatPoint, flotsam, false, tx, blockHeader); err != nil {
return errors.Wrap(err, "failed to update inscription location")
}
}
p.lostSats += p.blockReward - totalOutputValue
} else {
// if there are leftover inscriptions in non-coinbase tx, they are stored in p.flotsamsSentAsFee for processing in this block's coinbase tx
for _, flotsam := range floatingInscriptions[curIncrIdx:] {
flotsam.Offset = p.blockReward + flotsam.Offset - totalOutputValue
p.flotsamsSentAsFee = append(p.flotsamsSentAsFee, flotsam)
}
// add fees to block reward
p.blockReward += totalInputValue - totalOutputValue
}
return nil
}
func (p *Processor) updateInscriptionLocation(ctx context.Context, newSatPoint ordinals.SatPoint, flotsam *Flotsam, sentAsFee bool, tx *types.Transaction, blockHeader types.BlockHeader) error {
txOut := tx.TxOut[newSatPoint.OutPoint.Index]
if flotsam.OriginOld != nil {
transfer := &entity.InscriptionTransfer{
InscriptionId: flotsam.InscriptionId,
BlockHeight: uint64(tx.BlockHeight),
OldSatPoint: flotsam.OriginOld.OldSatPoint,
NewSatPoint: newSatPoint,
NewPkScript: txOut.PkScript,
NewOutputValue: uint64(txOut.Value),
SentAsFee: sentAsFee,
}
entry, err := p.getInscriptionEntryById(ctx, flotsam.InscriptionId)
if err != nil {
// skip inscriptions without entry (likely non-brc20 inscriptions)
if errors.Is(err, errs.NotFound) {
return nil
}
return errors.Wrap(err, "failed to get inscription entry")
}
entry.TransferCount++
// don't track transfers that exceed the limit
if entry.TransferCount <= p.transferCountLimit {
p.newInscriptionTransfers = append(p.newInscriptionTransfers, transfer)
p.newInscriptionEntryStates[entry.Id] = entry
}
return nil
}
if flotsam.OriginNew != nil {
origin := flotsam.OriginNew
var inscriptionNumber int64
sequenceNumber := p.cursedInscriptionCount + p.blessedInscriptionCount
if origin.Cursed {
inscriptionNumber = -int64(p.cursedInscriptionCount + 1)
p.cursedInscriptionCount++
} else {
inscriptionNumber = int64(p.blessedInscriptionCount)
p.blessedInscriptionCount++
}
// insert only brc20 inscriptions to save space
if isBRC20Inscription(origin.Inscription) {
transfer := &entity.InscriptionTransfer{
InscriptionId: flotsam.InscriptionId,
BlockHeight: uint64(tx.BlockHeight),
OldSatPoint: ordinals.SatPoint{},
NewSatPoint: newSatPoint,
NewPkScript: txOut.PkScript,
NewOutputValue: uint64(txOut.Value),
SentAsFee: sentAsFee,
}
entry := &ordinals.InscriptionEntry{
Id: flotsam.InscriptionId,
Number: inscriptionNumber,
SequenceNumber: sequenceNumber,
Cursed: origin.Cursed,
CursedForBRC20: origin.CursedForBRC20,
CreatedAt: blockHeader.Timestamp,
CreatedAtHeight: uint64(tx.BlockHeight),
Inscription: origin.Inscription,
TransferCount: 1, // count inscription as first transfer
}
p.newInscriptionTransfers = append(p.newInscriptionTransfers, transfer)
p.newInscriptionEntries[entry.Id] = entry
p.newInscriptionEntryStates[entry.Id] = entry
}
return nil
}
panic("unreachable")
}
func isBRC20Inscription(inscription ordinals.Inscription) bool {
// check content type
if inscription.ContentType != "" {
contentType := inscription.ContentType
if lo.Contains([]string{"text/plain", "application/json"}, contentType) {
return true
}
prefixes := []string{"text/plain;", "application/json;"}
for _, prefix := range prefixes {
if strings.HasPrefix(contentType, prefix) {
return true
}
}
}
// attempt to parse content as json
if inscription.Content != nil {
var parsed interface{}
if err := json.Unmarshal(inscription.Content, &parsed); err == nil {
return true
}
}
return false
}
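For illustration, a typical BRC-20 deploy payload (an assumption about the content this check targets) passes both branches:

func exampleIsBRC20() bool {
	insc := ordinals.Inscription{
		ContentType: "text/plain;charset=utf-8",
		Content:     []byte(`{"p":"brc-20","op":"deploy","tick":"ordi","max":"21000000","lim":"1000"}`),
	}
	// true: the "text/plain;" prefix matches and the content parses as JSON
	return isBRC20Inscription(insc)
}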
func (p *Processor) getOutPointValues(ctx context.Context, outPoints []wire.OutPoint) (map[wire.OutPoint]uint64, error) {
// try to get values from the cache first; note that a zero value is treated as a
// cache miss, so zero-value outputs (e.g. OP_RETURN outputs) are always re-fetched
cacheValues := p.outPointValueCache.MGet(outPoints)
outPointValues := make(map[wire.OutPoint]uint64)
outPointsToFetch := make([]wire.OutPoint, 0)
for i, outPoint := range outPoints {
if cacheValues[i] != 0 {
outPointValues[outPoint] = cacheValues[i]
} else {
outPointsToFetch = append(outPointsToFetch, outPoint)
}
}
// TODO: optimize fetching outpoint values
txHashes := make(map[chainhash.Hash]struct{})
for _, outPoint := range outPointsToFetch {
txHashes[outPoint.Hash] = struct{}{}
}
txOutsByHash := make(map[chainhash.Hash][]*types.TxOut)
for txHash := range txHashes {
txOuts, err := p.btcClient.GetTransactionOutputs(ctx, txHash)
if err != nil {
return nil, errors.Wrap(err, "failed to get transaction")
}
txOutsByHash[txHash] = txOuts
// update cache
for i, txOut := range txOuts {
p.outPointValueCache.Add(wire.OutPoint{Hash: txHash, Index: uint32(i)}, uint64(txOut.Value))
}
}
for i := range outPoints {
if outPointValues[outPoints[i]] == 0 {
outPointValues[outPoints[i]] = uint64(txOutsByHash[outPoints[i].Hash][outPoints[i].Index].Value)
}
}
return outPointValues, nil
}
func (p *Processor) getInscriptionsInOutPoint(ctx context.Context, outPoint wire.OutPoint) (map[ordinals.SatPoint]ordinals.InscriptionId, error) {
inscriptions, err := p.brc20Dg.GetInscriptionsInOutPoint(ctx, outPoint)
if err != nil {
return nil, errors.Wrap(err, "failed to get inscriptions by outpoint")
}
return inscriptions, nil
}
func (p *Processor) getInscriptionEntryById(ctx context.Context, inscriptionId ordinals.InscriptionId) (*ordinals.InscriptionEntry, error) {
if inscriptionEntry, ok := p.newInscriptionEntryStates[inscriptionId]; ok {
return inscriptionEntry, nil
}
inscription, err := p.brc20Dg.GetInscriptionEntryById(ctx, inscriptionId)
if err != nil {
return nil, errors.Wrap(err, "failed to get inscriptions by outpoint")
}
return inscription, nil
}
func (p *Processor) getBlockSubsidy(blockHeight uint64) uint64 {
return uint64(blockchain.CalcBlockSubsidy(int32(blockHeight), p.network.ChainParams()))
}
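A sanity check against Bitcoin's halving schedule (the mainnet subsidy halves every 210,000 blocks), in satoshis:

// p.getBlockSubsidy(0)       == 5_000_000_000 // 50 BTC
// p.getBlockSubsidy(210_000) == 2_500_000_000 // 25 BTC, first halving
// p.getBlockSubsidy(840_000) ==   312_500_000 // 3.125 BTC, fourth halving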


@@ -9,4 +9,5 @@ import (
type Contract interface {
GetTransactionByHash(ctx context.Context, txHash chainhash.Hash) (*types.Transaction, error)
GetTransactionOutputs(ctx context.Context, txHash chainhash.Hash) ([]*types.TxOut, error)
}

pkg/lru/lru.go (new file)

@@ -0,0 +1,267 @@
// Package lru is an LRU-cache package, a modified version of github.com/hashicorp/golang-lru
// TODO: create PR to hashicorp/golang-lru
package lru
import (
"sync"
"github.com/hashicorp/golang-lru/v2/simplelru"
)
const (
// DefaultEvictedBufferSize defines the default buffer size to store evicted key/val
DefaultEvictedBufferSize = 16
)
// Cache is a thread-safe fixed size LRU cache.
type Cache[K comparable, V any] struct {
lru *simplelru.LRU[K, V]
evictedKeys []K
evictedVals []V
onEvictedCB func(k K, v V)
lock sync.RWMutex
}
// New creates an LRU of the given size.
func New[K comparable, V any](size int) (*Cache[K, V], error) {
return NewWithEvict[K, V](size, nil)
}
// NewWithEvict constructs a fixed size cache with the given eviction
// callback.
func NewWithEvict[K comparable, V any](size int, onEvicted func(key K, value V)) (c *Cache[K, V], err error) {
// create a cache with default settings
c = &Cache[K, V]{
onEvictedCB: onEvicted,
}
if onEvicted != nil {
c.initEvictBuffers()
onEvicted = c.onEvicted
}
c.lru, err = simplelru.NewLRU(size, onEvicted)
return
}
func (c *Cache[K, V]) initEvictBuffers() {
c.evictedKeys = make([]K, 0, DefaultEvictedBufferSize)
c.evictedVals = make([]V, 0, DefaultEvictedBufferSize)
}
// onEvicted saves the evicted key/val pair so the externally registered
// callback can be invoked outside of the critical section
func (c *Cache[K, V]) onEvicted(k K, v V) {
c.evictedKeys = append(c.evictedKeys, k)
c.evictedVals = append(c.evictedVals, v)
}
// Purge is used to completely clear the cache.
func (c *Cache[K, V]) Purge() {
var ks []K
var vs []V
c.lock.Lock()
c.lru.Purge()
if c.onEvictedCB != nil && len(c.evictedKeys) > 0 {
ks, vs = c.evictedKeys, c.evictedVals
c.initEvictBuffers()
}
c.lock.Unlock()
// invoke callback outside of critical section
if c.onEvictedCB != nil {
for i := 0; i < len(ks); i++ {
c.onEvictedCB(ks[i], vs[i])
}
}
}
// Add adds a value to the cache. Returns true if an eviction occurred.
func (c *Cache[K, V]) Add(key K, value V) (evicted bool) {
var k K
var v V
c.lock.Lock()
evicted = c.lru.Add(key, value)
if c.onEvictedCB != nil && evicted {
k, v = c.evictedKeys[0], c.evictedVals[0]
c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
}
c.lock.Unlock()
if c.onEvictedCB != nil && evicted {
c.onEvictedCB(k, v)
}
return
}
// Get looks up a key's value from the cache.
func (c *Cache[K, V]) Get(key K) (value V, ok bool) {
c.lock.Lock()
value, ok = c.lru.Get(key)
c.lock.Unlock()
return value, ok
}
// MGet looks up multiple keys' values from the cache.
// Returns a slice of values in the same order as the keys; a value is the zero value if its key is not found.
func (c *Cache[K, V]) MGet(keys []K) (values []V) {
c.lock.Lock()
defer c.lock.Unlock()
values = make([]V, 0, len(keys))
for _, key := range keys {
value, _ := c.lru.Get(key)
values = append(values, value)
}
return values
}
// Contains checks if a key is in the cache, without updating the
// recent-ness or deleting it for being stale.
func (c *Cache[K, V]) Contains(key K) bool {
c.lock.RLock()
containKey := c.lru.Contains(key)
c.lock.RUnlock()
return containKey
}
// Peek returns the key value (or zero value if not found) without updating
// the "recently used"-ness of the key.
func (c *Cache[K, V]) Peek(key K) (value V, ok bool) {
c.lock.RLock()
value, ok = c.lru.Peek(key)
c.lock.RUnlock()
return value, ok
}
// MPeek returns multiple keys' values (or zero values if not found) without updating
// the "recently used"-ness of the keys.
func (c *Cache[K, V]) MPeek(keys []K) (values []V) {
c.lock.RLock()
defer c.lock.RUnlock()
values = make([]V, 0, len(keys))
for _, key := range keys {
value, _ := c.lru.Peek(key)
values = append(values, value)
}
return values
}
// ContainsOrAdd checks if a key is in the cache without updating the
// recent-ness or deleting it for being stale, and if not, adds the value.
// Returns whether found and whether an eviction occurred.
func (c *Cache[K, V]) ContainsOrAdd(key K, value V) (ok, evicted bool) {
var k K
var v V
c.lock.Lock()
if c.lru.Contains(key) {
c.lock.Unlock()
return true, false
}
evicted = c.lru.Add(key, value)
if c.onEvictedCB != nil && evicted {
k, v = c.evictedKeys[0], c.evictedVals[0]
c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
}
c.lock.Unlock()
if c.onEvictedCB != nil && evicted {
c.onEvictedCB(k, v)
}
return false, evicted
}
// PeekOrAdd checks if a key is in the cache without updating the
// recent-ness or deleting it for being stale, and if not, adds the value.
// Returns whether found and whether an eviction occurred.
func (c *Cache[K, V]) PeekOrAdd(key K, value V) (previous V, ok, evicted bool) {
var k K
var v V
c.lock.Lock()
previous, ok = c.lru.Peek(key)
if ok {
c.lock.Unlock()
return previous, true, false
}
evicted = c.lru.Add(key, value)
if c.onEvictedCB != nil && evicted {
k, v = c.evictedKeys[0], c.evictedVals[0]
c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
}
c.lock.Unlock()
if c.onEvictedCB != nil && evicted {
c.onEvictedCB(k, v)
}
return
}
// Remove removes the provided key from the cache.
func (c *Cache[K, V]) Remove(key K) (present bool) {
var k K
var v V
c.lock.Lock()
present = c.lru.Remove(key)
if c.onEvictedCB != nil && present {
k, v = c.evictedKeys[0], c.evictedVals[0]
c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
}
c.lock.Unlock()
if c.onEvictedCB != nil && present {
c.onEvictedCB(k, v)
}
return
}
// Resize changes the cache size.
func (c *Cache[K, V]) Resize(size int) (evicted int) {
var ks []K
var vs []V
c.lock.Lock()
evicted = c.lru.Resize(size)
if c.onEvictedCB != nil && evicted > 0 {
ks, vs = c.evictedKeys, c.evictedVals
c.initEvictBuffers()
}
c.lock.Unlock()
if c.onEvictedCB != nil && evicted > 0 {
for i := 0; i < len(ks); i++ {
c.onEvictedCB(ks[i], vs[i])
}
}
return evicted
}
// RemoveOldest removes the oldest item from the cache.
func (c *Cache[K, V]) RemoveOldest() (key K, value V, ok bool) {
var k K
var v V
c.lock.Lock()
key, value, ok = c.lru.RemoveOldest()
if c.onEvictedCB != nil && ok {
k, v = c.evictedKeys[0], c.evictedVals[0]
c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
}
c.lock.Unlock()
if c.onEvictedCB != nil && ok {
c.onEvictedCB(k, v)
}
return
}
// GetOldest returns the oldest entry
func (c *Cache[K, V]) GetOldest() (key K, value V, ok bool) {
c.lock.RLock()
key, value, ok = c.lru.GetOldest()
c.lock.RUnlock()
return
}
// Keys returns a slice of the keys in the cache, from oldest to newest.
func (c *Cache[K, V]) Keys() []K {
c.lock.RLock()
keys := c.lru.Keys()
c.lock.RUnlock()
return keys
}
// Len returns the number of items in the cache.
func (c *Cache[K, V]) Len() int {
c.lock.RLock()
length := c.lru.Len()
c.lock.RUnlock()
return length
}
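A minimal usage sketch of the cache (types and sizes are arbitrary):

func exampleCache() {
	cache, err := New[string, int](128)
	if err != nil {
		panic(err)
	}
	cache.Add("a", 1)
	cache.Add("b", 2)
	if v, ok := cache.Get("a"); ok {
		_ = v // 1
	}
	// MGet returns zero values for misses, in key order
	vals := cache.MGet([]string{"a", "missing"}) // [1 0]
	_ = vals
}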

pkg/lru/lru_test.go (new file)

@@ -0,0 +1,368 @@
package lru
import (
"crypto/rand"
"math"
"math/big"
"testing"
)
func getRand(tb testing.TB) int64 {
out, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64))
if err != nil {
tb.Fatal(err)
}
return out.Int64()
}
func BenchmarkLRU_Rand(b *testing.B) {
l, err := New[int64, int64](8192)
if err != nil {
b.Fatalf("err: %v", err)
}
trace := make([]int64, b.N*2)
for i := 0; i < b.N*2; i++ {
trace[i] = getRand(b) % 32768
}
b.ResetTimer()
var hit, miss int
for i := 0; i < 2*b.N; i++ {
if i%2 == 0 {
l.Add(trace[i], trace[i])
} else {
if _, ok := l.Get(trace[i]); ok {
hit++
} else {
miss++
}
}
}
b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss))
}
func BenchmarkLRU_Freq(b *testing.B) {
l, err := New[int64, int64](8192)
if err != nil {
b.Fatalf("err: %v", err)
}
trace := make([]int64, b.N*2)
for i := 0; i < b.N*2; i++ {
if i%2 == 0 {
trace[i] = getRand(b) % 16384
} else {
trace[i] = getRand(b) % 32768
}
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
l.Add(trace[i], trace[i])
}
var hit, miss int
for i := 0; i < b.N; i++ {
if _, ok := l.Get(trace[i]); ok {
hit++
} else {
miss++
}
}
b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss))
}
func TestLRU(t *testing.T) {
evictCounter := 0
onEvicted := func(k int, v int) {
if k != v {
t.Fatalf("Evict values not equal (%v!=%v)", k, v)
}
evictCounter++
}
l, err := NewWithEvict(128, onEvicted)
if err != nil {
t.Fatalf("err: %v", err)
}
for i := 0; i < 256; i++ {
l.Add(i, i)
}
if l.Len() != 128 {
t.Fatalf("bad len: %v", l.Len())
}
if evictCounter != 128 {
t.Fatalf("bad evict count: %v", evictCounter)
}
for i, k := range l.Keys() {
if v, ok := l.Get(k); !ok || v != k || v != i+128 {
t.Fatalf("bad key: %v", k)
}
}
for i := 0; i < 128; i++ {
if _, ok := l.Get(i); ok {
t.Fatalf("should be evicted")
}
}
for i := 128; i < 256; i++ {
if _, ok := l.Get(i); !ok {
t.Fatalf("should not be evicted")
}
}
for i := 128; i < 192; i++ {
l.Remove(i)
if _, ok := l.Get(i); ok {
t.Fatalf("should be deleted")
}
}
l.Get(192) // expect 192 to be last key in l.Keys()
for i, k := range l.Keys() {
if (i < 63 && k != i+193) || (i == 63 && k != 192) {
t.Fatalf("out of order key: %v", k)
}
}
{
// test mget
keys := l.Keys()
values := l.MGet(keys)
for i, v := range values {
if keys[i] != v {
t.Fatalf("[%d]bad value: %v:%v", i, keys[i], v)
}
}
}
{
// test mget with missing keys
keys := append([]int{-1}, l.Keys()...)
values := l.MGet(keys)
if len(values) != len(keys) {
t.Fatalf("bad len: %v, expected: %v", len(values), len(keys))
}
if values[0] != 0 {
t.Fatalf("bad value: %v, expected: %v", values[0], 0)
}
for i := 1; i < len(values); i++ {
if keys[i] != values[i] {
t.Fatalf("[%d]bad value: %v:%v", i, keys[i], values[i])
}
}
}
l.Purge()
if l.Len() != 0 {
t.Fatalf("bad len: %v", l.Len())
}
if _, ok := l.Get(200); ok {
t.Fatalf("should contain nothing")
}
}
// test that Add returns true/false if an eviction occurred
func TestLRUAdd(t *testing.T) {
evictCounter := 0
onEvicted := func(k int, v int) {
evictCounter++
}
l, err := NewWithEvict(1, onEvicted)
if err != nil {
t.Fatalf("err: %v", err)
}
if l.Add(1, 1) == true || evictCounter != 0 {
t.Errorf("should not have an eviction")
}
if l.Add(2, 2) == false || evictCounter != 1 {
t.Errorf("should have an eviction")
}
}
// test that Contains doesn't update recent-ness
func TestLRUContains(t *testing.T) {
l, err := New[int, int](2)
if err != nil {
t.Fatalf("err: %v", err)
}
l.Add(1, 1)
l.Add(2, 2)
if !l.Contains(1) {
t.Errorf("1 should be contained")
}
l.Add(3, 3)
if l.Contains(1) {
t.Errorf("Contains should not have updated recent-ness of 1")
}
}
// test that ContainsOrAdd doesn't update recent-ness
func TestLRUContainsOrAdd(t *testing.T) {
l, err := New[int, int](2)
if err != nil {
t.Fatalf("err: %v", err)
}
l.Add(1, 1)
l.Add(2, 2)
contains, evict := l.ContainsOrAdd(1, 1)
if !contains {
t.Errorf("1 should be contained")
}
if evict {
t.Errorf("nothing should be evicted here")
}
l.Add(3, 3)
contains, evict = l.ContainsOrAdd(1, 1)
if contains {
t.Errorf("1 should not have been contained")
}
if !evict {
t.Errorf("an eviction should have occurred")
}
if !l.Contains(1) {
t.Errorf("now 1 should be contained")
}
}
// test that PeekOrAdd doesn't update recent-ness
func TestLRUPeekOrAdd(t *testing.T) {
l, err := New[int, int](2)
if err != nil {
t.Fatalf("err: %v", err)
}
l.Add(1, 1)
l.Add(2, 2)
previous, contains, evict := l.PeekOrAdd(1, 1)
if !contains {
t.Errorf("1 should be contained")
}
if evict {
t.Errorf("nothing should be evicted here")
}
if previous != 1 {
t.Errorf("previous is not equal to 1")
}
l.Add(3, 3)
contains, evict = l.ContainsOrAdd(1, 1)
if contains {
t.Errorf("1 should not have been contained")
}
if !evict {
t.Errorf("an eviction should have occurred")
}
if !l.Contains(1) {
t.Errorf("now 1 should be contained")
}
}
// test that Peek doesn't update recent-ness
func TestLRUPeek(t *testing.T) {
l, err := New[int, int](2)
if err != nil {
t.Fatalf("err: %v", err)
}
l.Add(1, 1)
l.Add(2, 2)
if v, ok := l.Peek(1); !ok || v != 1 {
t.Errorf("1 should be set to 1: %v, %v", v, ok)
}
vals := l.MPeek([]int{-1, 1, 2})
if len(vals) != 3 {
t.Errorf("bad len: %v", len(vals))
}
if vals[0] != 0 {
t.Errorf("bad value: %v, expected: %v", vals[0], 0)
}
if vals[1] != 1 || vals[2] != 2 {
t.Errorf("bad vals: %v", vals)
}
l.Add(3, 3)
if l.Contains(1) {
t.Errorf("should not have updated recent-ness of 1")
}
}
// test that Resize can upsize and downsize
func TestLRUResize(t *testing.T) {
onEvictCounter := 0
onEvicted := func(k int, v int) {
onEvictCounter++
}
l, err := NewWithEvict(2, onEvicted)
if err != nil {
t.Fatalf("err: %v", err)
}
// Downsize
l.Add(1, 1)
l.Add(2, 2)
evicted := l.Resize(1)
if evicted != 1 {
t.Errorf("1 element should have been evicted: %v", evicted)
}
if onEvictCounter != 1 {
t.Errorf("onEvicted should have been called 1 time: %v", onEvictCounter)
}
l.Add(3, 3)
if l.Contains(1) {
t.Errorf("Element 1 should have been evicted")
}
// Upsize
evicted = l.Resize(2)
if evicted != 0 {
t.Errorf("0 elements should have been evicted: %v", evicted)
}
l.Add(4, 4)
if !l.Contains(3) || !l.Contains(4) {
t.Errorf("Cache should have contained 2 elements")
}
}
func TestKeysAndMPeek(t *testing.T) {
l, err := New[int, int](2)
if err != nil {
t.Fatalf("err: %v", err)
}
if vals := l.MPeek(l.Keys()); len(vals) != 0 {
t.Errorf("bad len: %v", len(vals))
}
l.Add(1, 1)
l.Add(2, 2)
keys := l.Keys()
if len(keys) != 2 {
t.Errorf("bad len: %v", len(keys))
}
if keys[0] != 1 || keys[1] != 2 {
t.Errorf("bad keys: %v", keys)
}
vals := l.MPeek([]int{-1, 1, 2})
if len(vals) != 3 {
t.Errorf("bad len: %v", len(vals))
}
if vals[0] != 0 {
t.Errorf("bad value: %v, expected: %v", vals[0], 0)
}
if vals[1] != 1 || vals[2] != 2 {
t.Errorf("bad vals: %v", vals)
}
}