From 9d6e94f2382ca1e29829c848beee165d8181ea11 Mon Sep 17 00:00:00 2001 From: Lavanya Kasturi Date: Tue, 9 Aug 2022 10:33:19 -0700 Subject: [PATCH 01/24] docs: added examples for smart contract API's (#1260) --- docs/openapi.yaml | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/docs/openapi.yaml b/docs/openapi.yaml index 580a3a17..276b9d57 100644 --- a/docs/openapi.yaml +++ b/docs/openapi.yaml @@ -924,7 +924,7 @@ paths: in: path description: Contract identifier formatted as `.` required: true - example: SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.staking-helper + example: SP6P4EJF0VG8V0RB3TQQKJBHDQKEF6NVRD1KZE3C.satoshibles schema: type: string - name: unanchored @@ -985,6 +985,7 @@ paths: in: path description: Contract identifier formatted as `.` required: true + example: SP6P4EJF0VG8V0RB3TQQKJBHDQKEF6NVRD1KZE3C.satoshibles schema: type: string - name: limit @@ -1039,12 +1040,14 @@ paths: in: path required: true description: Stacks address + example: SP6P4EJF0VG8V0RB3TQQKJBHDQKEF6NVRD1KZE3C schema: type: string - name: contract_name in: path required: true description: Contract name + example: satoshibles schema: type: string - name: tip @@ -1081,12 +1084,14 @@ paths: in: path required: true description: Stacks address + example: SP6P4EJF0VG8V0RB3TQQKJBHDQKEF6NVRD1KZE3C schema: type: string - name: contract_name in: path required: true description: Contract name + example: satoshibles schema: type: string - name: map_name @@ -1135,12 +1140,14 @@ paths: in: path required: true description: Stacks address + example: SP6P4EJF0VG8V0RB3TQQKJBHDQKEF6NVRD1KZE3C schema: type: string - name: contract_name in: path required: true description: Contract name + example: satoshibles schema: type: string - name: proof @@ -1184,12 +1191,14 @@ paths: in: path required: true description: Stacks address + example: SP6P4EJF0VG8V0RB3TQQKJBHDQKEF6NVRD1KZE3C schema: type: string - name: contract_name in: path required: true description: Contract name + example: satoshibles schema: type: string - name: function_name From 0d81369affb5a87676b03d25cbc07b032d289374 Mon Sep 17 00:00:00 2001 From: Lavanya Kasturi Date: Thu, 11 Aug 2022 11:16:39 -0700 Subject: [PATCH 02/24] docs: rosetta network options api (#1261) * Test Rosetta- network options api * Rosetta-example-test * Removed extra space * Formatted Json file * Added JSON examples for API's * Renamed file names * Renamed json examples --- ...a-account-balance-request-body.example.json | 10 ++++++++++ ...ccount-identifier-request-body.example.json | 11 +++++++++++ ...t-network-options-request-body.example.json | 9 +++++++++ .../rosetta-block-request-body.example.json | 10 ++++++++++ ...block-transaction-request-body.example.json | 12 ++++++++++++ ...phool-transaction-request-body.example.json | 9 +++++++++ docs/openapi.yaml | 18 ++++++++++++------ 7 files changed, 73 insertions(+), 6 deletions(-) create mode 100644 docs/api/rosetta/rosetta-account-balance-request-body.example.json create mode 100644 docs/api/rosetta/rosetta-account-identifier-request-body.example.json create mode 100644 docs/api/rosetta/rosetta-account-network-options-request-body.example.json create mode 100644 docs/api/rosetta/rosetta-block-request-body.example.json create mode 100644 docs/api/rosetta/rosetta-block-transaction-request-body.example.json create mode 100644 docs/api/rosetta/rosetta-memphool-transaction-request-body.example.json diff --git a/docs/api/rosetta/rosetta-account-balance-request-body.example.json 
b/docs/api/rosetta/rosetta-account-balance-request-body.example.json new file mode 100644 index 00000000..899b9abf --- /dev/null +++ b/docs/api/rosetta/rosetta-account-balance-request-body.example.json @@ -0,0 +1,10 @@ +{ + "network_identifier": { + "blockchain": "stacks", + "network": "mainnet" + }, + "account_identifier": { + "address": "SP2W6477BT5CRWVC5D5RFNPNAR8R2NW63SMMCAWMC", + "metadata": {} + } +} diff --git a/docs/api/rosetta/rosetta-account-identifier-request-body.example.json b/docs/api/rosetta/rosetta-account-identifier-request-body.example.json new file mode 100644 index 00000000..dad0e3d1 --- /dev/null +++ b/docs/api/rosetta/rosetta-account-identifier-request-body.example.json @@ -0,0 +1,11 @@ +{ + "network_identifier": { + "blockchain": "stacks", + "network": "mainnet" + }, + "public_key": { + "hex_bytes": "022d82baea2d041ac281bebafab11571f45db4f163a9e3f8640b1c804a4ac6f662", + "curve_type": "secp256k1" + }, + "metadata": {} +} diff --git a/docs/api/rosetta/rosetta-account-network-options-request-body.example.json b/docs/api/rosetta/rosetta-account-network-options-request-body.example.json new file mode 100644 index 00000000..d7827285 --- /dev/null +++ b/docs/api/rosetta/rosetta-account-network-options-request-body.example.json @@ -0,0 +1,9 @@ +[ + { + "network_identifier": { + "blockchain": "stacks", + "network": "mainnet" + }, + "metadata": {} + } +] diff --git a/docs/api/rosetta/rosetta-block-request-body.example.json b/docs/api/rosetta/rosetta-block-request-body.example.json new file mode 100644 index 00000000..76d63a31 --- /dev/null +++ b/docs/api/rosetta/rosetta-block-request-body.example.json @@ -0,0 +1,10 @@ +{ + "network_identifier": { + "blockchain": "stacks", + "network": "mainnet" + }, + "block_identifier": { + "index": 71107, + "hash": "0xce7e16561150f3a379845f4e96c3dd8f8396e397495821c9eec6b429391c529c" +} +} diff --git a/docs/api/rosetta/rosetta-block-transaction-request-body.example.json b/docs/api/rosetta/rosetta-block-transaction-request-body.example.json new file mode 100644 index 00000000..03e1208f --- /dev/null +++ b/docs/api/rosetta/rosetta-block-transaction-request-body.example.json @@ -0,0 +1,12 @@ +{ + "network_identifier": { + "blockchain": "stacks", + "network": "mainnet" + }, + "block_identifier": { + "hash": "0xce7e16561150f3a379845f4e96c3dd8f8396e397495821c9eec6b429391c529c" + }, + "transaction_identifier": { + "hash": "0x49354cc7b18dc5296c945a8e89f7d758dac14f1ab38d4c33dfe45ec1765ab339" + } +} diff --git a/docs/api/rosetta/rosetta-memphool-transaction-request-body.example.json b/docs/api/rosetta/rosetta-memphool-transaction-request-body.example.json new file mode 100644 index 00000000..31e83bc9 --- /dev/null +++ b/docs/api/rosetta/rosetta-memphool-transaction-request-body.example.json @@ -0,0 +1,9 @@ +{ + "network_identifier": { + "blockchain": "stacks", + "network": "mainnet" + }, + "transaction_identifier": { + "hash": "0xe6761e6ce26b366e1db70da31096f0de47f623e70f0b495b20f658b03bd21cea" + } +} diff --git a/docs/openapi.yaml b/docs/openapi.yaml index 276b9d57..91627164 100644 --- a/docs/openapi.yaml +++ b/docs/openapi.yaml @@ -1996,6 +1996,8 @@ paths: application/json: schema: $ref: ./api/rosetta/rosetta-network-options-request.schema.json + example: + $ref: ./api/rosetta/rosetta-account-network-options-request-body.example.json /rosetta/v1/network/status: post: @@ -2025,6 +2027,8 @@ paths: application/json: schema: $ref: ./api/rosetta/rosetta-network-status-request.schema.json + example: + $ref: 
./api/rosetta/rosetta-account-network-options-request-body.example.json /rosetta/v1/account/balance: post: @@ -2054,6 +2058,8 @@ paths: application/json: schema: $ref: ./api/rosetta/rosetta-account-balance-request.schema.json + example: + $ref: ./api/rosetta/rosetta-account-balance-request-body.example.json /rosetta/v1/block: post: @@ -2081,6 +2087,8 @@ paths: application/json: schema: $ref: ./api/rosetta/rosetta-block-request.schema.json + example: + $ref: ./api/rosetta/rosetta-block-request-body.example.json /rosetta/v1/block/transaction: post: @@ -2108,6 +2116,8 @@ paths: application/json: schema: $ref: ./api/rosetta/rosetta-block-transaction-request.schema.json + example: + $ref: ./api/rosetta/rosetta-block-transaction-request-body.example.json /rosetta/v1/mempool: post: @@ -2135,6 +2145,8 @@ paths: application/json: schema: $ref: ./api/rosetta/rosetta-mempool-request.schema.json + example: + $ref: ./api/rosetta/rosetta-account-network-options-request-body.example.json /rosetta/v1/mempool/transaction: post: @@ -2285,8 +2297,6 @@ paths: application/json: schema: $ref: ./api/rosetta/rosetta-construction-preprocess-response.schema.json - example: - $ref: ./api/rosetta/rosetta-construction-preprocess-response.example.json 400: description: Error @@ -2404,8 +2414,6 @@ paths: application/json: schema: $ref: ./api/bns/namespace-operations/bns-get-namespace-price-response.schema.json - example: - $ref: ./api/bns/namespace-operations/bns-get-namespace-price-response.example.json /v2/prices/names/{name}: get: @@ -2429,8 +2437,6 @@ paths: application/json: schema: $ref: ./api/bns/name-querying/bns-get-name-price-response.schema.json - example: - $ref: ./api/bns/name-querying/bns-get-name-price-response.example.json /v1/namespaces: get: From 7190974e4bdec0c4c3a67f8fcfe371035c5dd541 Mon Sep 17 00:00:00 2001 From: Lavanya Kasturi Date: Wed, 17 Aug 2022 21:26:26 -0700 Subject: [PATCH 03/24] docs: added examples for microblocks, accounts, transactions and non-fungible tokens (#1268) * Added examples for missing API's * fix the hash example for v1/block/{hash} --- docs/openapi.yaml | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/docs/openapi.yaml b/docs/openapi.yaml index 91627164..3859544f 100644 --- a/docs/openapi.yaml +++ b/docs/openapi.yaml @@ -543,7 +543,7 @@ paths: required: true schema: type: string - example: 0x044b6ee5bdc7d3cb16e9a263e892b8060b4f64c72900b02093b9f6fa74ae8a67 + example: 0x3bfcdf84b3012adb544cf0f6df4835f93418c2269a3881885e27b3d58eb82d47 get: summary: Get microblock description: Retrieves a specific microblock by `hash` @@ -619,7 +619,7 @@ paths: - name: hash in: path description: Hash of the block - example: 0xbf06705d0b8c2389eb4b3a19ce72096b7158b158e2cd4ceb27d8de9db19bb3be + example: "0x4839a8b01cfb39ffcc0d07d3db31e848d5adf5279d529ed5062300b9f353ff79" required: true schema: type: string @@ -1382,16 +1382,17 @@ paths: - name: principal in: path description: Stacks address or a contract identifier - example: SP31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZW97B5P0 required: true schema: type: string + example: SP3FBR2AGK5H9QBDH3EEN6DF8EK8JY7RX8QJ5SVTE - name: tx_id in: path description: Transaction id required: true schema: type: string + example: 0x34d79c7cfc2fe525438736733e501a4bf0308a5556e3e080d1e2c0858aad7448 responses: 200: description: Success @@ -1419,29 +1420,31 @@ paths: - name: principal in: path description: Stacks address or a Contract identifier - example: SP31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZW97B5P0 required: true schema: type: string 
+ example: SP31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZW97B5P0 - name: limit in: query description: max number of account transactions to fetch required: false schema: type: integer + example: 20 - name: offset in: query description: index of first account transaction to fetch required: false - example: 42000 schema: type: integer + example: 10 - name: height in: query description: Filter for transactions only at this given block height required: false schema: type: number + example: 66119 - name: unanchored in: query description: Include transaction data from unanchored (i.e. unconfirmed) microblocks @@ -1454,9 +1457,9 @@ paths: in: query description: returned data representing the state up until that point in time, rather than the current block. required: false - example: 60000 schema: type: string + example: 60000 responses: 200: description: Success @@ -1479,21 +1482,23 @@ paths: in: path description: Stacks address required: true - example: SP31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZW97B5P0 schema: type: string + example: SP31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZW97B5P0 - name: block_height in: query description: Optionally get the nonce at a given block height. required: false schema: type: number + example: 66119 - name: block_hash in: query description: Optionally get the nonce at a given block hash. Note - Use either of the query parameters but not both at a time. required: false schema: type: string + example: 0x72d53f3cba39e149dcd42708e535bdae03d73e60d2fe853aaf61c0b392f521e9 responses: 200: @@ -1526,13 +1531,14 @@ paths: required: false schema: type: integer + example: 20 - name: offset in: query description: index of first account assets to fetch required: false - example: 42000 schema: type: integer + example: 42000 - name: unanchored in: query description: Include transaction data from unanchored (i.e. unconfirmed) microblocks @@ -1545,9 +1551,9 @@ paths: in: query description: returned data representing the state at that point in time, rather than the current block. Note - Use either of the query parameters but not both at a time. required: false - example: 60000 schema: type: string + example: 60000 responses: 200: description: Success @@ -2849,7 +2855,7 @@ paths: in: path description: Height of block required: true - example: 69057 + example: 66119 schema: type: integer - name: limit @@ -3187,6 +3193,7 @@ paths: required: false schema: type: integer + example: 1 - name: offset in: query description: index of first tokens to fetch @@ -3207,7 +3214,7 @@ paths: /extended/v1/tokens/{contractId}/nft/metadata: get: operationId: get_contract_nft_metadata - summary: Non fungible tokens metadata for contract id + summary: Non fungible tokens metadata for contract ID description: Retrieves metadata for non fungible tokens for a given contract id. More information on Non-Fungible Tokens on the Stacks blockchain can be found [here](https://docs.stacks.co/write-smart-contracts/tokens#non-fungible-tokens-nfts). 
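The example values added in this patch are enough to exercise these routes end to end. A minimal TypeScript sketch of paging an account's transactions with them, assuming a stacks-blockchain-api instance on its default port 3999 and a Node 18+ runtime with global fetch (the host, port, and helper name are illustrative assumptions, not part of this patch):

async function listAccountTxs(): Promise<void> {
  // Example values documented in the diff above; the base URL is assumed.
  const base = 'http://localhost:3999';
  const principal = 'SP31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZW97B5P0';
  const url = `${base}/extended/v1/address/${principal}/transactions?limit=20&offset=10&unanchored=true`;
  const resp = await fetch(url);
  if (!resp.ok) throw new Error(`HTTP ${resp.status}`);
  const page = (await resp.json()) as { total: number; results: unknown[] };
  console.log(`fetched ${page.results.length} of ${page.total} transactions`);
}
listAccountTxs().catch(console.error);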
tags: - Non-Fungible Tokens From 057c541b8c31402b6ff823cce0e3ed435ebe74a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafael=20C=C3=A1rdenas?= Date: Thu, 18 Aug 2022 09:45:16 -0500 Subject: [PATCH 04/24] fix: refresh materialized views concurrently (#1270) * feat: refresh materialized views concurrently * fix: do not refresh concurrently on tests --- src/datastore/postgres-store.ts | 14 +-- ...1660595195398_materialized_view_indexes.ts | 98 +++++++++++++++++++ 2 files changed, 106 insertions(+), 6 deletions(-) create mode 100644 src/migrations/1660595195398_materialized_view_indexes.ts diff --git a/src/datastore/postgres-store.ts b/src/datastore/postgres-store.ts index aa8d7a50..bc202f2d 100644 --- a/src/datastore/postgres-store.ts +++ b/src/datastore/postgres-store.ts @@ -1131,7 +1131,8 @@ export class PgDataStore } async getChainTip( - client: ClientBase + client: ClientBase, + useMaterializedView = true ): Promise<{ blockHeight: number; blockHash: string; indexBlockHash: string }> { const currentTipBlock = await client.query<{ block_height: number; @@ -1141,7 +1142,7 @@ export class PgDataStore // The `chain_tip` materialized view is not available during event replay. // Since `getChainTip()` is used heavily during event ingestion, we'll fall back to // a classic query. - this.eventReplay + this.eventReplay || !useMaterializedView ? ` SELECT block_height, block_hash, index_block_hash FROM blocks @@ -1181,7 +1182,7 @@ export class PgDataStore // Sanity check: ensure incoming microblocks have a `parent_index_block_hash` that matches the API's // current known canonical chain tip. We assume this holds true so incoming microblock data is always // treated as being built off the current canonical anchor block. - const chainTip = await this.getChainTip(client); + const chainTip = await this.getChainTip(client, false); const nonCanonicalMicroblock = data.microblocks.find( mb => mb.parent_index_block_hash !== chainTip.indexBlockHash ); @@ -1312,7 +1313,7 @@ export class PgDataStore async update(data: DataStoreBlockUpdateData): Promise { const tokenMetadataQueueEntries: DbTokenMetadataQueueEntry[] = []; await this.queryTx(async client => { - const chainTip = await this.getChainTip(client); + const chainTip = await this.getChainTip(client, false); await this.handleReorg(client, data.block, chainTip.blockHeight); // If the incoming block is not of greater height than current chain tip, then store data as non-canonical. const isCanonical = data.block.block_height > chainTip.blockHeight; @@ -3564,7 +3565,7 @@ export class PgDataStore async updateMempoolTxs({ mempoolTxs: txs }: { mempoolTxs: DbMempoolTx[] }): Promise { const updatedTxs: DbMempoolTx[] = []; await this.queryTx(async client => { - const chainTip = await this.getChainTip(client); + const chainTip = await this.getChainTip(client, false); for (const tx of txs) { const result = await client.query( ` @@ -5434,7 +5435,8 @@ export class PgDataStore if (this.eventReplay && skipDuringEventReplay) { return; } - await client.query(`REFRESH MATERIALIZED VIEW ${viewName}`); + const concurrently = isProdEnv ? 
'CONCURRENTLY' : ''; + await client.query(`REFRESH MATERIALIZED VIEW ${concurrently} ${viewName}`); } async getSmartContractByTrait(args: { diff --git a/src/migrations/1660595195398_materialized_view_indexes.ts b/src/migrations/1660595195398_materialized_view_indexes.ts new file mode 100644 index 00000000..68db1bfd --- /dev/null +++ b/src/migrations/1660595195398_materialized_view_indexes.ts @@ -0,0 +1,98 @@ +/* eslint-disable @typescript-eslint/camelcase */ +import { MigrationBuilder, ColumnDefinitions } from 'node-pg-migrate'; + +export const shorthands: ColumnDefinitions | undefined = undefined; + +export async function up(pgm: MigrationBuilder): Promise { + // Add LIMIT 1 to chain_tip view so we can add the uniqueness index for `block_height`. + pgm.dropMaterializedView('chain_tip'); + pgm.createMaterializedView('chain_tip', {}, ` + WITH block_tip AS ( + SELECT block_height, block_hash, index_block_hash + FROM blocks + WHERE block_height = (SELECT MAX(block_height) FROM blocks WHERE canonical = TRUE) + ), + microblock_tip AS ( + SELECT microblock_hash, microblock_sequence + FROM microblocks, block_tip + WHERE microblocks.parent_index_block_hash = block_tip.index_block_hash + AND microblock_canonical = true AND canonical = true + ORDER BY microblock_sequence DESC + LIMIT 1 + ), + microblock_count AS ( + SELECT COUNT(*)::INTEGER AS microblock_count + FROM microblocks + WHERE canonical = TRUE AND microblock_canonical = TRUE + ), + tx_count AS ( + SELECT COUNT(*)::INTEGER AS tx_count + FROM txs + WHERE canonical = TRUE AND microblock_canonical = TRUE + AND block_height <= (SELECT MAX(block_height) FROM blocks WHERE canonical = TRUE) + ), + tx_count_unanchored AS ( + SELECT COUNT(*)::INTEGER AS tx_count_unanchored + FROM txs + WHERE canonical = TRUE AND microblock_canonical = TRUE + ) + SELECT *, block_tip.block_height AS block_count + FROM block_tip + LEFT JOIN microblock_tip ON TRUE + LEFT JOIN microblock_count ON TRUE + LEFT JOIN tx_count ON TRUE + LEFT JOIN tx_count_unanchored ON TRUE + LIMIT 1 + `); + + pgm.addIndex('chain_tip', 'block_height', { unique: true }); + pgm.addIndex('mempool_digest', 'digest', { unique: true }); + pgm.addIndex('nft_custody', ['asset_identifier', 'value'], { unique: true }); + pgm.addIndex('nft_custody_unanchored', ['asset_identifier', 'value'], { unique: true }); +} + +export async function down(pgm: MigrationBuilder): Promise { + pgm.dropIndex('chain_tip', 'block_height', { unique: true, ifExists: true }); + pgm.dropIndex('mempool_digest', 'digest', { unique: true, ifExists: true }); + pgm.dropIndex('nft_custody', ['asset_identifier', 'value'], { unique: true, ifExists: true }); + pgm.dropIndex('nft_custody_unanchored', ['asset_identifier', 'value'], { unique: true, ifExists: true }); + + pgm.dropMaterializedView('chain_tip'); + pgm.createMaterializedView('chain_tip', {}, ` + WITH block_tip AS ( + SELECT block_height, block_hash, index_block_hash + FROM blocks + WHERE block_height = (SELECT MAX(block_height) FROM blocks WHERE canonical = TRUE) + ), + microblock_tip AS ( + SELECT microblock_hash, microblock_sequence + FROM microblocks, block_tip + WHERE microblocks.parent_index_block_hash = block_tip.index_block_hash + AND microblock_canonical = true AND canonical = true + ORDER BY microblock_sequence DESC + LIMIT 1 + ), + microblock_count AS ( + SELECT COUNT(*)::INTEGER AS microblock_count + FROM microblocks + WHERE canonical = TRUE AND microblock_canonical = TRUE + ), + tx_count AS ( + SELECT COUNT(*)::INTEGER AS tx_count + FROM txs + WHERE canonical = 
TRUE AND microblock_canonical = TRUE + AND block_height <= (SELECT MAX(block_height) FROM blocks WHERE canonical = TRUE) + ), + tx_count_unanchored AS ( + SELECT COUNT(*)::INTEGER AS tx_count_unanchored + FROM txs + WHERE canonical = TRUE AND microblock_canonical = TRUE + ) + SELECT *, block_tip.block_height AS block_count + FROM block_tip + LEFT JOIN microblock_tip ON TRUE + LEFT JOIN microblock_count ON TRUE + LEFT JOIN tx_count ON TRUE + LEFT JOIN tx_count_unanchored ON TRUE + `); +} From ce912dc3778071586db836e888b070fe714f822e Mon Sep 17 00:00:00 2001 From: semantic-release-bot Date: Thu, 18 Aug 2022 14:58:07 +0000 Subject: [PATCH 05/24] chore(release): 4.1.2 [skip ci] ## [4.1.2](https://github.com/hirosystems/stacks-blockchain-api/compare/v4.1.1...v4.1.2) (2022-08-18) ### Bug Fixes * refresh materialized views concurrently ([#1270](https://github.com/hirosystems/stacks-blockchain-api/issues/1270)) ([057c541](https://github.com/hirosystems/stacks-blockchain-api/commit/057c541b8c31402b6ff823cce0e3ed435ebe74a8)) --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6cd84f5e..91bb6359 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,10 @@ +## [4.1.2](https://github.com/hirosystems/stacks-blockchain-api/compare/v4.1.1...v4.1.2) (2022-08-18) + + +### Bug Fixes + +* refresh materialized views concurrently ([#1270](https://github.com/hirosystems/stacks-blockchain-api/issues/1270)) ([057c541](https://github.com/hirosystems/stacks-blockchain-api/commit/057c541b8c31402b6ff823cce0e3ed435ebe74a8)) + ## [4.1.1](https://github.com/hirosystems/stacks-blockchain-api/compare/v4.1.0...v4.1.1) (2022-08-03) From a640eb136cf01d5068c395d408318978e559f9f7 Mon Sep 17 00:00:00 2001 From: Lavanya Kasturi Date: Tue, 23 Aug 2022 08:42:33 -0500 Subject: [PATCH 06/24] docs: cleaned up OpenAPI.yaml for consistency with all the examples (#1282) * Added examples for missing API's * fix the hash example for v1/block/{hash} * Fixed examples for string types * Updated examples to follow yaml syntax --- docs/openapi.yaml | 199 ++++++++++++++++++++++------------------------ 1 file changed, 97 insertions(+), 102 deletions(-) diff --git a/docs/openapi.yaml b/docs/openapi.yaml index 3859544f..329c25bf 100644 --- a/docs/openapi.yaml +++ b/docs/openapi.yaml @@ -139,7 +139,7 @@ paths: required: true schema: type: string - example: 2N4M94S1ZPt8HfxydXzL2P7qyzgVq7MHWts + example: "2N4M94S1ZPt8HfxydXzL2P7qyzgVq7MHWts" post: summary: Add testnet BTC tokens to address description: | @@ -192,25 +192,25 @@ paths: in: query description: max number of transactions to fetch required: false - example: 100 schema: type: integer default: 96 maximum: 200 + example: 100 - name: offset in: query description: index of first transaction to fetch required: false - example: 42000 schema: type: integer + example: 42000 - name: type in: query description: Filter by transaction type required: false - example: coinbase schema: type: array + example: coinbase items: type: string enum: [coinbase, token_transfer, smart_contract, contract_call, poison_microblock] @@ -218,7 +218,6 @@ paths: in: query description: Include transaction data from unanchored (i.e. unconfirmed) microblocks required: false - example: true schema: type: boolean example: true @@ -248,9 +247,9 @@ paths: in: query description: Filter to only return transactions with this sender address. 
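As the parameter description above says, sender_address narrows mempool results to a single sender. A sketch of that query with the example address from this diff (same assumed local endpoint and Node 18+ fetch as the other sketches; total and results follow the documented mempool list schema):

async function mempoolBySender(): Promise<void> {
  const sender = 'SP1GPBP8NBRXDRJBFQBV7KMAZX1Z7W2RFWJEH0V10'; // example value from the diff
  const url = `http://localhost:3999/extended/v1/tx/mempool?sender_address=${sender}&limit=100`;
  const resp = await fetch(url);
  if (!resp.ok) throw new Error(`HTTP ${resp.status}`);
  const { total, results } = (await resp.json()) as { total: number; results: unknown[] };
  console.log(`${results.length} of ${total} pending txs from ${sender}`);
}
mempoolBySender().catch(console.error);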
required: false - example: SP1GPBP8NBRXDRJBFQBV7KMAZX1Z7W2RFWJEH0V10 schema: type: string + example: "SP1GPBP8NBRXDRJBFQBV7KMAZX1Z7W2RFWJEH0V10" - name: recipient_address in: query description: Filter to only return transactions with this recipient address (only applicable for STX transfer tx types). @@ -267,18 +266,18 @@ paths: in: query description: max number of mempool transactions to fetch required: false - example: 100 schema: type: integer default: 96 maximum: 200 + example: 100 - name: offset in: query description: index of first mempool transaction to fetch required: false - example: 42000 schema: type: integer + example: 42000 - name: unanchored in: query description: Include transaction data from unanchored (i.e. unconfirmed) microblocks @@ -323,9 +322,9 @@ paths: in: query description: index of first mempool transaction to fetch required: false - example: 42000 schema: type: integer + example: 42000 responses: 200: description: List of dropped mempool transactions @@ -342,9 +341,9 @@ paths: in: query description: Array of transaction ids required: true - example: 0x0a411719e3bfde95f9e227a2d7f8fac3d6c646b1e6cc186db0e2838a2c6cd9c0 schema: type: array + example: "0x0a411719e3bfde95f9e227a2d7f8fac3d6c646b1e6cc186db0e2838a2c6cd9c0" items: type: string - name: event_offset @@ -396,9 +395,9 @@ paths: in: path description: Hash of transaction required: true - example: 0x0a411719e3bfde95f9e227a2d7f8fac3d6c646b1e6cc186db0e2838a2c6cd9c0 schema: type: string + example: "0x0a411719e3bfde95f9e227a2d7f8fac3d6c646b1e6cc186db0e2838a2c6cd9c0" - name: event_offset in: query schema: @@ -446,9 +445,9 @@ paths: in: path description: Hash of transaction required: true - example: 0x0a411719e3bfde95f9e227a2d7f8fac3d6c646b1e6cc186db0e2838a2c6cd9c0 schema: type: string + example: "0x0a411719e3bfde95f9e227a2d7f8fac3d6c646b1e6cc186db0e2838a2c6cd9c0" get: summary: Get Raw Transaction tags: @@ -481,7 +480,7 @@ paths: schema: type: string format: binary - example: binary format of 00000000010400bed38c2aadffa348931bcb542880ff79d607afec000000000000000000000000000000c800012b0b1fff6cccd0974966dcd665835838f0985be508e1322e09fb3d751eca132c492bda720f9ef1768d14fdabed6127560ba52d5e3ac470dcb60b784e97dc88c9030200000000000516df0ba3e79792be7be5e50a370289accfc8c9e032000000000000303974657374206d656d6f00000000000000000000000000000000000000000000000000 + example: binary format of 00000000010400bed38c2aadffa348931bcb542880ff79d607afec000000000000000000000000000000c800012b0b1fff6cccd0974966dcd665835838f0985be508e1322e09fb3d751eca132c492bda720f9ef1768d14fdabed6127560ba52d5e3ac470dcb60b784e97dc88c9030200000000000516df0ba3e79792be7be5e50a370289accfc8c9e032000000000000303974657374206d656d6f00000000000000000000000000000000000000000000000000 responses: 200: description: Transaction id of successful post of a raw tx to the node's mempool @@ -489,7 +488,7 @@ paths: text/plain: schema: type: string - example: '"e161978626f216b2141b156ade10501207ae535fa365a13ef5d7a7c9310a09f2"' + example: "e161978626f216b2141b156ade10501207ae535fa365a13ef5d7a7c9310a09f2" 400: description: Rejections result in a 400 error content: @@ -523,10 +522,9 @@ paths: in: query description: Index of the first microblock to fetch required: false - example: 42000 schema: type: integer - example: 1000 + example: 42000 responses: 200: description: List of microblocks @@ -543,7 +541,8 @@ paths: required: true schema: type: string - example: 0x3bfcdf84b3012adb544cf0f6df4835f93418c2269a3881885e27b3d58eb82d47 + example: 
"0x3bfcdf84b3012adb544cf0f6df4835f93418c2269a3881885e27b3d58eb82d47" + get: summary: Get microblock description: Retrieves a specific microblock by `hash` @@ -619,10 +618,10 @@ paths: - name: hash in: path description: Hash of the block - example: "0x4839a8b01cfb39ffcc0d07d3db31e848d5adf5279d529ed5062300b9f353ff79" required: true schema: type: string + example: "0x4839a8b01cfb39ffcc0d07d3db31e848d5adf5279d529ed5062300b9f353ff79" get: summary: Get block by hash description: Retrieves block details of a specific block for a given chain height. You can use the hash from your latest block ('get_block_list' API) to get your block details. @@ -650,9 +649,9 @@ paths: in: path description: Height of the block required: true - example: 10000 schema: type: number + example: 10000 get: summary: Get block by height description: Retrieves block details of a specific block at a given block height @@ -682,7 +681,7 @@ paths: required: true schema: type: string - example: 0x00000000000000000002bba732926cf68b6eda3e2cdbc2a85af79f10efeeeb10 + example: "0x00000000000000000002bba732926cf68b6eda3e2cdbc2a85af79f10efeeeb10" get: summary: Get block by burnchain block hash description: Retrieves block details of a specific block for a given burnchain block hash @@ -756,9 +755,9 @@ paths: in: query description: index of the first items to fetch required: false - example: 42000 schema: type: integer + example: 42000 responses: 200: description: List of burnchain reward recipients and amounts @@ -781,9 +780,9 @@ paths: in: path description: Reward slot holder recipient address. Should either be in the native burnchain's format (e.g. B58 for Bitcoin), or if a STX principal address is provided it will be encoded as into the equivalent burnchain format required: true - example: 36hQtSEXBMevo5chpxhfAGiCTSC34QKgda schema: type: string + example: "36hQtSEXBMevo5chpxhfAGiCTSC34QKgda" - name: limit in: query description: max number of items to fetch @@ -794,9 +793,9 @@ paths: in: query description: index of the first items to fetch required: false - example: 42000 schema: type: integer + example: 42000 responses: 200: description: List of burnchain reward recipients and amounts @@ -827,9 +826,9 @@ paths: in: query description: index of first rewards to fetch required: false - example: 42000 schema: type: integer + example: 42000 responses: 200: description: List of burnchain reward recipients and amounts @@ -851,9 +850,9 @@ paths: in: path description: Reward recipient address. Should either be in the native burnchain's format (e.g. B58 for Bitcoin), or if a STX principal address is provided it will be encoded as into the equivalent burnchain format required: true - example: 36hQtSEXBMevo5chpxhfAGiCTSC34QKgda schema: type: string + example: "36hQtSEXBMevo5chpxhfAGiCTSC34QKgda" - name: limit in: query description: max number of rewards to fetch @@ -864,9 +863,9 @@ paths: in: query description: index of first rewards to fetch required: false - example: 42000 schema: type: integer + example: 42000 responses: 200: description: List of burnchain reward recipients and amounts @@ -888,9 +887,9 @@ paths: in: path description: Reward recipient address. Should either be in the native burnchain's format (e.g. 
B58 for Bitcoin), or if a STX principal address is provided it will be encoded as into the equivalent burnchain format required: true - example: 36hQtSEXBMevo5chpxhfAGiCTSC34QKgda schema: type: string + example: "36hQtSEXBMevo5chpxhfAGiCTSC34QKgda" responses: 200: description: List of burnchain reward recipients and amounts @@ -924,9 +923,9 @@ paths: in: path description: Contract identifier formatted as `.` required: true - example: SP6P4EJF0VG8V0RB3TQQKJBHDQKEF6NVRD1KZE3C.satoshibles schema: type: string + example: "SP6P4EJF0VG8V0RB3TQQKJBHDQKEF6NVRD1KZE3C.satoshibles" - name: unanchored in: query description: Include transaction data from unanchored (i.e. unconfirmed) microblocks @@ -985,9 +984,9 @@ paths: in: path description: Contract identifier formatted as `.` required: true - example: SP6P4EJF0VG8V0RB3TQQKJBHDQKEF6NVRD1KZE3C.satoshibles schema: type: string + example: "SP6P4EJF0VG8V0RB3TQQKJBHDQKEF6NVRD1KZE3C.satoshibles" - name: limit in: query description: max number of contract events to fetch @@ -998,9 +997,9 @@ paths: in: query description: index of first contract event to fetch required: false - example: 42000 schema: type: integer + example: 42000 - name: unanchored in: query description: Include transaction data from unanchored (i.e. unconfirmed) microblocks @@ -1047,9 +1046,9 @@ paths: in: path required: true description: Contract name - example: satoshibles schema: type: string + example: "satoshibles" - name: tip in: query schema: @@ -1084,16 +1083,16 @@ paths: in: path required: true description: Stacks address - example: SP6P4EJF0VG8V0RB3TQQKJBHDQKEF6NVRD1KZE3C schema: type: string + example: "SP6P4EJF0VG8V0RB3TQQKJBHDQKEF6NVRD1KZE3C" - name: contract_name in: path required: true description: Contract name - example: satoshibles schema: type: string + example: satoshibles - name: map_name in: path required: true @@ -1140,16 +1139,16 @@ paths: in: path required: true description: Stacks address - example: SP6P4EJF0VG8V0RB3TQQKJBHDQKEF6NVRD1KZE3C schema: type: string + example: "SP6P4EJF0VG8V0RB3TQQKJBHDQKEF6NVRD1KZE3C" - name: contract_name in: path required: true description: Contract name - example: satoshibles schema: type: string + example: satoshibles - name: proof in: query description: Returns object without the proof field if set to 0 @@ -1191,16 +1190,16 @@ paths: in: path required: true description: Stacks address - example: SP6P4EJF0VG8V0RB3TQQKJBHDQKEF6NVRD1KZE3C schema: type: string + example: "SP6P4EJF0VG8V0RB3TQQKJBHDQKEF6NVRD1KZE3C" - name: contract_name in: path required: true description: Contract name - example: satoshibles schema: type: string + example: satoshibles - name: function_name in: path required: true @@ -1238,9 +1237,9 @@ paths: in: path description: Stacks address or a Contract identifier required: true - example: SP31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZW97B5P0 schema: type: string + example: "SP31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZW97B5P0" - name: unanchored in: query description: Include transaction data from unanchored (i.e. unconfirmed) microblocks @@ -1253,9 +1252,9 @@ paths: in: query description: returned data representing the state up until that point in time, rather than the current block. required: false - example: 60000 schema: type: string + example: 60000 responses: 200: description: Success @@ -1277,14 +1276,13 @@ paths: - name: principal in: path description: Stacks address or a Contract identifier. 
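A principal path parameter accepts either a standard Stacks address or a fully qualified contract identifier, so the same call serves both. A sketch of fetching balances for the example principal (the base URL and the stx.balance field shape are assumptions based on the documented balances schema):

async function getBalances(): Promise<void> {
  const principal = 'SP31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZW97B5P0'; // example value from the diff
  const resp = await fetch(
    `http://localhost:3999/extended/v1/address/${principal}/balances?unanchored=true`
  );
  if (!resp.ok) throw new Error(`HTTP ${resp.status}`);
  const balances = (await resp.json()) as { stx: { balance: string } };
  // The balance is denominated in micro-STX (6 decimals).
  console.log(`STX balance: ${balances.stx.balance} uSTX`);
}
getBalances().catch(console.error);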
- example: SP31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZW97B5P0 required: true schema: type: string + example: "SP31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZW97B5P0" - name: unanchored in: query description: Include transaction data from unanchored (i.e. unconfirmed) microblocks. - example: true required: false schema: type: boolean @@ -1294,9 +1292,9 @@ paths: in: query description: returned data representing the state up until that point in time, rather than the current block. Note - Use either of the query parameters but not both at a time. required: false - example: 60000 schema: type: string + example: 60000 responses: 200: description: Success @@ -1320,11 +1318,11 @@ paths: parameters: - name: principal in: path - description: Stacks address or a Contract identifier - example: SP31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZW97B5P0 + description: Stacks address or a Contract identifier required: true schema: type: string + example: "SP31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZW97B5P0" - name: limit in: query description: max number of account transactions to fetch @@ -1336,9 +1334,9 @@ paths: in: query description: index of first account transaction to fetch required: false - example: 42000 schema: type: integer + example: 42000 - name: height in: query description: Filter for transactions only at this given block height @@ -1358,9 +1356,9 @@ paths: in: query description: returned data representing the state up until that point in time, rather than the current block. Note - Use either of the query parameters but not both at a time. required: false - example: 60000 schema: type: string + example: 60000 responses: 200: description: Success @@ -1385,14 +1383,14 @@ paths: required: true schema: type: string - example: SP3FBR2AGK5H9QBDH3EEN6DF8EK8JY7RX8QJ5SVTE + example: "SP3FBR2AGK5H9QBDH3EEN6DF8EK8JY7RX8QJ5SVTE" - name: tx_id in: path description: Transaction id required: true schema: type: string - example: 0x34d79c7cfc2fe525438736733e501a4bf0308a5556e3e080d1e2c0858aad7448 + example: "0x34d79c7cfc2fe525438736733e501a4bf0308a5556e3e080d1e2c0858aad7448" responses: 200: description: Success @@ -1423,7 +1421,7 @@ paths: required: true schema: type: string - example: SP31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZW97B5P0 + example: "SP31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZW97B5P0" - name: limit in: query description: max number of account transactions to fetch @@ -1484,7 +1482,7 @@ paths: required: true schema: type: string - example: SP31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZW97B5P0 + example: "SP31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZW97B5P0" - name: block_height in: query description: Optionally get the nonce at a given block height. 
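Because block_height and block_hash are mutually exclusive on the nonces route, a caller picks exactly one. A sketch of the lookup at the example height (base URL assumed; possible_next_nonce is the field named in the documented nonces schema):

async function getNonces(): Promise<void> {
  const principal = 'SP31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZW97B5P0';
  // Pass block_height OR block_hash, never both, per the description above.
  const resp = await fetch(
    `http://localhost:3999/extended/v1/address/${principal}/nonces?block_height=66119`
  );
  if (!resp.ok) throw new Error(`HTTP ${resp.status}`);
  const nonces = (await resp.json()) as { possible_next_nonce: number };
  console.log(`possible next nonce: ${nonces.possible_next_nonce}`);
}
getNonces().catch(console.error);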
@@ -1498,7 +1496,7 @@ paths: required: false schema: type: string - example: 0x72d53f3cba39e149dcd42708e535bdae03d73e60d2fe853aaf61c0b392f521e9 + example: "0x72d53f3cba39e149dcd42708e535bdae03d73e60d2fe853aaf61c0b392f521e9" responses: 200: @@ -1522,9 +1520,9 @@ paths: in: path description: Stacks address or a Contract identifier required: true - example: SP31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZW97B5P0 schema: type: string + example: "SP31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZW97B5P0" - name: limit in: query description: max number of account assets to fetch @@ -1578,9 +1576,9 @@ paths: in: path description: Stacks address or a Contract identifier required: true - example: SP31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZW97B5P0 schema: type: string + example: "SP31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZW97B5P0" - name: limit in: query description: number of items to return @@ -1591,9 +1589,9 @@ paths: in: query description: number of items to skip required: false - example: 42000 schema: type: integer + example: 42000 - name: height in: query description: Filter for transfers only at this given block height @@ -1612,9 +1610,9 @@ paths: in: query description: returned data representing the state up until that point in time, rather than the current block. Note - Use either of the query parameters but not both at a time. required: false - example: 60000 schema: type: string + example: 60000 responses: 200: description: Success @@ -1641,9 +1639,9 @@ paths: in: path description: Stacks address or a Contract identifier required: true - example: SP31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZW97B5P0 schema: type: string + example: "SP31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZW97B5P0" - name: limit in: query description: number of items to return @@ -1654,9 +1652,9 @@ paths: in: query description: number of items to skip required: false - example: 42000 schema: type: integer + example: 42000 - name: unanchored in: query description: Include transaction data from unanchored (i.e. unconfirmed) microblocks @@ -1669,9 +1667,9 @@ paths: in: query description: returned data representing the state up until that point in time, rather than the current block. Note - Use either of the query parameters but not both at a time. required: false - example: 60000 schema: type: string + example: 60000 responses: 200: description: Success @@ -1699,9 +1697,9 @@ paths: in: path description: Stacks address or a Contract identifier required: true - example: SP31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZW97B5P0 schema: type: string + example: "SP31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZW97B5P0" - name: proof in: query description: Returns object without the proof field if set to 0 @@ -1926,9 +1924,9 @@ paths: - in: path name: id required: true - example: 0xcf8b233f19f6c07d2dc1963302d2436efd36e9afac127bf6582824a13961c06d schema: type: string + example: "0xcf8b233f19f6c07d2dc1963302d2436efd36e9afac127bf6582824a13961c06d" description: The hex hash string for a block or transaction, account address, or contract address - in: query name: include_metadata @@ -2410,9 +2408,9 @@ paths: in: path description: the namespace to fetch price for required: true - example: id schema: type: string + example: id responses: 200: description: Success @@ -2433,9 +2431,9 @@ paths: in: path description: the name to query price information for required: true - example: muneeb.id schema: type: string + example: muneeb.id responses: 200: description: Success @@ -2473,16 +2471,16 @@ paths: in: path description: the namespace to fetch names from. 
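Namespace results come back 100 names per page, so larger namespaces are walked with the page query parameter. A sketch using the documented example values (tld id, page 22), assuming the endpoint returns a plain JSON array of name strings as the BNS docs describe:

async function namesInNamespace(): Promise<void> {
  const resp = await fetch('http://localhost:3999/v1/namespaces/id/names?page=22');
  if (!resp.ok) throw new Error(`HTTP ${resp.status}`);
  const names = (await resp.json()) as string[];
  console.log(`page 22 returned ${names.length} names, first: ${names[0]}`);
}
namesInNamespace().catch(console.error);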
required: true - example: id schema: type: string + example: id - name: page in: query description: namespace values are defaulted to page 1 with 100 results. You can query specific page results by using the 'page' query parameter. required: false - example: 22 schema: type: integer + example: 22 responses: 200: description: Success @@ -2521,9 +2519,9 @@ paths: in: query description: names are defaulted to page 1 with 100 results. You can query specific page results by using the 'page' query parameter. required: false - example: 22 schema: type: integer + example: 22 responses: 200: description: Success @@ -2554,9 +2552,9 @@ paths: in: path description: fully-qualified name required: true - example: muneeb.id schema: type: string + example: muneeb.id responses: 200: description: Success @@ -2595,9 +2593,9 @@ paths: in: path description: fully-qualified name required: true - example: id.blockstack schema: type: string + example: id.blockstack responses: 200: description: Success @@ -2621,9 +2619,9 @@ paths: in: path description: fully-qualified name required: true - example: bar.test schema: type: string + example: bar.test responses: 200: description: Success @@ -2662,16 +2660,16 @@ paths: in: path description: fully-qualified name required: true - example: muneeb.id schema: type: string + example: muneeb.id - name: zoneFileHash in: path description: zone file hash required: true - example: b100a68235244b012854a95f9114695679002af9 schema: type: string + example: "b100a68235244b012854a95f9114695679002af9" responses: 200: description: Success @@ -2710,16 +2708,16 @@ paths: in: path description: the layer-1 blockchain for the address required: true - example: bitcoin schema: type: string + example: bitcoin - name: address in: path description: the address to lookup required: true - example: 1QJQxDas5JhdiXhEbNS14iNjr8auFT96GP schema: type: string + example: "1QJQxDas5JhdiXhEbNS14iNjr8auFT96GP" responses: 200: description: Success @@ -2783,9 +2781,9 @@ paths: # in: path # description: transaction id # required: true -# example: d04d708472ea3c147f50e43264efdb1535f71974053126dc4db67b3ac19d41fe # schema: # type: string +# example: "d04d708472ea3c147f50e43264efdb1535f71974053126dc4db67b3ac19d41fe" # responses: # 200: # description: Success @@ -2816,23 +2814,23 @@ paths: in: path description: Hash of block required: true - example: 0x0a83d82a65460a9e711f85a44616350280040b75317dbe486a923c1131b5ff99 schema: type: string + example: "0x0a83d82a65460a9e711f85a44616350280040b75317dbe486a923c1131b5ff99" - name: limit in: query description: max number of transactions to fetch required: false - example: 10 schema: type: integer + example: 10 - name: offset in: query description: index of first transaction to fetch required: false - example: 42000 schema: type: integer + example: 42000 responses: 200: description: List of Transactions @@ -2855,28 +2853,27 @@ paths: in: path description: Height of block required: true - example: 66119 schema: type: integer + example: 66119 - name: limit in: query description: max number of transactions to fetch required: false - example: 10 schema: type: integer + example: 10 - name: offset in: query description: index of first transaction to fetch required: false - example: 42000 schema: type: integer + example: 42000 - name: unanchored in: query description: Include transaction data from unanchored (i.e. 
unconfirmed) microblocks required: false - example: true schema: type: boolean example: true @@ -2903,28 +2900,27 @@ paths: in: path description: Transactions for the address required: true - example: SP197DVH8KTJGX4STM61QN0WJV8Y9QJWXV83ZGNR9 schema: type: string + example: "SP197DVH8KTJGX4STM61QN0WJV8Y9QJWXV83ZGNR9" - name: limit in: query description: max number of transactions to fetch required: false - example: 90 schema: type: integer + example: 90 - name: offset in: query description: index of first transaction to fetch required: false - example: 42000 schema: type: integer + example: 42000 - name: unanchored in: query description: Include transaction data from unanchored (i.e. unconfirmed) microblocks required: false - example: true schema: type: boolean example: true @@ -2959,10 +2955,9 @@ paths: in: query description: index of first tokens to fetch required: false - example: 42000 schema: type: integer - example: 500 + example: 42000 responses: 200: description: List of fungible tokens metadata @@ -2989,16 +2984,16 @@ paths: in: query description: token owner's STX address or Smart Contract ID required: true - example: SPNWZ5V2TPWGQGVDR6T7B6RQ4XMGZ4PXTEE0VQ0S.marketplace-v3 schema: type: string + example: "SPNWZ5V2TPWGQGVDR6T7B6RQ4XMGZ4PXTEE0VQ0S.marketplace-v3" - name: asset_identifiers in: query description: identifiers of the token asset classes to filter for required: false - example: SPQZF23W7SEYBFG5JQ496NMY0G7379SRYEDREMSV.Candy::candy schema: type: array + example: "SPQZF23W7SEYBFG5JQ496NMY0G7379SRYEDREMSV.Candy::candy" items: type: string - name: limit @@ -3012,10 +3007,10 @@ paths: in: query description: index of first tokens to fetch required: false - example: 42000 schema: type: integer default: 0 + example: 42000 - name: unanchored in: query description: whether or not to include tokens from unconfirmed transactions @@ -3061,16 +3056,16 @@ paths: in: query description: token asset class identifier required: true - example: SP2X0TZ59D5SZ8ACQ6YMCHHNR2ZN51Z32E2CJ173.the-explorer-guild::The-Explorer-Guild schema: type: string + example: "SP2X0TZ59D5SZ8ACQ6YMCHHNR2ZN51Z32E2CJ173.the-explorer-guild::The-Explorer-Guild" - name: value in: query description: hex representation of the token's unique value required: true - example: '0x0100000000000000000000000000000803' schema: type: string + example: "0x0100000000000000000000000000000803" - name: limit in: query description: max number of events to fetch @@ -3082,18 +3077,18 @@ paths: in: query description: index of first event to fetch required: false - example: 42000 schema: type: integer default: 0 + example: 42000 - name: unanchored in: query description: whether or not to include events from unconfirmed transactions required: false - example: true schema: type: boolean default: false + example: true - name: tx_metadata in: query description: whether or not to include the complete transaction metadata instead of just `tx_id`. Enabling this option can affect performance and response times. 
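Since tx_metadata can slow responses down, the cheap default is to leave it off and resolve interesting tx_ids separately. A sketch of the NFT history query with the example asset class and value from this diff (the :: in the identifier must be URL-encoded; base URL assumed):

async function nftHistory(): Promise<void> {
  const asset = 'SP2X0TZ59D5SZ8ACQ6YMCHHNR2ZN51Z32E2CJ173.the-explorer-guild::The-Explorer-Guild';
  const value = '0x0100000000000000000000000000000803'; // hex-encoded Clarity value from the example
  const url =
    'http://localhost:3999/extended/v1/tokens/nft/history' +
    `?asset_identifier=${encodeURIComponent(asset)}&value=${value}&tx_metadata=false`;
  const resp = await fetch(url);
  if (!resp.ok) throw new Error(`HTTP ${resp.status}`);
  const { total } = (await resp.json()) as { total: number };
  console.log(`${total} history events for this token`);
}
nftHistory().catch(console.error);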
@@ -3131,9 +3126,9 @@ paths: in: query description: token asset class identifier required: true - example: SP2X0TZ59D5SZ8ACQ6YMCHHNR2ZN51Z32E2CJ173.the-explorer-guild::The-Explorer-Guild schema: type: string + example: "SP2X0TZ59D5SZ8ACQ6YMCHHNR2ZN51Z32E2CJ173.the-explorer-guild::The-Explorer-Guild" - name: limit in: query description: max number of events to fetch @@ -3145,10 +3140,10 @@ paths: in: query description: index of first event to fetch required: false - example: 42000 schema: type: integer default: 0 + example: 42000 - name: unanchored in: query description: whether or not to include events from unconfirmed transactions @@ -3198,9 +3193,9 @@ paths: in: query description: index of first tokens to fetch required: false - example: 42000 schema: type: integer + example: 42000 responses: 200: description: List of non fungible tokens metadata @@ -3249,7 +3244,7 @@ paths: required: true schema: type: string - example: SPSCWDV3RKV5ZRN1FQD84YE1NQFEDJ9R1F4DYQ11.newyorkcitycoin-token-v2 + example: "SPSCWDV3RKV5ZRN1FQD84YE1NQFEDJ9R1F4DYQ11.newyorkcitycoin-token-v2" responses: 200: description: Fungible tokens metadata for contract id @@ -3382,37 +3377,37 @@ paths: in: query description: Hash of transaction required: false - example: 0x29e25515652dad41ef675bd0670964e3d537b80ec19cf6ca6f1dd65d5bc642c5 + example: "0x29e25515652dad41ef675bd0670964e3d537b80ec19cf6ca6f1dd65d5bc642c5" schema: type: string - name: address in: query description: Stacks address or a Contract identifier required: false - example: ST1HB64MAJ1MBV4CQ80GF01DZS4T1DSMX20ADCRA4 schema: type: string + example: "ST1HB64MAJ1MBV4CQ80GF01DZS4T1DSMX20ADCRA4" - name: limit in: query description: number of items to return required: false - example: 100 schema: type: integer + example: 100 - name: offset in: query description: number of items to skip required: false - example: 42000 schema: type: integer + example: 42000 - name: type in: query description: Filter the events on event type required: false - example: stx_lock schema: type: array + example: stx_lock items: type: string enum: [smart_contract_log, stx_lock, stx_asset, fungible_token_asset, non_fungible_token_asset] From a10ac03b9df16de556ca17a063eade38f5ce820b Mon Sep 17 00:00:00 2001 From: Lavanya Kasturi Date: Wed, 24 Aug 2022 16:26:06 -0500 Subject: [PATCH 07/24] docs: added examples for Rosetta API's - AccountIdentifier, Hash and Construction (#1288) * Added examples for construction, account and hash API's * docs: cleaned up OpenAPI.yaml for consistency with all the examples (#1282) * Added examples for missing API's * fix the hash example for v1/block/{hash} * Fixed examples for string types * Updated examples to follow yaml syntax * Examples for Rosetta and Smart contracts * Reframed a sentence --- ...ifier-publickey-request-body.example.json} | 2 +- ...-metadata-request-schema-body.example.json | 34 ++++++++++++++++ ...ansaction-request-schema-body.example.json | 7 ++++ ...ction-from-signarures-request.example.json | 26 ++++++++++++ ...it-signed-transaction-request.example.json | 7 ++++ ...ion-unsigned-payloads-request.example.json | 27 +++++++++++++ ...ific-data-map-inside-contract.example.json | 1 + ...ly-function-args-request-body.example.json | 6 +++ docs/openapi.yaml | 40 +++++++++++++------ 9 files changed, 136 insertions(+), 14 deletions(-) rename docs/api/rosetta/{rosetta-account-identifier-request-body.example.json => rosetta-account-identifier-publickey-request-body.example.json} (64%) create mode 100644 
docs/api/rosetta/rosetta-construction-metadata-request-schema-body.example.json create mode 100644 docs/api/rosetta/rosetta-hash-signed-transaction-request-schema-body.example.json create mode 100644 docs/api/rosetta/rosetta-network-transaction-from-signarures-request.example.json create mode 100644 docs/api/rosetta/rosetta-submit-signed-transaction-request.example.json create mode 100644 docs/api/rosetta/rosetta-submit-unsigned-transaction-unsigned-payloads-request.example.json create mode 100644 docs/entities/contracts/get-specific-data-map-inside-contract.example.json create mode 100644 docs/entities/contracts/read-only-function-args-request-body.example.json diff --git a/docs/api/rosetta/rosetta-account-identifier-request-body.example.json b/docs/api/rosetta/rosetta-account-identifier-publickey-request-body.example.json similarity index 64% rename from docs/api/rosetta/rosetta-account-identifier-request-body.example.json rename to docs/api/rosetta/rosetta-account-identifier-publickey-request-body.example.json index dad0e3d1..bbc514cd 100644 --- a/docs/api/rosetta/rosetta-account-identifier-request-body.example.json +++ b/docs/api/rosetta/rosetta-account-identifier-publickey-request-body.example.json @@ -4,7 +4,7 @@ "network": "mainnet" }, "public_key": { - "hex_bytes": "022d82baea2d041ac281bebafab11571f45db4f163a9e3f8640b1c804a4ac6f662", + "hex_bytes": "025c13b2fc2261956d8a4ad07d481b1a3b2cbf93a24f992249a61c3a1c4de79c51", "curve_type": "secp256k1" }, "metadata": {} diff --git a/docs/api/rosetta/rosetta-construction-metadata-request-schema-body.example.json b/docs/api/rosetta/rosetta-construction-metadata-request-schema-body.example.json new file mode 100644 index 00000000..cd1bb7f2 --- /dev/null +++ b/docs/api/rosetta/rosetta-construction-metadata-request-schema-body.example.json @@ -0,0 +1,34 @@ +{ + "network_identifier": { + "blockchain": "stacks", + "network": "mainnet" + }, + "options": { + "sender_address": "SP3Y0BBCCCBFAMYCYN3F35CX9MH1J2GATP53JX3FA", + "type": "stack_stx", + "status": "success", + "token_transfer_recipient_address": "string", + "amount": "500000", + "symbol": "STX", + "decimals": 6, + "gas_limit": 0, + "gas_price": 0, + "suggested_fee_multiplier": 0, + "max_fee": "12380898", + "fee": "fee", + "size": 260, + "memo": "test.memo", + "number_of_cycles": 0, + "contract_address": "SP112T7BYCNEDCZ9TCYXCXFNJG9WXX5Q5SG6DSBAM", + "contract_name": "STX transfer", + "burn_block_height": 0, + "delegate_to": "cb3df38053d132895220b9ce471f6b676db5b9bf0b4adefb55f2118ece2478df01.STB44HYPYAT2BB2QE513NSP81HTMYWBJP02HPGK6", + "pox_addr": "1Xik14zRm29UsyS6DjhYg4iZeZqsDa8D3" + }, + "public_keys": [ + { + "hex_bytes": "publicKey", + "curve_type": "secp256k1" + } + ] +} diff --git a/docs/api/rosetta/rosetta-hash-signed-transaction-request-schema-body.example.json b/docs/api/rosetta/rosetta-hash-signed-transaction-request-schema-body.example.json new file mode 100644 index 00000000..da5c3bb3 --- /dev/null +++ b/docs/api/rosetta/rosetta-hash-signed-transaction-request-schema-body.example.json @@ -0,0 +1,7 @@ +{ + "network_identifier": { + "blockchain": "stacks", + "network": "mainnet" + }, + "signed_transaction": "0x80800000000400539886f96611ba3ba6cef9618f8c78118b37c5be000000000000000000000000000000b400017a33a91515ef48608a99c6adecd2eb258e11534a1acf66348f5678c8e2c8f83d243555ed67a0019d3500df98563ca31321c1a675b43ef79f146e322fe08df75103020000000000051a1ae3f911d8f1d46d7416bfbe4b593fd41eac19cb000000000007a12000000000000000000000000000000000000000000000000000000000000000000000" +} diff --git 
a/docs/api/rosetta/rosetta-network-transaction-from-signarures-request.example.json b/docs/api/rosetta/rosetta-network-transaction-from-signarures-request.example.json new file mode 100644 index 00000000..340f8e45 --- /dev/null +++ b/docs/api/rosetta/rosetta-network-transaction-from-signarures-request.example.json @@ -0,0 +1,26 @@ +{ + "network_identifier": { + "blockchain": "stacks", + "network": "mainnet" + }, + "unsigned_transaction": "00000000010400539886f96611ba3ba6cef9618f8c78118b37c5be0000000000000000000000000000006400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003020000000000051ab71a091b4b8b7661a661c620966ab6573bc2dcd3000000000007a12074657374207472616e73616374696f6e000000000000000000000000000000000000", + "signatures": [ + { + "signing_payload": { + "address": "string", + "account_identifier": { + "address": "STB44HYPYAT2BB2QE513NSP81HTMYWBJP02HPGK6", + "metadata": {} + }, + "hex_bytes": "string", + "signature_type": "ecdsa" + }, + "public_key": { + "hex_bytes": "025c13b2fc2261956d8a4ad07d481b1a3b2cbf93a24f992249a61c3a1c4de79c51", + "curve_type": "secp256k1" + }, + "signature_type": "ecdsa", + "hex_bytes": "string" + } + ] +} diff --git a/docs/api/rosetta/rosetta-submit-signed-transaction-request.example.json b/docs/api/rosetta/rosetta-submit-signed-transaction-request.example.json new file mode 100644 index 00000000..da5c3bb3 --- /dev/null +++ b/docs/api/rosetta/rosetta-submit-signed-transaction-request.example.json @@ -0,0 +1,7 @@ +{ + "network_identifier": { + "blockchain": "stacks", + "network": "mainnet" + }, + "signed_transaction": "0x80800000000400539886f96611ba3ba6cef9618f8c78118b37c5be000000000000000000000000000000b400017a33a91515ef48608a99c6adecd2eb258e11534a1acf66348f5678c8e2c8f83d243555ed67a0019d3500df98563ca31321c1a675b43ef79f146e322fe08df75103020000000000051a1ae3f911d8f1d46d7416bfbe4b593fd41eac19cb000000000007a12000000000000000000000000000000000000000000000000000000000000000000000" +} diff --git a/docs/api/rosetta/rosetta-submit-unsigned-transaction-unsigned-payloads-request.example.json b/docs/api/rosetta/rosetta-submit-unsigned-transaction-unsigned-payloads-request.example.json new file mode 100644 index 00000000..d880e6a3 --- /dev/null +++ b/docs/api/rosetta/rosetta-submit-unsigned-transaction-unsigned-payloads-request.example.json @@ -0,0 +1,27 @@ +{ + "network_identifier": { + "blockchain": "stacks", + "network": "mainnet" + }, + "operations": [ + { + "operation_identifier": { + "index": 0, + "network_index": 0 + }, + "related_operations": [ + { + "index": 0, + "network_index": 0 + } + ], + "type": "stack_stx", + "status": "success", + "account": { + "address": "STB44HYPYAT2BB2QE513NSP81HTMYWBJP02HPGK6", + "metadata": {} + } + + } + ] +} diff --git a/docs/entities/contracts/get-specific-data-map-inside-contract.example.json b/docs/entities/contracts/get-specific-data-map-inside-contract.example.json new file mode 100644 index 00000000..8e470ae6 --- /dev/null +++ b/docs/entities/contracts/get-specific-data-map-inside-contract.example.json @@ -0,0 +1 @@ +"0x0100000000000000000000000000000095" diff --git a/docs/entities/contracts/read-only-function-args-request-body.example.json b/docs/entities/contracts/read-only-function-args-request-body.example.json new file mode 100644 index 00000000..80ac10ff --- /dev/null +++ b/docs/entities/contracts/read-only-function-args-request-body.example.json @@ -0,0 +1,6 @@ +{ + "sender": "STM9EQRAB3QAKF8NKTP15WJT7VHH4EWG3DJB4W29", + "arguments": [ + 
"0x0100000000000000000000000000000095" + ] +} diff --git a/docs/openapi.yaml b/docs/openapi.yaml index 329c25bf..2a3d5f25 100644 --- a/docs/openapi.yaml +++ b/docs/openapi.yaml @@ -601,9 +601,9 @@ paths: in: query description: index of first block to fetch required: false - example: 42000 schema: type: integer + example: 42000 responses: 200: description: List of blocks @@ -1085,20 +1085,21 @@ paths: description: Stacks address schema: type: string - example: "SP6P4EJF0VG8V0RB3TQQKJBHDQKEF6NVRD1KZE3C" + example: "SPSCWDV3RKV5ZRN1FQD84YE1NQFEDJ9R1F4DYQ11" - name: contract_name in: path required: true description: Contract name schema: type: string - example: satoshibles + example: newyorkcitycoin-core-v2 - name: map_name in: path required: true description: Map name schema: type: string + example: approved-contracts - name: proof in: query description: Returns object without the proof field when set to 0 @@ -1117,6 +1118,8 @@ paths: application/json: schema: type: string + example: + $ref: ./entities/contracts/get-specific-data-map-inside-contract.example.json /v2/contracts/source/{contract_address}/{contract_name}: get: @@ -1192,20 +1195,21 @@ paths: description: Stacks address schema: type: string - example: "SP6P4EJF0VG8V0RB3TQQKJBHDQKEF6NVRD1KZE3C" + example: "SP187Y7NRSG3T9Z9WTSWNEN3XRV1YSJWS81C7JKV7" - name: contract_name in: path required: true description: Contract name schema: type: string - example: satoshibles + example: imaginary-friends-zebras - name: function_name in: path required: true description: Function name schema: type: string + example: get-token-uri - name: tip in: query schema: @@ -1219,11 +1223,8 @@ paths: application/json: schema: $ref: './entities/contracts/read-only-function-args.schema.json' - example: - sender: 'SP31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZW97B5P0.get-info' - arguments: - - '0x0011...' - - '0x00231...' + example: + $ref: './entities/contracts/read-only-function-args-request-body.example.json' /extended/v1/address/{principal}/balances: get: @@ -2205,6 +2206,8 @@ paths: application/json: schema: $ref: ./api/rosetta/rosetta-construction-derive-request.schema.json + example: + $ref: ./api/rosetta/rosetta-account-identifier-publickey-request-body.example.json /rosetta/v1/construction/hash: post: @@ -2232,6 +2235,8 @@ paths: application/json: schema: $ref: ./api/rosetta/rosetta-construction-hash-request.schema.json + example: + $ref: ./api/rosetta/rosetta-hash-signed-transaction-request-schema-body.example.json /rosetta/v1/construction/metadata: post: @@ -2259,6 +2264,8 @@ paths: application/json: schema: $ref: ./api/rosetta/rosetta-construction-metadata-request.schema.json + example: + $ref: ./api/rosetta/rosetta-construction-metadata-request-schema-body.example.json /rosetta/v1/construction/parse: post: @@ -2321,7 +2328,7 @@ paths: - Rosetta summary: Submit a Signed Transaction operationId: rosetta_construction_submit - description: Submit a pre-signed transaction to the node. + description: Submit a pre-signed transaction to the node. The examples below are illustrative only. You'll need to use your wallet to generate actual values to use them in the request payload. 
responses: 200: description: Success @@ -2341,6 +2348,8 @@ paths: application/json: schema: $ref: ./api/rosetta/rosetta-construction-submit-request.schema.json + example: + $ref: ./api/rosetta/rosetta-submit-signed-transaction-request.example.json /rosetta/v1/construction/payloads: post: @@ -2348,7 +2357,7 @@ paths: - Rosetta summary: Generate an Unsigned Transaction and Signing Payloads operationId: rosetta_construction_payloads - description: Generate and unsigned transaction from operations and metadata + description: Generate an unsigned transaction from operations and metadata. The examples below are illustrative only. You'll need to use your wallet to generate actual values to use them in the request payload. responses: 200: description: Success @@ -2368,6 +2377,8 @@ paths: application/json: schema: $ref: ./api/rosetta/rosetta-construction-payloads-request.schema.json + example: + $ref: ./api/rosetta/rosetta-submit-unsigned-transaction-unsigned-payloads-request.example.json /rosetta/v1/construction/combine: post: @@ -2375,7 +2386,7 @@ paths: - Rosetta summary: Create Network Transaction from Signatures operationId: rosetta_construction_combine - description: Take unsigned transaction and signature, combine both and return signed transaction + description: Take unsigned transaction and signature, combine both and return signed transaction. The examples below are illustrative only. You'll need to use your wallet to generate actual values to use them in the request payload. responses: 200: description: Success @@ -2395,6 +2406,8 @@ paths: application/json: schema: $ref: ./api/rosetta/rosetta-construction-combine-request.schema.json + example: + $ref: ./api/rosetta/rosetta-network-transaction-from-signarures-request.example.json /v2/prices/namespaces/{tld}: get: @@ -3210,6 +3223,7 @@ paths: get: operationId: get_contract_nft_metadata summary: Non fungible tokens metadata for contract ID + deprecated: true description: Retrieves metadata for non fungible tokens for a given contract id. More information on Non-Fungible Tokens on the Stacks blockchain can be found [here](https://docs.stacks.co/write-smart-contracts/tokens#non-fungible-tokens-nfts). 
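
Similarly, the read-only call example wired in above can be exercised end to end. A minimal sketch, assuming a local API on the default port 3999 and Node 18+ for the global fetch; the contract, function, sender, and hex-serialized Clarity uint argument are the example values added in this patch:

    // Minimal sketch: invoke a read-only contract function through the API.
    const url =
      'http://localhost:3999/v2/contracts/call-read/' +
      'SP187Y7NRSG3T9Z9WTSWNEN3XRV1YSJWS81C7JKV7/imaginary-friends-zebras/get-token-uri';

    async function callReadOnly(): Promise<void> {
      const res = await fetch(url, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
          sender: 'STM9EQRAB3QAKF8NKTP15WJT7VHH4EWG3DJB4W29',
          // Hex-serialized Clarity value, as in read-only-function-args-request-body.example.json.
          arguments: ['0x0100000000000000000000000000000095'],
        }),
      });
      console.log(await res.json());
    }

    callReadOnly().catch(console.error);
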
tags: - Non-Fungible Tokens From 1f648187b8c701e802a06bac52b077fd10571ff7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafael=20C=C3=A1rdenas?= Date: Fri, 26 Aug 2022 10:14:37 -0500 Subject: [PATCH 08/24] feat!: optimize tables and improve canonical treatment of BNS data (#1287) * refactor: re-order helper files * fix: new columns for zonefiles * feat: batch update subdomains * fix: optimize some queries * fix: api tests * fix: optimize more queries, add cache handlers * fix: some integration tests * fix: all integration tests * fix: v1 import test individually * fix: move v1 import tests to separate file * fix: start adding on conflict to subdomains * fix: add uniqueness to names, subdomains and namespaces * fix: remove extra index * refactor: remove unused tx function * fix: unused exports * feat: echo block height while doing replays * refactor: separate batch updates * fix: calculate expire_block for a name based on namespace lifetime * fix: test strict equal * fix: remove secondary temporary table for event import * fix: test sorting * fix: detect namespaces readyed by third party contracts * fix: bigint for namespaces * refactor: move bns helper tests to bns suite * chore: make event test less verbose * chore: temporarily disable migrations on boot * chore: add verbose logs to migration * chore: make migration verbosity depend on log level * fix: detect names from print event * test: re-orgd attachments * test: duplicate zonefile updates --- src/api/routes/bns/addresses.ts | 8 + src/api/routes/bns/names.ts | 66 +- src/api/routes/bns/namespaces.ts | 14 +- src/api/routes/bns/pricing.ts | 2 +- src/datastore/common.ts | 35 +- src/datastore/postgres-store.ts | 663 ++++++++++++-------- src/event-replay/event-replay.ts | 3 + src/{ => event-stream/bns}/bns-constants.ts | 9 - src/{ => event-stream/bns}/bns-helpers.ts | 133 ++-- src/event-stream/core-node-message.ts | 2 +- src/event-stream/event-server.ts | 183 ++---- src/helpers.ts | 8 +- src/import-v1/index.ts | 22 +- src/migrations/1608030374841_namespaces.ts | 17 +- src/migrations/1608030374842_names.ts | 16 +- src/migrations/1610030345948_subdomains.ts | 17 +- src/migrations/1626441820095_zonefiles.ts | 19 +- src/test-utils/test-builders.ts | 65 ++ src/tests-bns/api.ts | 43 +- src/tests-bns/bns-helpers-tests.ts | 95 +++ src/tests-bns/bns-integration-tests.ts | 123 ++-- src/tests-bns/event-server-tests.ts | 305 +++++++++ src/tests-bns/v1-import-tests.ts | 161 +++++ src/tests/bns-helpers-tests.ts | 95 --- src/tests/datastore-tests.ts | 19 +- 25 files changed, 1401 insertions(+), 722 deletions(-) rename src/{ => event-stream/bns}/bns-constants.ts (70%) rename src/{ => event-stream/bns}/bns-helpers.ts (67%) create mode 100644 src/tests-bns/bns-helpers-tests.ts create mode 100644 src/tests-bns/event-server-tests.ts create mode 100644 src/tests-bns/v1-import-tests.ts delete mode 100644 src/tests/bns-helpers-tests.ts diff --git a/src/api/routes/bns/addresses.ts b/src/api/routes/bns/addresses.ts index 3648da94..ff890909 100644 --- a/src/api/routes/bns/addresses.ts +++ b/src/api/routes/bns/addresses.ts @@ -3,13 +3,20 @@ import { asyncHandler } from '../../async-handler'; import { DataStore } from '../../../datastore/common'; import { isUnanchoredRequest } from '../../query-helpers'; import { ChainID } from '@stacks/transactions'; +import { + getETagCacheHandler, + setETagCacheHeaders, +} from '../../../api/controllers/cache-controller'; const SUPPORTED_BLOCKCHAINS = ['stacks']; export function createBnsAddressesRouter(db: DataStore, chainId: ChainID): 
express.Router { const router = express.Router(); + const cacheHandler = getETagCacheHandler(db); + router.get( '/:blockchain/:address', + cacheHandler, asyncHandler(async (req, res, next) => { // Retrieves a list of names owned by the address provided. const { blockchain, address } = req.params; @@ -23,6 +30,7 @@ export function createBnsAddressesRouter(db: DataStore, chainId: ChainID): expre includeUnanchored, chainId, }); + setETagCacheHeaders(res); if (namesByAddress.found) { res.json({ names: namesByAddress.result }); } else { diff --git a/src/api/routes/bns/names.ts b/src/api/routes/bns/names.ts index 30cb2788..dc416fb8 100644 --- a/src/api/routes/bns/names.ts +++ b/src/api/routes/bns/names.ts @@ -3,92 +3,85 @@ import { asyncHandler } from '../../async-handler'; import { DataStore } from '../../../datastore/common'; import { parsePagingQueryInput } from '../../../api/pagination'; import { isUnanchoredRequest } from '../../query-helpers'; -import { bnsBlockchain, BnsErrors } from '../../../bns-constants'; +import { bnsBlockchain, BnsErrors } from '../../../event-stream/bns/bns-constants'; import { BnsGetNameInfoResponse } from '@stacks/stacks-blockchain-api-types'; import { ChainID } from '@stacks/transactions'; +import { + getETagCacheHandler, + setETagCacheHeaders, +} from '../../../api/controllers/cache-controller'; export function createBnsNamesRouter(db: DataStore, chainId: ChainID): express.Router { const router = express.Router(); + const cacheHandler = getETagCacheHandler(db); router.get( '/:name/zonefile/:zoneFileHash', + cacheHandler, asyncHandler(async (req, res, next) => { - // Fetches the historical zonefile specified by the username and zone hash. const { name, zoneFileHash } = req.params; const includeUnanchored = isUnanchoredRequest(req, res, next); - let nameFound = false; - const nameQuery = await db.getName({ name: name, includeUnanchored, chainId: chainId }); - nameFound = nameQuery.found; - if (!nameFound) { - const subdomainQuery = await db.getSubdomain({ subdomain: name, includeUnanchored }); - nameFound = subdomainQuery.found; - } - - if (nameFound) { - const zonefile = await db.getHistoricalZoneFile({ name: name, zoneFileHash: zoneFileHash }); - if (zonefile.found) { - res.json(zonefile.result); - } else { - res.status(404).json({ error: 'No such zonefile' }); - } + const zonefile = await db.getHistoricalZoneFile({ + name: name, + zoneFileHash: zoneFileHash, + includeUnanchored, + }); + if (zonefile.found) { + setETagCacheHeaders(res); + res.json(zonefile.result); } else { - res.status(400).json({ error: 'Invalid name or subdomain' }); + res.status(404).json({ error: 'No such name or zonefile' }); } }) ); router.get( '/:name/subdomains', + cacheHandler, asyncHandler(async (req, res, next) => { const { name } = req.params; const includeUnanchored = isUnanchoredRequest(req, res, next); const subdomainsList = await db.getSubdomainsListInName({ name, includeUnanchored }); + setETagCacheHeaders(res); res.json(subdomainsList.results); }) ); router.get( '/:name/zonefile', + cacheHandler, asyncHandler(async (req, res, next) => { - // Fetch a user’s raw zone file. This only works for RFC-compliant zone files. This method returns an error for names that have non-standard zone files. 
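
The caching work repeated across these BNS routers follows a single pattern: construct one ETag-aware middleware per router with getETagCacheHandler, attach it to each GET route, and call setETagCacheHeaders only on the success path right before the JSON response. A condensed sketch of that pattern, assuming the same relative import paths the routers in this patch use:

    import * as express from 'express';
    import { asyncHandler } from '../../async-handler';
    import { DataStore } from '../../../datastore/common';
    import {
      getETagCacheHandler,
      setETagCacheHeaders,
    } from '../../../api/controllers/cache-controller';

    // Condensed sketch of the middleware pattern applied throughout this patch.
    export function createExampleRouter(db: DataStore): express.Router {
      const router = express.Router();
      const cacheHandler = getETagCacheHandler(db); // one handler per router
      router.get(
        '/',
        cacheHandler,
        asyncHandler(async (req, res) => {
          const { results } = await db.getNamesList({ page: 0, includeUnanchored: false });
          setETagCacheHeaders(res); // only stamped on the success path
          res.json(results);
        })
      );
      return router;
    }
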
const { name } = req.params; const includeUnanchored = isUnanchoredRequest(req, res, next); - let nameFound = false; - const nameQuery = await db.getName({ name: name, includeUnanchored, chainId: chainId }); - nameFound = nameQuery.found; - if (!nameFound) { - const subdomainQuery = await db.getSubdomain({ subdomain: name, includeUnanchored }); - nameFound = subdomainQuery.found; - } - - if (nameFound) { - const zonefile = await db.getLatestZoneFile({ name: name, includeUnanchored }); - if (zonefile.found) { - res.json(zonefile.result); - } else { - res.status(404).json({ error: 'No zone file for name' }); - } + const zonefile = await db.getLatestZoneFile({ name: name, includeUnanchored }); + if (zonefile.found) { + setETagCacheHeaders(res); + res.json(zonefile.result); } else { - res.status(400).json({ error: 'Invalid name or subdomain' }); + res.status(404).json({ error: 'No such name or zonefile does not exist' }); } }) ); router.get( '/', + cacheHandler, asyncHandler(async (req, res, next) => { const page = parsePagingQueryInput(req.query.page ?? 0); const includeUnanchored = isUnanchoredRequest(req, res, next); const { results } = await db.getNamesList({ page, includeUnanchored }); if (results.length === 0 && req.query.page) { res.status(400).json(BnsErrors.InvalidPageNumber); + } else { + setETagCacheHeaders(res); + res.json(results); } - res.json(results); }) ); router.get( '/:name', + cacheHandler, asyncHandler(async (req, res, next) => { const { name } = req.params; const includeUnanchored = isUnanchoredRequest(req, res, next); @@ -149,6 +142,7 @@ export function createBnsNamesRouter(db: DataStore, chainId: ChainID): express.R const response = Object.fromEntries( Object.entries(nameInfoResponse).filter(([_, v]) => v != null) ); + setETagCacheHeaders(res); res.json(response); }) ); diff --git a/src/api/routes/bns/namespaces.ts b/src/api/routes/bns/namespaces.ts index 3ed03311..ee7237f0 100644 --- a/src/api/routes/bns/namespaces.ts +++ b/src/api/routes/bns/namespaces.ts @@ -3,20 +3,27 @@ import { asyncHandler } from '../../async-handler'; import { DataStore } from '../../../datastore/common'; import { parsePagingQueryInput } from '../../../api/pagination'; import { isUnanchoredRequest } from '../../query-helpers'; -import { BnsErrors } from '../../../bns-constants'; +import { BnsErrors } from '../../../event-stream/bns/bns-constants'; import { BnsGetAllNamespacesResponse } from '@stacks/stacks-blockchain-api-types'; +import { + getETagCacheHandler, + setETagCacheHeaders, +} from '../../../api/controllers/cache-controller'; export function createBnsNamespacesRouter(db: DataStore): express.Router { const router = express.Router(); + const cacheHandler = getETagCacheHandler(db); router.get( '/', + cacheHandler, asyncHandler(async (req, res, next) => { const includeUnanchored = isUnanchoredRequest(req, res, next); const { results } = await db.getNamespaceList({ includeUnanchored }); const response: BnsGetAllNamespacesResponse = { namespaces: results, }; + setETagCacheHeaders(res); res.json(response); return; }) @@ -24,6 +31,7 @@ export function createBnsNamespacesRouter(db: DataStore): express.Router { router.get( '/:tld/names', + cacheHandler, asyncHandler(async (req, res, next) => { const { tld } = req.params; const page = parsePagingQueryInput(req.query.page ?? 
0); @@ -39,8 +47,10 @@ export function createBnsNamespacesRouter(db: DataStore): express.Router { }); if (results.length === 0 && req.query.page) { res.status(400).json(BnsErrors.InvalidPageNumber); + } else { + setETagCacheHeaders(res); + res.json(results); } - res.json(results); } }) ); diff --git a/src/api/routes/bns/pricing.ts b/src/api/routes/bns/pricing.ts index d0c5cb83..7326b6a6 100644 --- a/src/api/routes/bns/pricing.ts +++ b/src/api/routes/bns/pricing.ts @@ -14,12 +14,12 @@ import { listCV, ChainID, } from '@stacks/transactions'; -import { GetStacksNetwork, getBnsContractID } from './../../../bns-helpers'; import { BnsGetNamePriceResponse, BnsGetNamespacePriceResponse, } from '@stacks/stacks-blockchain-api-types'; import { isValidPrincipal, logger } from './../../../helpers'; +import { getBnsContractID, GetStacksNetwork } from '../../../event-stream/bns/bns-helpers'; export function createBnsPriceRouter(db: DataStore, chainId: ChainID): express.Router { const router = express.Router(); diff --git a/src/datastore/common.ts b/src/datastore/common.ts index e257b990..3edf7f87 100644 --- a/src/datastore/common.ts +++ b/src/datastore/common.ts @@ -428,6 +428,31 @@ export interface DataStoreTxEventData { namespaces: DbBnsNamespace[]; } +export interface DataStoreAttachmentData { + op: string; + name: string; + namespace: string; + zonefile: string; + zonefileHash: string; + txId: string; + indexBlockHash: string; + blockHeight: number; +} + +export interface DataStoreSubdomainBlockData { + index_block_hash: string; + parent_index_block_hash: string; + microblock_hash: string; + microblock_sequence: number; + microblock_canonical: boolean; +} + +export interface DataStoreAttachmentSubdomainData { + attachment?: DataStoreAttachmentData; + blockData?: DataStoreSubdomainBlockData; + subdomains?: DbBnsSubdomain[]; +} + export interface DbSearchResult { entity_type: 'standard_address' | 'contract_address' | 'block_hash' | 'tx_id' | 'mempool_tx_id'; entity_id: string; @@ -472,6 +497,7 @@ export interface DbInboundStxTransfer { export interface DbBnsZoneFile { zonefile: string; } + export interface DbBnsNamespace { id?: number; namespace_id: string; @@ -480,8 +506,8 @@ export interface DbBnsNamespace { reveal_block: number; ready_block: number; buckets: string; - base: number; - coeff: number; + base: bigint; + coeff: bigint; nonalpha_discount: number; no_vowel_discount: number; lifetime: number; @@ -683,7 +709,6 @@ export interface DataStore extends DataStoreEventEmitter { limit: number; offset: number; }): Promise<{ results: DbMempoolTx[]; total: number }>; - getTxStrict(args: { txId: string; indexBlockHash: string }): Promise>; getTx(args: { txId: string; includeUnanchored: boolean }): Promise>; getTxList(args: { limit: number; @@ -744,7 +769,8 @@ export interface DataStore extends DataStoreEventEmitter { updateMicroblocks(data: DataStoreMicroblockUpdateData): Promise; - updateZoneContent(zonefile: string, zonefile_hash: string, tx_id: string): Promise; + updateAttachments(attachments: DataStoreAttachmentData[]): Promise; + resolveBnsSubdomains( blockData: { index_block_hash: string; @@ -948,6 +974,7 @@ export interface DataStore extends DataStoreEventEmitter { getHistoricalZoneFile(args: { name: string; zoneFileHash: string; + includeUnanchored: boolean; }): Promise>; getLatestZoneFile(args: { name: string; diff --git a/src/datastore/postgres-store.ts b/src/datastore/postgres-store.ts index bc202f2d..2c9fd6b8 100644 --- a/src/datastore/postgres-store.ts +++ b/src/datastore/postgres-store.ts 
@@ -42,6 +42,8 @@ import { bnsNameCV, getBnsSmartContractId, bnsHexValueToName, + I32_MAX, + defaultLogLevel, } from '../helpers'; import { DataStore, @@ -98,6 +100,9 @@ import { NftEventWithTxMetadata, DbAssetEventTypeId, DbTxGlobalStatus, + DataStoreAttachmentData, + DataStoreSubdomainBlockData, + DataStoreAttachmentSubdomainData, } from './common'; import { AddressTokenOfferingLocked, @@ -120,6 +125,8 @@ import { PgTokensNotificationPayload, PgTxNotificationPayload, } from './postgres-notifier'; +import * as zoneFileParser from 'zone-file'; +import { parseResolver, parseZoneFileTxt } from '../event-stream/bns/bns-helpers'; const MIGRATIONS_TABLE = 'pgmigrations'; const MIGRATIONS_DIR = path.join(APP_DIR, 'migrations'); @@ -237,6 +244,7 @@ export async function runMigrations( warn: msg => logger.warn(msg), error: msg => logger.error(msg), }, + verbose: defaultLogLevel === 'verbose', }; if (clientConfig.schema) { runnerOpts.schema = clientConfig.schema; @@ -1029,23 +1037,11 @@ export class PgDataStore payload jsonb NOT NULL ) ON COMMIT DROP `); - // Use a `temp_raw_tsv` table first to store the raw TSV data as it might come with duplicate - // rows which would trigger the `PRIMARY KEY` constraint in `temp_event_observer_requests`. - // We will "upsert" from the former to the latter before event ingestion. - await client.query(` - CREATE TEMPORARY TABLE temp_raw_tsv - (LIKE temp_event_observer_requests) - ON COMMIT DROP - `); onStatusUpdate?.('Importing raw event requests into temporary table...'); - const importStream = client.query(pgCopyStreams.from(`COPY temp_raw_tsv FROM STDIN`)); + const importStream = client.query( + pgCopyStreams.from(`COPY temp_event_observer_requests FROM STDIN`) + ); await pipelineAsync(readStream, importStream); - await client.query(` - INSERT INTO temp_event_observer_requests - SELECT * - FROM temp_raw_tsv - ON CONFLICT DO NOTHING; - `); const totalRowCountQuery = await client.query<{ count: string }>( `SELECT COUNT(id) count FROM temp_event_observer_requests` ); @@ -2135,22 +2131,6 @@ export class PgDataStore }); } - async updateZoneContent(zonefile: string, zonefile_hash: string, tx_id: string): Promise { - await this.queryTx(async client => { - // inserting zonefile into zonefiles table - const validZonefileHash = this.validateZonefileHash(zonefile_hash); - await client.query( - ` - UPDATE zonefiles - SET zonefile = $1 - WHERE zonefile_hash = $2 - `, - [zonefile, validZonefileHash] - ); - }); - await this.notifier?.sendName({ nameInfo: tx_id }); - } - private validateZonefileHash(zonefileHash: string) { // this function removes the `0x` from the incoming zonefile hash, either for insertion or search. const index = zonefileHash.indexOf('0x'); @@ -2160,6 +2140,91 @@ export class PgDataStore return zonefileHash; } + async updateAttachments(attachments: DataStoreAttachmentData[]): Promise { + await this.queryTx(async client => { + // Each attachment will batch insert zonefiles for name and all subdomains that apply. + for (const attachment of attachments) { + const subdomainData: DataStoreAttachmentSubdomainData[] = []; + if (attachment.op === 'name-update') { + // If this is a zonefile update, break it down into subdomains and update all of them. We + // must find the correct transaction that registered the zonefile in the first place and + // associate it with each entry. 
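
For context on the loop that follows: the subdomain records being recovered live in the TXT entries of the name's zone file, with the resolver in a URI record. A standalone sketch of that parsing step using the same zone-file package imported above; the sample zone file text and its owner/zf0 values are invented for illustration:

    import * as zoneFileParser from 'zone-file';

    // Hypothetical zone file for a BNS name; each TXT record encodes one subdomain.
    const zonefileText = [
      '$ORIGIN alice.id.',
      '$TTL 3600',
      '_resolver URI 10 1 "https://registrar.example.com"',
      'sub TXT "owner=STB44HYPYAT2BB2QE513NSP81HTMYWBJP02HPGK6" "zf0=aGVsbG8gd29ybGQ="',
    ].join('\n');

    const parsed = zoneFileParser.parseZoneFile(zonefileText);
    // `txt` carries the subdomain entries and `uri` the resolver,
    // mirroring zoneFileContents.txt / zoneFileContents.uri below.
    console.log(parsed.txt, parsed.uri);
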
+ const zonefile = Buffer.from(attachment.zonefile, 'hex').toString(); + const zoneFileContents = zoneFileParser.parseZoneFile(zonefile); + const zoneFileTxt = zoneFileContents.txt; + if (zoneFileTxt && zoneFileTxt.length > 0) { + const dbTx = await client.query( + `SELECT ${txColumns()} FROM txs + WHERE tx_id = $1 AND index_block_hash = $2 + ORDER BY canonical DESC, microblock_canonical DESC, block_height DESC + LIMIT 1`, + [hexToBuffer(attachment.txId), hexToBuffer(attachment.indexBlockHash)] + ); + let isCanonical = true; + let txIndex = -1; + const blockData: DataStoreSubdomainBlockData = { + index_block_hash: '', + parent_index_block_hash: '', + microblock_hash: '', + microblock_sequence: I32_MAX, + microblock_canonical: true, + }; + if (dbTx.rowCount > 0) { + const parsedDbTx = this.parseTxQueryResult(dbTx.rows[0]); + isCanonical = parsedDbTx.canonical; + txIndex = parsedDbTx.tx_index; + blockData.index_block_hash = parsedDbTx.index_block_hash; + blockData.parent_index_block_hash = parsedDbTx.parent_index_block_hash; + blockData.microblock_hash = parsedDbTx.microblock_hash; + blockData.microblock_sequence = parsedDbTx.microblock_sequence; + blockData.microblock_canonical = parsedDbTx.microblock_canonical; + } else { + logger.warn( + `Could not find transaction ${attachment.txId} associated with attachment` + ); + } + const subdomains: DbBnsSubdomain[] = []; + for (let i = 0; i < zoneFileTxt.length; i++) { + const zoneFile = zoneFileTxt[i]; + const parsedTxt = parseZoneFileTxt(zoneFile.txt); + if (parsedTxt.owner === '') continue; //if txt has no owner , skip it + const subdomain: DbBnsSubdomain = { + name: attachment.name.concat('.', attachment.namespace), + namespace_id: attachment.namespace, + fully_qualified_subdomain: zoneFile.name.concat( + '.', + attachment.name, + '.', + attachment.namespace + ), + owner: parsedTxt.owner, + zonefile_hash: parsedTxt.zoneFileHash, + zonefile: parsedTxt.zoneFile, + tx_id: attachment.txId, + tx_index: txIndex, + canonical: isCanonical, + parent_zonefile_hash: attachment.zonefileHash.slice(2), + parent_zonefile_index: 0, + block_height: attachment.blockHeight, + zonefile_offset: 1, + resolver: zoneFileContents.uri ? parseResolver(zoneFileContents.uri) : '', + }; + subdomains.push(subdomain); + } + subdomainData.push({ blockData, subdomains, attachment: attachment }); + } + } + await this.updateBatchSubdomains(client, subdomainData); + await this.updateBatchZonefiles(client, subdomainData); + // Update the name's zonefile as well. 
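
The helpers called in this block take the DataStoreAttachmentSubdomainData shape added to common.ts in this patch: a name-only zonefile update passes just the attachment, while subdomain updates also carry blockData and subdomains. A small illustration of the name-only shape, matching the `[{ attachment }]` call immediately below (every field value here is invented):

    import { DataStoreAttachmentData, DataStoreAttachmentSubdomainData } from './common';

    // Invented values, shaped like a decoded `/attachments/new` entry.
    const attachment: DataStoreAttachmentData = {
      op: 'name-update',
      name: 'alice',
      namespace: 'id',
      zonefile: '24284f524947494e', // hex-encoded zone file body
      zonefileHash: '0x' + '11'.repeat(20),
      txId: '0x' + '22'.repeat(32),
      indexBlockHash: '0x' + '33'.repeat(32),
      blockHeight: 1,
    };

    // Name-only entry: no blockData or subdomains attached.
    const nameOnly: DataStoreAttachmentSubdomainData = { attachment };
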
+ await this.updateBatchZonefiles(client, [{ attachment }]); + } + }); + for (const txId of attachments.map(a => a.txId)) { + await this.notifier?.sendName({ nameInfo: txId }); + } + } + async resolveBnsSubdomains( blockData: { index_block_hash: string; @@ -2172,7 +2237,8 @@ export class PgDataStore ): Promise { if (data.length == 0) return; await this.queryTx(async client => { - await this.updateBatchSubdomains(client, blockData, data); + await this.updateBatchSubdomains(client, [{ blockData, subdomains: data }]); + await this.updateBatchZonefiles(client, [{ blockData, subdomains: data }]); }); } @@ -4014,27 +4080,6 @@ export class PgDataStore }); } - async getTxStrict(args: { txId: string; indexBlockHash: string }): Promise> { - return this.query(async client => { - const result = await client.query( - ` - SELECT ${TX_COLUMNS}, ${abiColumn()} - FROM txs - WHERE tx_id = $1 AND index_block_hash = $2 - ORDER BY canonical DESC, microblock_canonical DESC, block_height DESC - LIMIT 1 - `, - [hexToBuffer(args.txId), hexToBuffer(args.indexBlockHash)] - ); - if (result.rowCount === 0) { - return { found: false } as const; - } - const row = result.rows[0]; - const tx = this.parseTxQueryResult(row); - return { found: true, result: tx }; - }); - } - async getTx({ txId, includeUnanchored }: { txId: string; includeUnanchored: boolean }) { return this.queryTx(async client => { const maxBlockHeight = await this.getMaxBlockHeight(client, { includeUnanchored }); @@ -4852,113 +4897,147 @@ export class PgDataStore } } - async updateBatchSubdomains( + async updateBatchZonefiles( client: ClientBase, - blockData: { - index_block_hash: string; - parent_index_block_hash: string; - microblock_hash: string; - microblock_sequence: number; - microblock_canonical: boolean; - }, - subdomains: DbBnsSubdomain[] - ) { - // bns insertion variables - const columnCount = 18; - const insertParams = this.generateParameterizedInsertString({ - rowCount: subdomains.length, - columnCount, - }); - const values: any[] = []; - // zonefile insertion variables - const zonefilesColumnCount = 2; - const zonefileInsertParams = this.generateParameterizedInsertString({ - rowCount: subdomains.length, - columnCount: zonefilesColumnCount, - }); - const zonefileValues: string[] = []; - for (const subdomain of subdomains) { - let txIndex = subdomain.tx_index; - if (txIndex === -1) { - const txQuery = await client.query<{ tx_index: number }>( - ` - SELECT tx_index from txs - WHERE tx_id = $1 AND index_block_hash = $2 AND block_height = $3 - LIMIT 1 - `, - [ + data: DataStoreAttachmentSubdomainData[] + ): Promise { + let zonefileCount = 0; + const zonefileValues: any[] = []; + for (const dataItem of data) { + if (dataItem.subdomains && dataItem.blockData) { + for (const subdomain of dataItem.subdomains) { + zonefileValues.push( + subdomain.fully_qualified_subdomain, + subdomain.zonefile, + this.validateZonefileHash(subdomain.zonefile_hash), hexToBuffer(subdomain.tx_id), - hexToBuffer(blockData.index_block_hash), - subdomain.block_height, - ] - ); - if (txQuery.rowCount === 0) { - logger.warn(`Could not find tx index for subdomain entry: ${JSON.stringify(subdomain)}`); - txIndex = 0; - } else { - txIndex = txQuery.rows[0].tx_index; + hexToBuffer(dataItem.blockData.index_block_hash) + ); + zonefileCount++; } } - // preparing bns values for insertion - values.push( - subdomain.name, - subdomain.namespace_id, - subdomain.fully_qualified_subdomain, - subdomain.owner, - this.validateZonefileHash(subdomain.zonefile_hash), - 
subdomain.parent_zonefile_hash, - subdomain.parent_zonefile_index, - subdomain.block_height, - txIndex, - subdomain.zonefile_offset, - subdomain.resolver, - subdomain.canonical, - hexToBuffer(subdomain.tx_id), - hexToBuffer(blockData.index_block_hash), - hexToBuffer(blockData.parent_index_block_hash), - hexToBuffer(blockData.microblock_hash), - blockData.microblock_sequence, - blockData.microblock_canonical - ); - // preparing zonefile values for insertion - zonefileValues.push(subdomain.zonefile, this.validateZonefileHash(subdomain.zonefile_hash)); - } - // bns insertion query - const insertQuery = `INSERT INTO subdomains ( - name, namespace_id, fully_qualified_subdomain, owner, - zonefile_hash, parent_zonefile_hash, parent_zonefile_index, block_height, tx_index, - zonefile_offset, resolver, canonical, tx_id, - index_block_hash, parent_index_block_hash, microblock_hash, microblock_sequence, microblock_canonical - ) VALUES ${insertParams}`; - const insertQueryName = `insert-batch-subdomains_${columnCount}x${subdomains.length}`; - const insertBnsSubdomainsEventQuery: QueryConfig = { - name: insertQueryName, - text: insertQuery, - values, - }; - // zonefile insertion query - const zonefileInsertQuery = `INSERT INTO zonefiles (zonefile, zonefile_hash) VALUES ${zonefileInsertParams}`; - const insertZonefileQueryName = `insert-batch-zonefiles_${columnCount}x${subdomains.length}`; - const insertZonefilesEventQuery: QueryConfig = { - name: insertZonefileQueryName, - text: zonefileInsertQuery, - values: zonefileValues, - }; - try { - // checking for bns insertion errors - const bnsRes = await client.query(insertBnsSubdomainsEventQuery); - if (bnsRes.rowCount !== subdomains.length) { - throw new Error(`Expected ${subdomains.length} inserts, got ${bnsRes.rowCount} for BNS`); + if (dataItem.attachment) { + zonefileValues.push( + `${dataItem.attachment.name}.${dataItem.attachment.namespace}`, + Buffer.from(dataItem.attachment.zonefile, 'hex').toString(), + this.validateZonefileHash(dataItem.attachment.zonefileHash), + hexToBuffer(dataItem.attachment.txId), + hexToBuffer(dataItem.attachment.indexBlockHash) + ); + zonefileCount++; } - // checking for zonefile insertion errors + } + if (!zonefileCount) { + return; + } + try { + const zonefilesColumnCount = 5; + const zonefileInsertParams = this.generateParameterizedInsertString({ + rowCount: zonefileCount, + columnCount: zonefilesColumnCount, + }); + const zonefileInsertQuery = ` + INSERT INTO zonefiles (name, zonefile, zonefile_hash, tx_id, index_block_hash) + VALUES ${zonefileInsertParams} + ON CONFLICT ON CONSTRAINT unique_name_zonefile_hash_tx_id_index_block_hash DO + UPDATE SET zonefile = EXCLUDED.zonefile + `; + const insertZonefileQueryName = `insert-batch-zonefiles_${zonefilesColumnCount}x${zonefileCount}`; + const insertZonefilesEventQuery: QueryConfig = { + name: insertZonefileQueryName, + text: zonefileInsertQuery, + values: zonefileValues, + }; const zonefilesRes = await client.query(insertZonefilesEventQuery); - if (zonefilesRes.rowCount !== subdomains.length) { + if (zonefilesRes.rowCount !== zonefileCount) { throw new Error( - `Expected ${subdomains.length} inserts, got ${zonefilesRes.rowCount} for zonefiles` + `Expected ${zonefileCount} inserts, got ${zonefilesRes.rowCount} for zonefiles` ); } } catch (e: any) { - logError(`subdomain errors ${e.message}`, e); + logError(`zonefile batch error ${e.message}`, e); + throw e; + } + } + + async updateBatchSubdomains( + client: ClientBase, + data: DataStoreAttachmentSubdomainData[] + ): Promise 
{ + let subdomainCount = 0; + const subdomainValues: any[] = []; + for (const dataItem of data) { + if (dataItem.subdomains && dataItem.blockData) { + for (const subdomain of dataItem.subdomains) { + subdomainValues.push( + subdomain.name, + subdomain.namespace_id, + subdomain.fully_qualified_subdomain, + subdomain.owner, + this.validateZonefileHash(subdomain.zonefile_hash), + subdomain.parent_zonefile_hash, + subdomain.parent_zonefile_index, + subdomain.block_height, + subdomain.tx_index, + subdomain.zonefile_offset, + subdomain.resolver, + subdomain.canonical, + hexToBuffer(subdomain.tx_id), + hexToBuffer(dataItem.blockData.index_block_hash), + hexToBuffer(dataItem.blockData.parent_index_block_hash), + hexToBuffer(dataItem.blockData.microblock_hash), + dataItem.blockData.microblock_sequence, + dataItem.blockData.microblock_canonical + ); + subdomainCount++; + } + } + } + if (!subdomainCount) { + return; + } + try { + const subdomainColumnCount = 18; + const subdomainInsertParams = this.generateParameterizedInsertString({ + rowCount: subdomainCount, + columnCount: subdomainColumnCount, + }); + const insertQuery = ` + INSERT INTO subdomains ( + name, namespace_id, fully_qualified_subdomain, owner, + zonefile_hash, parent_zonefile_hash, parent_zonefile_index, block_height, tx_index, + zonefile_offset, resolver, canonical, tx_id, + index_block_hash, parent_index_block_hash, microblock_hash, microblock_sequence, microblock_canonical + ) VALUES ${subdomainInsertParams} + ON CONFLICT ON CONSTRAINT unique_fully_qualified_subdomain_tx_id_index_block_hash_microblock_hash DO + UPDATE SET + name = EXCLUDED.name, + namespace_id = EXCLUDED.namespace_id, + owner = EXCLUDED.owner, + zonefile_hash = EXCLUDED.zonefile_hash, + parent_zonefile_hash = EXCLUDED.parent_zonefile_hash, + parent_zonefile_index = EXCLUDED.parent_zonefile_index, + block_height = EXCLUDED.block_height, + tx_index = EXCLUDED.tx_index, + zonefile_offset = EXCLUDED.zonefile_offset, + resolver = EXCLUDED.resolver, + canonical = EXCLUDED.canonical, + parent_index_block_hash = EXCLUDED.parent_index_block_hash, + microblock_sequence = EXCLUDED.microblock_sequence, + microblock_canonical = EXCLUDED.microblock_canonical + `; + const insertQueryName = `insert-batch-subdomains_${subdomainColumnCount}x${subdomainCount}`; + const insertBnsSubdomainsEventQuery: QueryConfig = { + name: insertQueryName, + text: insertQuery, + values: subdomainValues, + }; + const bnsRes = await client.query(insertBnsSubdomainsEventQuery); + if (bnsRes.rowCount !== subdomainCount) { + throw new Error(`Expected ${subdomainCount} inserts, got ${bnsRes.rowCount} for BNS`); + } + } catch (e: any) { + logError(`subdomain batch error ${e.message}`, e); throw e; } } @@ -6806,11 +6885,33 @@ export class PgDataStore const validZonefileHash = this.validateZonefileHash(zonefile_hash); await client.query( ` - INSERT INTO zonefiles (zonefile, zonefile_hash) - VALUES ($1, $2) - `, - [zonefile, validZonefileHash] + INSERT INTO zonefiles (name, zonefile, zonefile_hash, tx_id, index_block_hash) + VALUES ($1, $2, $3, $4, $5) + ON CONFLICT ON CONSTRAINT unique_name_zonefile_hash_tx_id_index_block_hash DO + UPDATE SET zonefile = EXCLUDED.zonefile + `, + [ + name, + zonefile, + validZonefileHash, + hexToBuffer(tx_id), + hexToBuffer(blockData.index_block_hash), + ] ); + // Try to figure out the name's expiration block based on its namespace's lifetime. 
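
In other words, when a canonical namespace row exists, the expiration is derived from the registration height plus the namespace lifetime; otherwise the value carried on the event is kept. A short sketch of that fallback rule, with illustrative numbers:

    // Mirrors the fallback implemented below (all numbers illustrative).
    const computeExpireBlock = (registeredAt: number, lifetime?: number, eventValue = 0): number =>
      lifetime !== undefined ? registeredAt + lifetime : eventValue;

    computeExpireBlock(20_000, 52_595); // => 72_595
    computeExpireBlock(20_000);         // => 0, i.e. the event-provided expire_block
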
+ const namespaceLifetime = await client.query<{ lifetime: number }>( + `SELECT lifetime + FROM namespaces + WHERE namespace_id = $1 + AND canonical = true AND microblock_canonical = true + ORDER BY namespace_id, ready_block DESC, microblock_sequence DESC, tx_index DESC + LIMIT 1`, + [namespace_id] + ); + const expireBlock = + namespaceLifetime.rowCount > 0 + ? registered_at + namespaceLifetime.rows[0].lifetime + : expire_block; await client.query( ` INSERT INTO names( @@ -6818,12 +6919,25 @@ export class PgDataStore tx_index, tx_id, status, canonical, index_block_hash, parent_index_block_hash, microblock_hash, microblock_sequence, microblock_canonical ) values($1, $2, $3, $4, $5, $6, $7, $8,$9, $10, $11, $12, $13, $14, $15) - `, + ON CONFLICT ON CONSTRAINT unique_name_tx_id_index_block_hash_microblock_hash DO + UPDATE SET + address = EXCLUDED.address, + registered_at = EXCLUDED.registered_at, + expire_block = EXCLUDED.expire_block, + zonefile_hash = EXCLUDED.zonefile_hash, + namespace_id = EXCLUDED.namespace_id, + tx_index = EXCLUDED.tx_index, + status = EXCLUDED.status, + canonical = EXCLUDED.canonical, + parent_index_block_hash = EXCLUDED.parent_index_block_hash, + microblock_sequence = EXCLUDED.microblock_sequence, + microblock_canonical = EXCLUDED.microblock_canonical + `, [ name, address, registered_at, - expire_block, + expireBlock, validZonefileHash, namespace_id, tx_index, @@ -6867,15 +6981,32 @@ export class PgDataStore tx_index, canonical, } = bnsNamespace; - await client.query( ` INSERT INTO namespaces( namespace_id, launched_at, address, reveal_block, ready_block, buckets, - base,coeff, nonalpha_discount,no_vowel_discount, lifetime, status, tx_index, + base, coeff, nonalpha_discount, no_vowel_discount, lifetime, status, tx_index, tx_id, canonical, index_block_hash, parent_index_block_hash, microblock_hash, microblock_sequence, microblock_canonical ) values($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20) + ON CONFLICT ON CONSTRAINT unique_namespace_id_tx_id_index_block_hash_microblock_hash DO + UPDATE SET + launched_at = EXCLUDED.launched_at, + address = EXCLUDED.address, + reveal_block = EXCLUDED.reveal_block, + ready_block = EXCLUDED.ready_block, + buckets = EXCLUDED.buckets, + base = EXCLUDED.base, + coeff = EXCLUDED.coeff, + nonalpha_discount = EXCLUDED.nonalpha_discount, + no_vowel_discount = EXCLUDED.no_vowel_discount, + lifetime = EXCLUDED.lifetime, + status = EXCLUDED.status, + tx_index = EXCLUDED.tx_index, + canonical = EXCLUDED.canonical, + parent_index_block_hash = EXCLUDED.parent_index_block_hash, + microblock_sequence = EXCLUDED.microblock_sequence, + microblock_canonical = EXCLUDED.microblock_canonical `, [ namespace_id, @@ -6967,7 +7098,7 @@ export class PgDataStore FROM namespaces WHERE canonical = true AND microblock_canonical = true AND ready_block <= $1 - ORDER BY namespace_id, ready_block DESC, tx_index DESC + ORDER BY namespace_id, ready_block DESC, microblock_sequence DESC, tx_index DESC `, [maxBlockHeight] ); @@ -6998,7 +7129,7 @@ export class PgDataStore WHERE namespace_id = $1 AND registered_at <= $3 AND canonical = true AND microblock_canonical = true - ORDER BY name, registered_at DESC, tx_index DESC + ORDER BY name, registered_at DESC, microblock_sequence DESC, tx_index DESC LIMIT 100 OFFSET $2 `, @@ -7026,7 +7157,7 @@ export class PgDataStore WHERE namespace_id = $1 AND ready_block <= $2 AND canonical = true AND microblock_canonical = true - ORDER BY namespace_id, ready_block DESC, tx_index DESC + ORDER BY 
namespace_id, ready_block DESC, microblock_sequence DESC, tx_index DESC LIMIT 1 `, [namespace, maxBlockHeight] @@ -7060,13 +7191,15 @@ export class PgDataStore DbBnsName & { tx_id: Buffer; index_block_hash: Buffer } >( ` - SELECT DISTINCT ON (names.name) names.name, names.*, zonefiles.zonefile - FROM names - LEFT JOIN zonefiles ON names.zonefile_hash = zonefiles.zonefile_hash - WHERE name = $1 - AND registered_at <= $2 - AND canonical = true AND microblock_canonical = true - ORDER BY name, registered_at DESC, tx_index DESC + SELECT n.*, z.zonefile + FROM names AS n + LEFT JOIN zonefiles AS z USING (name, tx_id, index_block_hash) + WHERE n.name = $1 + AND n.registered_at <= $2 + AND n.canonical = true + AND n.microblock_canonical = true + ORDER BY n.registered_at DESC, n.microblock_sequence DESC, n.tx_index DESC + LIMIT 1 `, [name, maxBlockHeight] ); @@ -7117,27 +7250,54 @@ export class PgDataStore async getHistoricalZoneFile(args: { name: string; zoneFileHash: string; + includeUnanchored: boolean; }): Promise> { - const queryResult = await this.query(client => { + const queryResult = await this.queryTx(async client => { + const maxBlockHeight = await this.getMaxBlockHeight(client, { + includeUnanchored: args.includeUnanchored, + }); const validZonefileHash = this.validateZonefileHash(args.zoneFileHash); - return client.query<{ zonefile: string }>( - ` - SELECT zonefile - FROM names - LEFT JOIN zonefiles ON zonefiles.zonefile_hash = names.zonefile_hash - WHERE name = $1 - AND names.zonefile_hash = $2 - UNION ALL - SELECT zonefile - FROM subdomains - LEFT JOIN zonefiles ON zonefiles.zonefile_hash = subdomains.zonefile_hash - WHERE fully_qualified_subdomain = $1 - AND subdomains.zonefile_hash = $2 - `, - [args.name, validZonefileHash] - ); + // Depending on the kind of name we got, use the correct table to pivot on canonical chain + // state to get the zonefile. We can't pivot on the `txs` table because some names/subdomains + // were imported from Stacks v1 and they don't have an associated tx. 
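
The name-versus-subdomain split used by this lookup (and by getLatestZoneFile below) is purely lexical: a fully qualified subdomain has at least three dot-separated parts. A one-line sketch with illustrative names:

    // Mirrors `args.name.split('.').length > 2` in the query selection below.
    const isSubdomain = (name: string): boolean => name.split('.').length > 2;

    isSubdomain('alice.id');     // false -> pivot on the `names` table
    isSubdomain('sub.alice.id'); // true  -> pivot on the `subdomains` table
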
+ const isSubdomain = args.name.split('.').length > 2; + if (isSubdomain) { + return client.query<{ zonefile: string }>( + ` + SELECT zonefile + FROM zonefiles AS z + INNER JOIN subdomains AS s ON + s.fully_qualified_subdomain = z.name + AND s.tx_id = z.tx_id + AND s.index_block_hash = z.index_block_hash + WHERE z.name = $1 + AND z.zonefile_hash = $2 + AND s.canonical = TRUE + AND s.microblock_canonical = TRUE + AND s.block_height <= $3 + ORDER BY s.block_height DESC, s.microblock_sequence DESC, s.tx_index DESC + LIMIT 1 + `, + [args.name, validZonefileHash, maxBlockHeight] + ); + } else { + return client.query<{ zonefile: string }>( + ` + SELECT zonefile + FROM zonefiles AS z + INNER JOIN names AS n USING (name, tx_id, index_block_hash) + WHERE z.name = $1 + AND z.zonefile_hash = $2 + AND n.canonical = TRUE + AND n.microblock_canonical = TRUE + AND n.registered_at <= $3 + ORDER BY n.registered_at DESC, n.microblock_sequence DESC, n.tx_index DESC + LIMIT 1 + `, + [args.name, validZonefileHash, maxBlockHeight] + ); + } }); - if (queryResult.rowCount > 0) { return { found: true, @@ -7156,51 +7316,45 @@ export class PgDataStore }): Promise> { const queryResult = await this.queryTx(async client => { const maxBlockHeight = await this.getMaxBlockHeight(client, { includeUnanchored }); - const zonefileHashResult = await client.query<{ name: string; zonefile: string }>( - ` - SELECT name, zonefile_hash as zonefile FROM ( - ( - SELECT DISTINCT ON (name) name, zonefile_hash - FROM names - WHERE name = $1 - AND registered_at <= $2 - AND canonical = true AND microblock_canonical = true - ORDER BY name, registered_at DESC, tx_index DESC - LIMIT 1 - ) - UNION ALL ( - SELECT DISTINCT ON (fully_qualified_subdomain) fully_qualified_subdomain as name, zonefile_hash - FROM subdomains - WHERE fully_qualified_subdomain = $1 - AND block_height <= $2 - AND canonical = true AND microblock_canonical = true - ORDER BY fully_qualified_subdomain, block_height DESC, tx_index DESC - LIMIT 1 - ) - ) results - LIMIT 1 - `, - [name, maxBlockHeight] - ); - if (zonefileHashResult.rowCount === 0) { - return zonefileHashResult; + // Depending on the kind of name we got, use the correct table to pivot on canonical chain + // state to get the zonefile. We can't pivot on the `txs` table because some names/subdomains + // were imported from Stacks v1 and they don't have an associated tx. 
+ const isSubdomain = name.split('.').length > 2; + if (isSubdomain) { + return client.query<{ zonefile: string }>( + ` + SELECT zonefile + FROM zonefiles AS z + INNER JOIN subdomains AS s ON + s.fully_qualified_subdomain = z.name + AND s.tx_id = z.tx_id + AND s.index_block_hash = z.index_block_hash + WHERE z.name = $1 + AND s.canonical = TRUE + AND s.microblock_canonical = TRUE + AND s.block_height <= $2 + ORDER BY s.block_height DESC, s.microblock_sequence DESC, s.tx_index DESC + LIMIT 1 + `, + [name, maxBlockHeight] + ); + } else { + return client.query<{ zonefile: string }>( + ` + SELECT zonefile + FROM zonefiles AS z + INNER JOIN names AS n USING (name, tx_id, index_block_hash) + WHERE z.name = $1 + AND n.canonical = TRUE + AND n.microblock_canonical = TRUE + AND n.registered_at <= $2 + ORDER BY n.registered_at DESC, n.microblock_sequence DESC, n.tx_index DESC + LIMIT 1 + `, + [name, maxBlockHeight] + ); } - const zonefileHash = zonefileHashResult.rows[0].zonefile; - const zonefileResult = await client.query<{ zonefile: string }>( - ` - SELECT zonefile - FROM zonefiles - WHERE zonefile_hash = $1 - `, - [zonefileHash] - ); - if (zonefileResult.rowCount === 0) { - return zonefileHashResult; - } - zonefileHashResult.rows[0].zonefile = zonefileResult.rows[0].zonefile; - return zonefileHashResult; }); - if (queryResult.rowCount > 0) { return { found: true, @@ -7313,8 +7467,10 @@ export class PgDataStore ` SELECT DISTINCT ON (fully_qualified_subdomain) fully_qualified_subdomain FROM subdomains - WHERE name = $1 AND block_height <= $2 - AND canonical = true AND microblock_canonical = true + WHERE name = $1 + AND block_height <= $2 + AND canonical = true + AND microblock_canonical = true ORDER BY fully_qualified_subdomain, block_height DESC, microblock_sequence DESC, tx_index DESC `, [name, maxBlockHeight] @@ -7340,7 +7496,7 @@ export class PgDataStore FROM subdomains WHERE block_height <= $2 AND canonical = true AND microblock_canonical = true - ORDER BY fully_qualified_subdomain, block_height DESC, tx_index DESC + ORDER BY fully_qualified_subdomain, block_height DESC, microblock_sequence DESC, tx_index DESC LIMIT 100 OFFSET $1 `, @@ -7361,7 +7517,7 @@ export class PgDataStore FROM names WHERE canonical = true AND microblock_canonical = true AND registered_at <= $2 - ORDER BY name, registered_at DESC, tx_index DESC + ORDER BY name, registered_at DESC, microblock_sequence DESC, tx_index DESC LIMIT 100 OFFSET $1 `, @@ -7382,36 +7538,29 @@ export class PgDataStore }): Promise> { const queryResult = await this.queryTx(async client => { const maxBlockHeight = await this.getMaxBlockHeight(client, { includeUnanchored }); - const subdomainResult = await client.query< + const result = await client.query< DbBnsSubdomain & { tx_id: Buffer; index_block_hash: Buffer } >( ` - SELECT DISTINCT ON(subdomains.fully_qualified_subdomain) subdomains.fully_qualified_subdomain, * - FROM subdomains - WHERE canonical = true AND microblock_canonical = true - AND block_height <= $2 - AND fully_qualified_subdomain = $1 - ORDER BY fully_qualified_subdomain, block_height DESC, tx_index DESC + SELECT s.*, z.zonefile + FROM subdomains AS s + LEFT JOIN zonefiles AS z + ON z.name = s.fully_qualified_subdomain + AND z.tx_id = s.tx_id + AND z.index_block_hash = s.index_block_hash + WHERE s.canonical = true + AND s.microblock_canonical = true + AND s.block_height <= $2 + AND s.fully_qualified_subdomain = $1 + ORDER BY s.block_height DESC, s.microblock_sequence DESC, s.tx_index DESC + LIMIT 1 `, [subdomain, maxBlockHeight] ); - 
if (subdomainResult.rowCount === 0 || !subdomainResult.rows[0].zonefile_hash) { - return subdomainResult; + if (result.rowCount === 0 || !result.rows[0].zonefile_hash) { + return result; } - const zonefileHash = subdomainResult.rows[0].zonefile_hash; - const zonefileResult = await client.query( - ` - SELECT zonefile - FROM zonefiles - WHERE zonefile_hash = $1 - `, - [zonefileHash] - ); - if (zonefileResult.rowCount === 0) { - return subdomainResult; - } - subdomainResult.rows[0].zonefile = zonefileResult.rows[0].zonefile; - return subdomainResult; + return result; }); if (queryResult.rowCount > 0) { return { @@ -7434,7 +7583,7 @@ export class PgDataStore FROM subdomains WHERE canonical = true AND microblock_canonical = true AND name = $1 - ORDER BY name, block_height DESC, tx_index DESC + ORDER BY name, block_height DESC, microblock_sequence DESC, tx_index DESC LIMIT 1 `, [args.name] diff --git a/src/event-replay/event-replay.ts b/src/event-replay/event-replay.ts index f38f8717..c1ca711d 100644 --- a/src/event-replay/event-replay.ts +++ b/src/event-replay/event-replay.ts @@ -156,6 +156,9 @@ export async function importEventsFromTsv( }); if (rawEvent.event_path === '/new_block') { blockHeight = await getDbBlockHeight(db); + if (blockHeight % 1000 === 0) { + console.log(`Event file block height reached: ${blockHeight}`); + } } } } diff --git a/src/bns-constants.ts b/src/event-stream/bns/bns-constants.ts similarity index 70% rename from src/bns-constants.ts rename to src/event-stream/bns/bns-constants.ts index 14248db2..f33e14dc 100644 --- a/src/bns-constants.ts +++ b/src/event-stream/bns/bns-constants.ts @@ -18,14 +18,5 @@ export const enum BnsContractIdentifier { mainnet = 'SP000000000000000000002Q6VF78.bns', testnet = 'ST000000000000000000002AMW42H.bns', } -export const namespaceReadyFunction = 'namespace-ready'; -export const nameFunctions = [ - 'name-import', - 'name-revoke', - 'name-update', - 'name-transfer', - 'name-renewal', - 'name-register', -]; export const bnsBlockchain = 'stacks'; diff --git a/src/bns-helpers.ts b/src/event-stream/bns/bns-helpers.ts similarity index 67% rename from src/bns-helpers.ts rename to src/event-stream/bns/bns-helpers.ts index 1c219402..a73edfe7 100644 --- a/src/bns-helpers.ts +++ b/src/event-stream/bns/bns-helpers.ts @@ -1,29 +1,25 @@ -import { Address, ChainID, StacksMessageType } from '@stacks/transactions'; -import { DbBnsNamespace } from './datastore/common'; -import { hexToBuffer, hexToUtf8String } from './helpers'; -import { CoreNodeParsedTxMessage } from './event-stream/core-node-message'; -import { StacksCoreRpcClient, getCoreNodeEndpoint } from './core-rpc/client'; +import { ChainID, ClarityType, hexToCV } from '@stacks/transactions'; +import { hexToBuffer, hexToUtf8String } from '../../helpers'; +import { CoreNodeParsedTxMessage } from '../../event-stream/core-node-message'; +import { getCoreNodeEndpoint } from '../../core-rpc/client'; import { StacksMainnet, StacksTestnet } from '@stacks/network'; import { URIType } from 'zone-file/dist/zoneFile'; -import { BnsContractIdentifier } from './bns-constants'; +import { BnsContractIdentifier, printTopic } from './bns-constants'; import * as crypto from 'crypto'; import { ClarityTypeID, decodeClarityValue, - ClarityValue, ClarityValueBuffer, - ClarityValueInt, ClarityValueList, - ClarityValueOptional, - ClarityValueOptionalSome, ClarityValueOptionalUInt, ClarityValuePrincipalStandard, ClarityValueStringAscii, ClarityValueTuple, ClarityValueUInt, TxPayloadTypeID, - ClarityValuePrincipalContract, } 
from 'stacks-encoding-native-js'; +import { SmartContractEvent } from '../core-node-message'; +import { DbBnsNamespace, DbBnsName } from '../../datastore/common'; interface Attachment { attachment: { @@ -160,8 +156,8 @@ export function parseNamespaceRawValue( const namespaceBns: DbBnsNamespace = { namespace_id: namespace, address: address, - base: Number(base), - coeff: Number(coeff), + base: base, + coeff: coeff, launched_at: launched_at, lifetime: Number(lifetime), no_vowel_discount: Number(no_vowel_discount), @@ -177,39 +173,6 @@ export function parseNamespaceRawValue( return namespaceBns; } -export function getFunctionName(tx_id: string, transactions: CoreNodeParsedTxMessage[]): string { - const contract_function_name: string = ''; - for (const tx of transactions) { - if (tx.core_tx.txid === tx_id) { - if (tx.parsed_tx.payload.type_id === TxPayloadTypeID.ContractCall) { - return tx.parsed_tx.payload.function_name; - } - } - } - return contract_function_name; -} - -export function getNewOwner( - tx_id: string, - transactions: CoreNodeParsedTxMessage[] -): string | undefined { - for (const tx of transactions) { - if (tx.core_tx.txid === tx_id) { - if (tx.parsed_tx.payload.type_id === TxPayloadTypeID.ContractCall) { - if ( - tx.parsed_tx.payload.function_args.length >= 3 && - tx.parsed_tx.payload.function_args[2].type_id === ClarityTypeID.PrincipalStandard - ) { - const decoded = decodeClarityValue(tx.parsed_tx.payload.function_args[2].hex); - const principal = decoded as ClarityValuePrincipalStandard; - principal.address; - } - } - } - } - return undefined; -} - export function GetStacksNetwork(chainId: ChainID) { const network = chainId === ChainID.Mainnet ? new StacksMainnet() : new StacksTestnet(); network.coreApiUrl = `http://${getCoreNodeEndpoint()}`; @@ -272,3 +235,81 @@ export function getBnsContractID(chainId: ChainID) { chainId === ChainID.Mainnet ? BnsContractIdentifier.mainnet : BnsContractIdentifier.testnet; return contractId; } + +function isEventFromBnsContract(event: SmartContractEvent): boolean { + return ( + event.contract_event.topic === printTopic && + (event.contract_event.contract_identifier === BnsContractIdentifier.mainnet || + event.contract_event.contract_identifier === BnsContractIdentifier.testnet) + ); +} + +export function parseNameFromContractEvent( + event: SmartContractEvent, + tx: CoreNodeParsedTxMessage, + blockHeight: number +): DbBnsName | undefined { + if (!isEventFromBnsContract(event)) { + return; + } + let attachment: Attachment; + try { + attachment = parseNameRawValue(event.contract_event.raw_value); + } catch (error) { + return; + } + let name_address = attachment.attachment.metadata.tx_sender.address; + // Is this a `name-transfer` contract call? If so, record the new owner. + if ( + attachment.attachment.metadata.op === 'name-transfer' && + tx.parsed_tx.payload.type_id === TxPayloadTypeID.ContractCall && + tx.parsed_tx.payload.function_args.length >= 3 && + tx.parsed_tx.payload.function_args[2].type_id === ClarityTypeID.PrincipalStandard + ) { + const decoded = decodeClarityValue(tx.parsed_tx.payload.function_args[2].hex); + const principal = decoded as ClarityValuePrincipalStandard; + name_address = principal.address; + } + const name: DbBnsName = { + name: attachment.attachment.metadata.name.concat('.', attachment.attachment.metadata.namespace), + namespace_id: attachment.attachment.metadata.namespace, + address: name_address, + // expire_block will be calculated upon DB insert based on the namespace's lifetime. 
+ expire_block: 0, + registered_at: blockHeight, + zonefile_hash: attachment.attachment.hash, + // zonefile will be updated when an `/attachments/new` message arrives. + zonefile: '', + tx_id: event.txid, + tx_index: tx.core_tx.tx_index, + status: attachment.attachment.metadata.op, + canonical: true, + }; + return name; +} + +export function parseNamespaceFromContractEvent( + event: SmartContractEvent, + tx: CoreNodeParsedTxMessage, + blockHeight: number +): DbBnsNamespace | undefined { + if (!isEventFromBnsContract(event)) { + return; + } + // Look for a `namespace-ready` BNS print event. + const decodedEvent = hexToCV(event.contract_event.raw_value); + if ( + decodedEvent.type === ClarityType.Tuple && + decodedEvent.data.status && + decodedEvent.data.status.type === ClarityType.StringASCII && + decodedEvent.data.status.data === 'ready' + ) { + const namespace = parseNamespaceRawValue( + event.contract_event.raw_value, + blockHeight, + event.txid, + tx.core_tx.tx_index + ); + return namespace; + } +} diff --git a/src/event-stream/core-node-message.ts b/src/event-stream/core-node-message.ts index f00a5b4a..e8dd76a8 100644 --- a/src/event-stream/core-node-message.ts +++ b/src/event-stream/core-node-message.ts @@ -25,7 +25,7 @@ interface CoreNodeEventBase { committed: boolean; } -interface SmartContractEvent extends CoreNodeEventBase { +export interface SmartContractEvent extends CoreNodeEventBase { type: CoreNodeEventType.ContractEvent; contract_event: { /** Fully qualified contract ID, e.g. "ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH.kv-store" */ diff --git a/src/event-stream/event-server.ts b/src/event-stream/event-server.ts index b2980ca4..18f1acd0 100644 --- a/src/event-stream/event-server.ts +++ b/src/event-stream/event-server.ts @@ -1,6 +1,6 @@ import { inspect } from 'util'; import * as net from 'net'; -import { Server, createServer } from 'http'; +import { createServer } from 'http'; import * as express from 'express'; import * as bodyParser from 'body-parser'; import { asyncHandler } from '../api/async-handler'; @@ -8,7 +8,7 @@ import PQueue from 'p-queue'; import * as expressWinston from 'express-winston'; import * as winston from 'winston'; -import { hexToBuffer, logError, logger, digestSha512_256, I32_MAX, LogLevel } from '../helpers'; +import { hexToBuffer, logError, logger, LogLevel } from '../helpers'; import { CoreNodeBlockMessage, CoreNodeEventType, @@ -44,6 +44,7 @@ import { DataStoreMicroblockUpdateData, DataStoreTxEventData, DbMicroblock, + DataStoreAttachmentData, } from '../datastore/common'; import { getTxSenderAddress, @@ -61,23 +62,8 @@ import { TxPayloadTypeID, } from 'stacks-encoding-native-js'; import { ChainID } from '@stacks/transactions'; -import { - getFunctionName, - getNewOwner, - parseNameRawValue, - parseNamespaceRawValue, - parseResolver, - parseZoneFileTxt, -} from '../bns-helpers'; - -import { - printTopic, - namespaceReadyFunction, - nameFunctions, - BnsContractIdentifier, -} from '../bns-constants'; - -import * as zoneFileParser from 'zone-file'; +import { BnsContractIdentifier } from './bns/bns-constants'; +import { parseNameFromContractEvent, parseNamespaceFromContractEvent } from './bns/bns-helpers'; async function handleRawEventRequest( eventPath: string, @@ -381,51 +367,18 @@ function parseDataStoreTxEventData( value: hexToBuffer(event.contract_event.raw_value), }; dbTx.contractLogEvents.push(entry); - if ( - event.contract_event.topic === printTopic && - (event.contract_event.contract_identifier === BnsContractIdentifier.mainnet || - 
event.contract_event.contract_identifier === BnsContractIdentifier.testnet) - ) { - const functionName = getFunctionName(event.txid, parsedTxs); - if (nameFunctions.includes(functionName)) { - const attachment = parseNameRawValue(event.contract_event.raw_value); - let name_address = attachment.attachment.metadata.tx_sender.address; - if (functionName === 'name-transfer') { - const new_owner = getNewOwner(event.txid, parsedTxs); - if (new_owner) { - name_address = new_owner; - } - } - const name: DbBnsName = { - name: attachment.attachment.metadata.name.concat( - '.', - attachment.attachment.metadata.namespace - ), - namespace_id: attachment.attachment.metadata.namespace, - address: name_address, - expire_block: 0, - registered_at: blockData.block_height, - zonefile_hash: attachment.attachment.hash, - zonefile: '', // zone file will be updated in /attachments/new - tx_id: event.txid, - tx_index: entry.tx_index, - status: attachment.attachment.metadata.op, - canonical: true, - }; - dbTx.names.push(name); - } - if (functionName === namespaceReadyFunction) { - // event received for namespaces - const namespace: DbBnsNamespace | undefined = parseNamespaceRawValue( - event.contract_event.raw_value, - blockData.block_height, - event.txid, - entry.tx_index - ); - if (namespace != undefined) { - dbTx.namespaces.push(namespace); - } - } + // Check if we have new BNS names or namespaces. + const parsedTx = parsedTxs.find(entry => entry.core_tx.txid === event.txid); + if (!parsedTx) { + throw new Error(`Unexpected missing tx during BNS parsing by tx_id ${event.txid}`); + } + const name = parseNameFromContractEvent(event, parsedTx, blockData.block_height); + if (name) { + dbTx.names.push(name); + } + const namespace = parseNamespaceFromContractEvent(event, parsedTx, blockData.block_height); + if (namespace) { + dbTx.namespaces.push(namespace); } break; } @@ -575,83 +528,33 @@ function parseDataStoreTxEventData( } async function handleNewAttachmentMessage(msg: CoreNodeAttachmentMessage[], db: DataStore) { - for (const attachment of msg) { - if ( - attachment.contract_id === BnsContractIdentifier.mainnet || - attachment.contract_id === BnsContractIdentifier.testnet - ) { - const metadataCV = decodeClarityValue< - ClarityValueTuple<{ - op: ClarityValueStringAscii; - name: ClarityValueBuffer; - namespace: ClarityValueBuffer; - }> - >(attachment.metadata); - const op = metadataCV.data['op'].data; - const zonefile = Buffer.from(attachment.content.slice(2), 'hex').toString(); - const zoneFileHash = attachment.content_hash; - if (op === 'name-update') { - const name = hexToBuffer(metadataCV.data['name'].buffer).toString('utf8'); - const namespace = hexToBuffer(metadataCV.data['namespace'].buffer).toString('utf8'); - const zoneFileContents = zoneFileParser.parseZoneFile(zonefile); - const zoneFileTxt = zoneFileContents.txt; - const blockData = { - index_block_hash: '', - parent_index_block_hash: '', - microblock_hash: '', - microblock_sequence: I32_MAX, - microblock_canonical: true, - }; - // Case for subdomain - if (zoneFileTxt) { - // get unresolved subdomain - let isCanonical = true; - const dbTx = await db.getTxStrict({ - txId: attachment.tx_id, - indexBlockHash: attachment.index_block_hash, - }); - if (dbTx.found) { - isCanonical = dbTx.result.canonical; - blockData.index_block_hash = dbTx.result.index_block_hash; - blockData.parent_index_block_hash = dbTx.result.parent_index_block_hash; - blockData.microblock_hash = dbTx.result.microblock_hash; - blockData.microblock_sequence = 
dbTx.result.microblock_sequence; - blockData.microblock_canonical = dbTx.result.microblock_canonical; - } else { - logger.warn( - `Could not find transaction ${attachment.tx_id} associated with attachment` - ); - } - // case for subdomain - const subdomains: DbBnsSubdomain[] = []; - for (let i = 0; i < zoneFileTxt.length; i++) { - const zoneFile = zoneFileTxt[i]; - const parsedTxt = parseZoneFileTxt(zoneFile.txt); - if (parsedTxt.owner === '') continue; //if txt has no owner , skip it - const subdomain: DbBnsSubdomain = { - name: name.concat('.', namespace), - namespace_id: namespace, - fully_qualified_subdomain: zoneFile.name.concat('.', name, '.', namespace), - owner: parsedTxt.owner, - zonefile_hash: parsedTxt.zoneFileHash, - zonefile: parsedTxt.zoneFile, - tx_id: attachment.tx_id, - tx_index: -1, - canonical: isCanonical, - parent_zonefile_hash: attachment.content_hash.slice(2), - parent_zonefile_index: 0, //TODO need to figure out this field - block_height: Number.parseInt(attachment.block_height, 10), - zonefile_offset: 1, - resolver: zoneFileContents.uri ? parseResolver(zoneFileContents.uri) : '', - }; - subdomains.push(subdomain); - } - await db.resolveBnsSubdomains(blockData, subdomains); - } + const attachments = msg + .map(message => { + if ( + message.contract_id === BnsContractIdentifier.mainnet || + message.contract_id === BnsContractIdentifier.testnet + ) { + const metadataCV = decodeClarityValue< + ClarityValueTuple<{ + op: ClarityValueStringAscii; + name: ClarityValueBuffer; + namespace: ClarityValueBuffer; + }> + >(message.metadata); + return { + op: metadataCV.data['op'].data, + zonefile: message.content.slice(2), + name: hexToBuffer(metadataCV.data['name'].buffer).toString('utf8'), + namespace: hexToBuffer(metadataCV.data['namespace'].buffer).toString('utf8'), + zonefileHash: message.content_hash, + txId: message.tx_id, + indexBlockHash: message.index_block_hash, + blockHeight: Number.parseInt(message.block_height, 10), + } as DataStoreAttachmentData; } - await db.updateZoneContent(zonefile, zoneFileHash, attachment.tx_id); - } - } + }) + .filter((msg): msg is DataStoreAttachmentData => !!msg); + await db.updateAttachments(attachments); } interface EventMessageHandler { diff --git a/src/helpers.ts b/src/helpers.ts index 1c3954ca..c3f0a82a 100644 --- a/src/helpers.ts +++ b/src/helpers.ts @@ -157,7 +157,7 @@ type DisabledLogLevels = Exclude< type LoggerInterface = Omit & { level: LogLevel }; const LOG_LEVELS: LogLevel[] = ['error', 'warn', 'info', 'http', 'verbose', 'debug', 'silly']; -const defaultLogLevel: LogLevel = (() => { +export const defaultLogLevel: LogLevel = (() => { const STACKS_API_LOG_LEVEL_ENV_VAR = 'STACKS_API_LOG_LEVEL'; const logLevelEnvVar = process.env[ STACKS_API_LOG_LEVEL_ENV_VAR @@ -236,12 +236,6 @@ export function microStxToStx(microStx: bigint | BigNumber): string { return bigNumResult.toFixed(STACKS_DECIMAL_PLACES, MAX_BIGNUMBER_ROUND_MODE); } -export function digestSha512_256(input: Buffer): Buffer { - const hash = crypto.createHash('sha512-256'); - const digest = hash.update(input).digest(); - return digest; -} - /** * Checks if a string is a valid Bitcoin address. * Supports mainnet and testnet address. 
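Note: the `DataStoreAttachmentData` type consumed above is imported from `src/datastore/common.ts`, but its definition is not part of this diff. A minimal sketch of the shape implied by the object literal built in `handleNewAttachmentMessage` (field names mirror that mapping verbatim; the real interface may carry additional fields):

// Sketch only — the actual interface lives in src/datastore/common.ts.
interface DataStoreAttachmentData {
  op: string;             // BNS op from the metadata tuple, e.g. 'name-update'
  zonefile: string;       // attachment content as hex, '0x' prefix stripped
  name: string;           // name decoded from the metadata buffer, e.g. 'jnj'
  namespace: string;      // namespace decoded from the metadata buffer, e.g. 'btc'
  zonefileHash: string;   // content_hash of the attachment message
  txId: string;           // tx that emitted the attachment
  indexBlockHash: string; // anchor block the tx was mined in
  blockHeight: number;    // parsed from the message's string block_height
}

Batching the decoded attachments and handing them to `db.updateAttachments` moves zonefile and subdomain resolution into the datastore, which is why the inline zone-file parsing (and the `zoneFileParser` import) is removed from this file.
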
diff --git a/src/import-v1/index.ts b/src/import-v1/index.ts index 1704466a..fa433d0d 100644 --- a/src/import-v1/index.ts +++ b/src/import-v1/index.ts @@ -185,8 +185,8 @@ class ChainProcessor extends stream.Writable { reveal_block: 0, ready_block: 0, buckets: parts[2], - base: parseInt(parts[3], 10), - coeff: parseInt(parts[4], 10), + base: BigInt(parts[3]), + coeff: BigInt(parts[4]), nonalpha_discount: parseInt(parts[5], 10), no_vowel_discount: parseInt(parts[6], 10), lifetime: parseInt(parts[7], 10), @@ -429,15 +429,6 @@ export async function importV1BnsData(db: PgDataStore, importDir: string) { const client = await db.pool.connect(); try { await client.query('BEGIN'); - logger.info(`Disabling BNS table indices temporarily for a faster import`); - await client.query(` - UPDATE pg_index - SET indisready = false, indisvalid = false - WHERE indrelid = ANY ( - SELECT oid FROM pg_class - WHERE relname IN ('subdomains', 'zonefiles', 'namespaces', 'names') - ) - `); const zhashes = await readZones(path.join(importDir, 'name_zonefiles.txt')); await pipeline( fs.createReadStream(path.join(importDir, 'chainstate.txt')), @@ -460,7 +451,8 @@ export async function importV1BnsData(db: PgDataStore, importDir: string) { SUBDOMAIN_BATCH_SIZE, false )) { - await db.updateBatchSubdomains(client, blockData, subdomainBatch); + await db.updateBatchSubdomains(client, [{ blockData, subdomains: subdomainBatch }]); + await db.updateBatchZonefiles(client, [{ blockData, subdomains: subdomainBatch }]); subdomainsImported += subdomainBatch.length; if (subdomainsImported % 10_000 === 0) { logger.info(`Subdomains imported: ${subdomainsImported}`); @@ -474,12 +466,6 @@ export async function importV1BnsData(db: PgDataStore, importDir: string) { bns_subdomains_imported: true, }; await db.updateConfigState(updatedConfigState, client); - - logger.info(`Re-indexing BNS tables. 
This might take a while...`); - await client.query(`REINDEX TABLE subdomains`); - await client.query(`REINDEX TABLE zonefiles`); - await client.query(`REINDEX TABLE namespaces`); - await client.query(`REINDEX TABLE names`); await client.query('COMMIT'); } catch (error) { await client.query('ROLLBACK'); diff --git a/src/migrations/1608030374841_namespaces.ts b/src/migrations/1608030374841_namespaces.ts index 1d72636e..1ac9280d 100644 --- a/src/migrations/1608030374841_namespaces.ts +++ b/src/migrations/1608030374841_namespaces.ts @@ -33,11 +33,11 @@ export async function up(pgm: MigrationBuilder): Promise { notNull: true, }, base: { - type: 'integer', + type: 'numeric', notNull: true, }, coeff: { - type: 'integer', + type: 'numeric', notNull: true, }, nonalpha_discount: { @@ -91,7 +91,14 @@ export async function up(pgm: MigrationBuilder): Promise { }, }); - pgm.createIndex('namespaces', 'index_block_hash', { method: 'hash' }); - pgm.createIndex('namespaces', 'microblock_hash', { method: 'hash' }); - pgm.createIndex('namespaces', [{ name: 'ready_block', sort: 'DESC' }]); + pgm.createIndex('namespaces', [ + { name: 'ready_block', sort: 'DESC' }, + { name: 'microblock_sequence', sort: 'DESC' }, + { name: 'tx_index', sort: 'DESC' }, + ]); + pgm.addConstraint( + 'namespaces', + 'unique_namespace_id_tx_id_index_block_hash_microblock_hash', + 'UNIQUE(namespace_id, tx_id, index_block_hash, microblock_hash)' + ); } diff --git a/src/migrations/1608030374842_names.ts b/src/migrations/1608030374842_names.ts index 462d566a..7d0feaf3 100644 --- a/src/migrations/1608030374842_names.ts +++ b/src/migrations/1608030374842_names.ts @@ -83,9 +83,15 @@ export async function up(pgm: MigrationBuilder): Promise { }, }); - pgm.createIndex('names', 'tx_id', { method: 'hash' }); - pgm.createIndex('names', 'name', { method: 'hash' }); - pgm.createIndex('names', 'index_block_hash', { method: 'hash' }); - pgm.createIndex('names', 'microblock_hash', { method: 'hash' }); - pgm.createIndex('names', [{ name: 'registered_at', sort: 'DESC' }]); + pgm.createIndex('names', 'namespace_id'); + pgm.createIndex('names', [ + { name: 'registered_at', sort: 'DESC' }, + { name: 'microblock_sequence', sort: 'DESC' }, + { name: 'tx_index', sort: 'DESC' }, + ]); + pgm.addConstraint( + 'names', + 'unique_name_tx_id_index_block_hash_microblock_hash', + 'UNIQUE(name, tx_id, index_block_hash, microblock_hash)' + ); } diff --git a/src/migrations/1610030345948_subdomains.ts b/src/migrations/1610030345948_subdomains.ts index f5a76007..bcd24daa 100644 --- a/src/migrations/1610030345948_subdomains.ts +++ b/src/migrations/1610030345948_subdomains.ts @@ -84,10 +84,15 @@ export async function up(pgm: MigrationBuilder): Promise { }, }); - pgm.createIndex('subdomains', 'owner', { method: 'hash' }); - pgm.createIndex('subdomains', 'zonefile_hash', { method: 'hash' }); - pgm.createIndex('subdomains', 'fully_qualified_subdomain', { method: 'hash' }); - pgm.createIndex('subdomains', 'index_block_hash', { method: 'hash' }); - pgm.createIndex('subdomains', 'microblock_hash', { method: 'hash' }); - pgm.createIndex('subdomains', [{ name: 'block_height', sort: 'DESC' }]); + pgm.createIndex('subdomains', 'name'); + pgm.createIndex('subdomains', [ + { name: 'block_height', sort: 'DESC' }, + { name: 'microblock_sequence', sort: 'DESC' }, + { name: 'tx_index', sort: 'DESC' }, + ]); + pgm.addConstraint( + 'subdomains', + 'unique_fully_qualified_subdomain_tx_id_index_block_hash_microblock_hash', + 'UNIQUE(fully_qualified_subdomain, tx_id, index_block_hash, 
microblock_hash)' + ); } diff --git a/src/migrations/1626441820095_zonefiles.ts b/src/migrations/1626441820095_zonefiles.ts index 186730c7..c1cf58ce 100644 --- a/src/migrations/1626441820095_zonefiles.ts +++ b/src/migrations/1626441820095_zonefiles.ts @@ -9,6 +9,10 @@ export async function up(pgm: MigrationBuilder): Promise { type: 'serial', primaryKey: true, }, + name: { + type: 'string', + notNull: true, + }, zonefile: { type: 'string', notNull: true, @@ -16,8 +20,21 @@ export async function up(pgm: MigrationBuilder): Promise { zonefile_hash: { type: 'string', notNull: true, + }, + tx_id: { + type: 'bytea', + notNull: false, + }, + index_block_hash: { + type: 'bytea', + notNull: false, } }); - pgm.createIndex('zonefiles', 'zonefile_hash', { method: 'hash' }); + pgm.addIndex('zonefiles', 'zonefile_hash'); + pgm.addConstraint( + 'zonefiles', + 'unique_name_zonefile_hash_tx_id_index_block_hash', + 'UNIQUE(name, zonefile_hash, tx_id, index_block_hash)' + ); } diff --git a/src/test-utils/test-builders.ts b/src/test-utils/test-builders.ts index 960cf55e..f84c38ce 100644 --- a/src/test-utils/test-builders.ts +++ b/src/test-utils/test-builders.ts @@ -13,6 +13,7 @@ import { DbAssetEventTypeId, DbBlock, DbBnsName, + DbBnsNamespace, DbEventTypeId, DbFtEvent, DbMempoolTx, @@ -496,6 +497,49 @@ function testMinerReward(args?: TestMinerRewardArgs): DbMinerReward { }; } +interface TestBnsNamespaceArgs { + namespace_id?: string; + address?: string; + launched_at?: number; + reveal_block?: number; + ready_block?: number; + buckets?: string; + base?: bigint; + coeff?: bigint; + nonalpha_discount?: number; + no_vowel_discount?: number; + lifetime?: number; + status?: string; + tx_id?: string; + tx_index?: number; + canonical?: boolean; +} + +/** + * Generate a test BNS namespace + * @param args - Optional namespace data + * @returns `DbBnsNamespace` + */ +function testBnsNamespace(args?: TestBnsNamespaceArgs): DbBnsNamespace { + return { + namespace_id: args?.namespace_id ?? BNS_NAMESPACE_ID, + address: args?.address ?? SENDER_ADDRESS, + launched_at: args?.launched_at ?? BLOCK_HEIGHT, + reveal_block: args?.reveal_block ?? BLOCK_HEIGHT, + ready_block: args?.ready_block ?? BLOCK_HEIGHT, + buckets: args?.buckets ?? '1,1,1', + base: args?.base ?? 1n, + coeff: args?.coeff ?? 1n, + nonalpha_discount: args?.nonalpha_discount ?? 0, + no_vowel_discount: args?.no_vowel_discount ?? 0, + lifetime: args?.lifetime ?? 0, + status: args?.status ?? 'ready', + tx_id: args?.tx_id ?? TX_ID, + tx_index: args?.tx_index ?? 0, + canonical: args?.canonical ?? 
true, + }; +} + interface TestBnsNameArgs { name?: string; address?: string; @@ -655,12 +699,24 @@ export class TestBlockBuilder { addTxBnsName(args?: TestBnsNameArgs): TestBlockBuilder { const defaultArgs: TestBnsNameArgs = { tx_id: this.txData.tx.tx_id, + tx_index: this.txIndex, registered_at: this.block.block_height, }; this.txData.names.push(testBnsName({ ...defaultArgs, ...args })); return this; } + addTxBnsNamespace(args?: TestBnsNamespaceArgs): TestBlockBuilder { + const defaultArgs: TestBnsNamespaceArgs = { + tx_id: this.txData.tx.tx_id, + tx_index: this.txIndex, + ready_block: this.block.block_height, + reveal_block: this.block.block_height, + }; + this.txData.namespaces.push(testBnsNamespace({ ...defaultArgs, ...args })); + return this; + } + build(): DataStoreBlockUpdateData { return this.data; } @@ -746,6 +802,15 @@ export class TestMicroblockStreamBuilder { return this; } + addTxBnsNamespace(args?: TestBnsNamespaceArgs): TestMicroblockStreamBuilder { + const defaultArgs: TestBnsNamespaceArgs = { + tx_id: this.txData.tx.tx_id, + tx_index: this.txIndex, + }; + this.txData.namespaces.push(testBnsNamespace({ ...defaultArgs, ...args })); + return this; + } + build(): DataStoreMicroblockUpdateData { return this.data; } diff --git a/src/tests-bns/api.ts b/src/tests-bns/api.ts index d85c7a56..deee6a52 100644 --- a/src/tests-bns/api.ts +++ b/src/tests-bns/api.ts @@ -69,7 +69,7 @@ describe('BNS API tests', () => { miner_txid: '0x4321', canonical: true, }) - .addTx() + .addTx({ tx_id: '0x1234' }) .addTxNftEvent({ asset_event_type_id: DbAssetEventTypeId.Mint, value: bnsNameCV('xyz.abc'), @@ -92,8 +92,8 @@ describe('BNS API tests', () => { const namespace: DbBnsNamespace = { namespace_id: 'abc', address: 'ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH', - base: 1, - coeff: 1, + base: 1n, + coeff: 1n, launched_at: 14, lifetime: 1, no_vowel_discount: 1, @@ -289,15 +289,15 @@ describe('BNS API tests', () => { zonefile_offset: 0, parent_zonefile_hash: 'p-test-hash', parent_zonefile_index: 0, - block_height: dbBlock.block_height, + block_height: 2, tx_index: 0, - tx_id: '', + tx_id: '0x22', canonical: true, }; await db.resolveBnsSubdomains( { - index_block_hash: dbBlock.index_block_hash, - parent_index_block_hash: dbBlock.parent_index_block_hash, + index_block_hash: '0x02', + parent_index_block_hash: '0x1234', microblock_hash: '', microblock_sequence: I32_MAX, microblock_canonical: true, @@ -343,8 +343,8 @@ describe('BNS API tests', () => { ); const query1 = await supertest(api.server).get(`/v1/names/invalid/zonefile/${zonefileHash}`); - expect(query1.status).toBe(400); - expect(query1.body.error).toBe('Invalid name or subdomain'); + expect(query1.status).toBe(404); + expect(query1.body.error).toBe('No such name or zonefile'); expect(query1.type).toBe('application/json'); }); @@ -380,7 +380,7 @@ describe('BNS API tests', () => { const query1 = await supertest(api.server).get(`/v1/names/${name}/zonefile/invalidHash`); expect(query1.status).toBe(404); - expect(query1.body.error).toBe('No such zonefile'); + expect(query1.body.error).toBe('No such name or zonefile'); expect(query1.type).toBe('application/json'); }); @@ -670,13 +670,13 @@ describe('BNS API tests', () => { parent_zonefile_index: 0, block_height: dbBlock.block_height, tx_index: 0, - tx_id: '', + tx_id: '0x22', canonical: true, }; await db.resolveBnsSubdomains( { - index_block_hash: dbBlock.index_block_hash, - parent_index_block_hash: dbBlock.parent_index_block_hash, + index_block_hash: '0x02', + parent_index_block_hash: '0x1234', 
microblock_hash: '', microblock_sequence: I32_MAX, microblock_canonical: true, @@ -694,8 +694,8 @@ describe('BNS API tests', () => { test('Fail get zonefile by name - invalid name', async () => { const query1 = await supertest(api.server).get(`/v1/names/invalidName/zonefile`); - expect(query1.status).toBe(400); - expect(query1.body.error).toBe('Invalid name or subdomain'); + expect(query1.status).toBe(404); + expect(query1.body.error).toBe('No such name or zonefile does not exist'); expect(query1.type).toBe('application/json'); }); @@ -764,7 +764,7 @@ describe('BNS API tests', () => { parent_zonefile_index: 0, block_height: dbBlock.block_height, tx_index: 0, - tx_id: '', + tx_id: '0x1234', canonical: true, }; await db.resolveBnsSubdomains( @@ -782,6 +782,15 @@ describe('BNS API tests', () => { `/v1/names/${subdomain.fully_qualified_subdomain}` ); expect(query.status).toBe(200); + expect(query.body).toStrictEqual({ + address: "test-address", + blockchain: "stacks", + last_txid: "0x1234", + resolver: "https://registrar.blockstack.org", + status: "registered_subdomain", + zonefile: "test", + zonefile_hash: "test-hash", + }); }); test('Success: fqn redirect test', async () => { @@ -798,7 +807,7 @@ describe('BNS API tests', () => { parent_zonefile_index: 0, block_height: dbBlock.block_height, tx_index: 0, - tx_id: '', + tx_id: '0x1234', canonical: true, }; await db.resolveBnsSubdomains( diff --git a/src/tests-bns/bns-helpers-tests.ts b/src/tests-bns/bns-helpers-tests.ts new file mode 100644 index 00000000..02c077f1 --- /dev/null +++ b/src/tests-bns/bns-helpers-tests.ts @@ -0,0 +1,95 @@ +import { + parseNamespaceRawValue, + parseNameRawValue, + parseZoneFileTxt, +} from '../event-stream/bns/bns-helpers'; +import * as zoneFileParser from 'zone-file'; + +describe('BNS helper tests', () => { + test('Success: namespace parsed', () => { + const expectedNamespace = { + namespace_id: 'xyz', + address: 'ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH', + base: 1n, + coeff: 1n, + launched_at: 14, + lifetime: 1, + no_vowel_discount: 1, + nonalpha_discount: 1, + ready_block: 4, + reveal_block: 6, + status: 'ready', + buckets: '1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1', + tx_id: '0x2114c8cda9e829f8b5d3c4163724ae9c4d9142d2bae4a35bffb006408d21c0ab', + index_block_hash: '0xdeadbeef', + }; + const namespace = parseNamespaceRawValue( + // This value comes from Smart Contract Event (event.contract_event.raw_value) + 
'0x0c00000003096e616d657370616365020000000378797a0a70726f706572746965730c000000050b6c61756e636865642d61740a010000000000000000000000000000000e086c69666574696d650100000000000000000000000000000001106e616d6573706163652d696d706f7274051abf8e82623c380cd870931d48b525d5e12a4d67820e70726963652d66756e6374696f6e0c0000000504626173650100000000000000000000000000000001076275636b6574730b00000010010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000105636f6566660100000000000000000000000000000001116e6f2d766f77656c2d646973636f756e740100000000000000000000000000000001116e6f6e616c7068612d646973636f756e7401000000000000000000000000000000010b72657665616c65642d61740100000000000000000000000000000006067374617475730d000000057265616479', + 4, + '0x2114c8cda9e829f8b5d3c4163724ae9c4d9142d2bae4a35bffb006408d21c0ab', + 0 + ); + expect(namespace?.address).toEqual(expectedNamespace.address); + expect(namespace?.namespace_id).toEqual(expectedNamespace.namespace_id); + expect(namespace?.base).toEqual(expectedNamespace.base); + expect(namespace?.coeff).toEqual(expectedNamespace.coeff); + expect(namespace?.launched_at).toEqual(expectedNamespace.launched_at); + expect(namespace?.lifetime).toEqual(expectedNamespace.lifetime); + expect(namespace?.no_vowel_discount).toEqual(expectedNamespace.no_vowel_discount); + expect(namespace?.nonalpha_discount).toEqual(expectedNamespace.nonalpha_discount); + expect(namespace?.ready_block).toEqual(expectedNamespace.ready_block); + expect(namespace?.reveal_block).toEqual(expectedNamespace.reveal_block); + expect(namespace?.status).toEqual(expectedNamespace.status); + expect(namespace?.buckets).toEqual(expectedNamespace.buckets); + expect(namespace?.tx_id).toEqual(expectedNamespace.tx_id); + }); + + test('Success: parse name raw value', () => { + const expectedName = { + attachment: { + hash: 'c5217bcb3e52612ff7c835f9bb46a5f86aa73b8d', + metadata: { + name: 'abcdef', + namespace: 'xyz', + tx_sender: { + type: 0, + version: 26, + hash160: 'bf8e82623c380cd870931d48b525d5e12a4d6782', + }, + op: 'name-import', + }, + }, + }; + const expectedAttachment = expectedName.attachment; + const name = parseNameRawValue( + // This value comes from Smart Contract Event (event.contract_event.raw_value) + '0x0c000000010a6174746163686d656e740c00000003106174746163686d656e742d696e646578010000000000000000000000000000000004686173680200000014c5217bcb3e52612ff7c835f9bb46a5f86aa73b8d086d657461646174610c00000004046e616d650200000006616263646566096e616d657370616365020000000378797a026f700d0000000b6e616d652d696d706f72740974782d73656e646572051abf8e82623c380cd870931d48b525d5e12a4d6782' + ); + const attachment = name.attachment; + expect(attachment.hash).toEqual(expectedAttachment.hash); + expect(attachment.metadata.name).toEqual(expectedAttachment.metadata.name); + expect(attachment.metadata.namespace).toEqual(expectedAttachment.metadata.namespace); + expect(attachment.metadata.op).toEqual(expectedAttachment.metadata.op); + expect(attachment.metadata.tx_sender.version).toEqual( + expectedAttachment.metadata.tx_sender.version + ); + 
expect(attachment.metadata.tx_sender.hash160).toEqual( + expectedAttachment.metadata.tx_sender.hash160 + ); + }); + + test('Parse TXT', () => { + const subdomain = `$ORIGIN abcdef.xyz + $TTL 3600 + asim IN TXT "owner=ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH" "seqn=0" "parts=1" "zf0=JE9SSUdJTiBhc2ltCiRUVEwgMzYwMApfaHR0cHMuX3RjcCBVUkkgMTAgMSAiaHR0cHM6Ly9nYWlhLmJsb2Nrc3RhY2sub3JnL2h1Yi9TVDJaUlgwSzI3R1cwU1AzR0pDRU1IRDk1VFFHSk1LQjdHOVkwWDFNSC9wcm9maWxlLmpzb24iCg==" + _http._tcp IN URI 10 1 "https://gaia.blockstack.org/hub/1M3325hr1utdv4HhSAfvYKhapzPP9Axhde/profile.json" + _resolver IN URI 10 1 "http://localhost:3000" + `; + const parsedZoneFile = zoneFileParser.parseZoneFile(subdomain); + const zoneFileTxt = parseZoneFileTxt(parsedZoneFile.txt?.[0].txt as string[]); + expect(zoneFileTxt.owner).toBe('ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH'); + expect(zoneFileTxt.parts).toBe('1'); + expect(zoneFileTxt.seqn).toBe('0'); + }); +}); diff --git a/src/tests-bns/bns-integration-tests.ts b/src/tests-bns/bns-integration-tests.ts index 2e34a15c..a12dfd97 100644 --- a/src/tests-bns/bns-integration-tests.ts +++ b/src/tests-bns/bns-integration-tests.ts @@ -1,6 +1,3 @@ - - - import { PgDataStore, cycleMigrations, runMigrations } from '../datastore/postgres-store'; import { PoolClient } from 'pg'; import { ApiServer, startApiServer } from '../api/init'; @@ -11,7 +8,6 @@ import { createHash } from 'crypto'; import { DbTx, DbTxStatus } from '../datastore/common'; import { AnchorMode, ChainID, PostConditionMode, someCV } from '@stacks/transactions'; import { StacksMocknet } from '@stacks/network'; - import { broadcastTransaction, bufferCV, @@ -26,8 +22,6 @@ import { import BigNum = require('bn.js'); import { logger } from '../helpers'; import { testnetKeys } from '../api/routes/debug'; -import { importV1BnsData } from '../import-v1'; -import * as assert from 'assert'; import { TestBlockBuilder } from '../test-utils/test-builders'; function hash160(bfr: Buffer): Buffer { @@ -101,10 +95,10 @@ describe('BNS integration tests', () => { body: JSON.stringify(body), headers: { 'Content-Type': 'application/json' }, }); - const submitResult = await apiResult.json(); + await apiResult.json(); const expectedTxId = '0x' + transaction.txid(); const result = await standByForTx(expectedTxId); - if (result.status != 1) logger.error('name-import error'); + if (result.status != 1) throw new Error('result status error'); await standbyBnsName(expectedTxId); return transaction; } @@ -173,10 +167,8 @@ describe('BNS integration tests', () => { async function initiateNamespaceNetwork(namespace: string, salt: Buffer, namespaceHash: Buffer, testnetKey: TestnetKey, expiration: number){ while (true) { try { - const preorderTransaction = await namespacePreorder(namespaceHash, testnetKey); - - const revealTransaction = await namespaceReveal(namespace, salt, testnetKey, expiration); - + await namespacePreorder(namespaceHash, testnetKey); + await namespaceReveal(namespace, salt, testnetKey, expiration); break; } catch (e) { console.log('error connection', e); @@ -194,13 +186,10 @@ describe('BNS integration tests', () => { network, anchorMode: AnchorMode.Any }; - const transaction = await makeContractCall(txOptions); await broadcastTransaction(transaction, network); - const readyResult = await standByForTx('0x' + transaction.txid()); if (readyResult.status != 1) logger.error('namespace-ready error'); - return transaction; } async function nameImport(namespace: string, zonefile: string, name: string, testnetKey: TestnetKey) { @@ -479,7 +468,7 
@@ describe('BNS integration tests', () => { const zonefile = `$ORIGIN ${name}.${namespace}\n$TTL 3600\n_http._tcp IN URI 10 1 "https://blockstack.s3.amazonaws.com/${name}.${namespace}"\n`; const importZonefile = `$ORIGIN ${name}.${namespace}\n$TTL 3600\n_http._tcp IN URI 10 1 "https://blockstack.s3.amazonaws.com/${name}.${namespace}"\n`; const testnetKey = { pkey: testnetKeys[2].secretKey, address: testnetKeys[2].stacksAddress}; - // initializing namespace network + // initializing namespace network await initiateNamespaceNetwork(namespace, salt, namespaceHash, testnetKey, 12); await namespaceReady(namespace, testnetKey.pkey); @@ -515,7 +504,7 @@ describe('BNS integration tests', () => { const namespaceHash = hash160(Buffer.concat([Buffer.from(namespace), salt])); const testnetKey = { pkey: testnetKeys[4].secretKey, address: testnetKeys[4].stacksAddress}; const zonefile = `$ORIGIN ${name}.${namespace}\n$TTL 3600\n_http._tcp IN URI 10 1 "https://blockstack.s3.amazonaws.com/${name}.${namespace}"\n`; - + // initializing namespace network await initiateNamespaceNetwork(namespace, salt, namespaceHash, testnetKey, 12); await nameImport(namespace, zonefile, name, testnetKey); @@ -529,68 +518,78 @@ describe('BNS integration tests', () => { expect(query1.body.status).toBe('name-revoke'); }); - test('name-renewal contract call', async () => { + test('name-import/name-renewal contract call', async () => { const zonefile = `new zone file`; const namespace = 'name-renewal'; const name = 'renewal'; const namespaceHash = hash160(Buffer.concat([Buffer.from(namespace), salt])); const testnetKey = { pkey: testnetKeys[5].secretKey, address: testnetKeys[5].stacksAddress}; - + // initializing namespace network await initiateNamespaceNetwork(namespace, salt, namespaceHash, testnetKey, 1); await nameImport(namespace, zonefile, name, testnetKey); await namespaceReady(namespace, testnetKey.pkey); - //name renewal + // check expiration block + const query0 = await supertest(api.server).get(`/v1/names/${name}.${namespace}`); + expect(query0.status).toBe(200); + expect(query0.type).toBe('application/json'); + expect(query0.body.expire_block).toBe(0); // Imported names don't know about their namespaces + + // name renewal await nameRenewal(namespace, zonefile, testnetKey.pkey, name); - try { - const query1 = await supertest(api.server).get(`/v1/names/${name}.${namespace}`); - expect(query1.status).toBe(200); - expect(query1.type).toBe('application/json'); - expect(query1.body.zonefile).toBe(zonefile); - expect(query1.body.status).toBe('name-renewal'); - } catch (err: any) { - throw new Error('Error post transaction: ' + err.message); - } - }); - - test('bns v1-import', async () => { - await importV1BnsData(db, 'src/tests-bns/import-test-files'); - - // test on-chain name import - const query1 = await supertest(api.server).get(`/v1/names/zumrai.id`); + const query1 = await supertest(api.server).get(`/v1/names/${name}.${namespace}`); expect(query1.status).toBe(200); expect(query1.type).toBe('application/json'); - expect(query1.body).toEqual({ - address: 'SP29EJ0SVM2TRZ3XGVTZPVTKF4SV1VMD8C0GA5SK5', - blockchain: 'stacks', - expire_block: 52595, - last_txid: '', - status: 'name-register', - zonefile: - '$ORIGIN zumrai.id\n$TTL 3600\n_http._tcp IN URI 10 1 "https://gaia.blockstack.org/hub/1EPno1VcdGx89ukN2we4iVpnFtkHzw8i5d/profile.json"\n\n', - zonefile_hash: '853cd126478237bc7392e65091f7ffa5a1556a33', - }); + expect(query1.body.zonefile).toBe(zonefile); + expect(query1.body.status).toBe('name-renewal'); - // test 
subdomain import - const query2 = await supertest(api.server).get(`/v1/names/flushreset.id.blockstack`); + // Name should appear only once in namespace list + const query2 = await supertest(api.server).get(`/v1/namespaces/${namespace}/names`); expect(query2.status).toBe(200); expect(query2.type).toBe('application/json'); - expect(query2.body).toEqual({ - address: 'SP2S2F9TCAT43KEJT02YTG2NXVCPZXS1426T63D9H', - blockchain: 'stacks', - last_txid: '', - resolver: 'https://registrar.blockstack.org', - status: 'registered_subdomain', - zonefile: - '$ORIGIN flushreset.id.blockstack\n$TTL 3600\n_http._tcp IN URI 10 1 "https://gaia.blockstack.org/hub/1HEznKZ7mK5fmibweM7eAk8SwRgJ1bWY92/profile.json"\n\n', - zonefile_hash: '14dc091ebce8ea117e1276d802ee903cc0fdde81', - }); + expect(query2.body).toStrictEqual(["renewal.name-renewal"]); - const dbquery = await db.getSubdomain({ subdomain: `flushreset.id.blockstack`, includeUnanchored: false }); - assert(dbquery.found) - if (dbquery.result){ - expect(dbquery.result.name).toBe('id.blockstack');} + // check new expiration block, should not be 0 + const query3 = await supertest(api.server).get(`/v1/names/${name}.${namespace}`); + expect(query3.status).toBe(200); + expect(query3.type).toBe('application/json'); + expect(query3.body.expire_block).not.toBe(0); + }); + + test('name-register/name-renewal contract call', async () => { + const saltName = '0000'; + const zonefile = `new zone file`; + const namespace = 'name-renewal2'; + const name = 'renewal2'; + const namespaceHash = hash160(Buffer.concat([Buffer.from(namespace), salt])); + const testnetKey = { pkey: testnetKeys[5].secretKey, address: testnetKeys[5].stacksAddress}; + + // initializing namespace network + await initiateNamespaceNetwork(namespace, salt, namespaceHash, testnetKey, 1); + await namespaceReady(namespace, testnetKey.pkey); + await nameRegister(namespace, saltName, zonefile, testnetKey, name); + + // check expiration block, should not be 0 + const query0 = await supertest(api.server).get(`/v1/names/${name}.${namespace}`); + expect(query0.status).toBe(200); + expect(query0.type).toBe('application/json'); + expect(query0.body.expire_block).not.toBe(0); + const prevExpiration = query0.body.expire_block; + + // name renewal + await nameRenewal(namespace, zonefile, testnetKey.pkey, name); + const query1 = await supertest(api.server).get(`/v1/names/${name}.${namespace}`); + expect(query1.status).toBe(200); + expect(query1.type).toBe('application/json'); + expect(query1.body.zonefile).toBe(zonefile); + expect(query1.body.status).toBe('name-renewal'); + + // check new expiration block, should be greater than the previous one + const query3 = await supertest(api.server).get(`/v1/names/${name}.${namespace}`); + expect(query3.status).toBe(200); + expect(query3.type).toBe('application/json'); + expect(query3.body.expire_block > prevExpiration).toBe(true); }); afterAll(async () => { diff --git a/src/tests-bns/event-server-tests.ts b/src/tests-bns/event-server-tests.ts new file mode 100644 index 00000000..1596da79 --- /dev/null +++ b/src/tests-bns/event-server-tests.ts @@ -0,0 +1,305 @@ +import { ChainID } from '@stacks/transactions'; +import { PgDataStore, cycleMigrations, runMigrations } from '../datastore/postgres-store'; +import { PoolClient } from 'pg'; +import { bnsNameCV, httpPostRequest } from '../helpers'; +import { EventStreamServer, startEventServer } from '../event-stream/event-server'; +import { TestBlockBuilder, TestMicroblockStreamBuilder } from '../test-utils/test-builders'; +import { 
DbAssetEventTypeId, DbBnsZoneFile } from '../datastore/common'; + +describe('BNS event server tests', () => { + let db: PgDataStore; + let client: PoolClient; + let eventServer: EventStreamServer; + + beforeEach(async () => { + process.env.PG_DATABASE = 'postgres'; + await cycleMigrations(); + db = await PgDataStore.connect({ usageName: 'tests', withNotifier: false }); + client = await db.pool.connect(); + eventServer = await startEventServer({ + datastore: db, + chainId: ChainID.Mainnet, + serverHost: '127.0.0.1', + serverPort: 0, + httpLogLevel: 'debug', + }); + }); + + test('namespace-ready called by a contract other than BNS', async () => { + const block = new TestBlockBuilder({ + block_height: 1, + index_block_hash: '0x29fe7ba9674b9196fefa28764a35a4603065dc25c9dcf83c56648066f36a8dce', + burn_block_height: 749661, + burn_block_hash: '0x000000000000000000021e9777470811a937006cf47efceadefca2e8031c4b5f', + burn_block_time: 1660638853, + }) + .addTx() + .build(); + await db.update(block); + const microblock = new TestMicroblockStreamBuilder() + .addMicroblock({ + microblock_hash: '0x8455c986ef89d09968b96fee0ef5b4625aa3860aa68e70123efa129f48e55c6b', + microblock_sequence: 0, + parent_index_block_hash: '0x29fe7ba9674b9196fefa28764a35a4603065dc25c9dcf83c56648066f36a8dce' + }) + .build(); + await db.updateMicroblocks(microblock); + const payload = { + "events": [ + { + "txid": "0x605aa0554fb5ee7995f9780aa54d63b3d32550b0def95e31bdf3beb0fedefdae", + "type": "contract_event", + "committed": true, + "event_index": 50, + "contract_event": { + "topic": "print", + "raw_value": "0x0c00000003096e616d65737061636502000000046672656e0a70726f706572746965730c000000061963616e2d7570646174652d70726963652d66756e6374696f6e030b6c61756e636865642d61740a0100000000000000000000000000011886086c69666574696d65010000000000000000000000000000cd50106e616d6573706163652d696d706f727406161809f2ab9182b6ff1678f82846131c0709e51cf914636f6d6d756e6974792d68616e646c65732d76320e70726963652d66756e6374696f6e0c000000050462617365010000000c9f2c9cd04674edea3fffffff076275636b6574730b00000010010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000105636f6566660100000000000000000000000000000001116e6f2d766f77656c2d646973636f756e740100000000000000000000000000000001116e6f6e616c7068612d646973636f756e7401000000000000000000000000000000010b72657665616c65642d61740100000000000000000000000000011886067374617475730d000000057265616479", + "contract_identifier": "SP000000000000000000002Q6VF78.bns" + } + } + ], + "block_hash": "0x6be6bfbf5e63ee4333c794b0489a791625ad0724722647b748379fe916bbff55", + "miner_txid": "0x1c01668438115f757cfc14210f7f7ba0bee7f9d235c44b8e35c8653ac5879205", + "block_height": 2, + "transactions": [ + { + "txid": "0x605aa0554fb5ee7995f9780aa54d63b3d32550b0def95e31bdf3beb0fedefdae", + "raw_tx": 
"0x000000000104001809f2ab9182b6ff1678f82846131c0709e51cf900000000000000110000000000000bb80001e2ae2533ed444dcc3dc0118da5c8bbfe5da4c1943b63e3fd9b7389e3f7f384ee417a65d899182ff7791b174a426b947860df5b4006a0cb767aca275af847428d03020000000002161809f2ab9182b6ff1678f82846131c0709e51cf914636f6d6d756e6974792d68616e646c65732d7632106e616d6573706163652d72657665616c0000000402000000046672656e0200000003626f74010000000000000000000000000000cd5009", + "status": "success", + "tx_index": 46, + "raw_result": "0x0703", + "contract_abi": null, + "execution_cost": { + "runtime": 201050, + "read_count": 20, + "read_length": 92368, + "write_count": 4, + "write_length": 1386 + }, + "microblock_hash": "0x8455c986ef89d09968b96fee0ef5b4625aa3860aa68e70123efa129f48e55c6b", + "microblock_sequence": 0, + "microblock_parent_hash": "0xea7982ba6a5206b9efc2ab2567eedef3babae4d167619bdc74c7e148717dc208" + } + ], + "anchored_cost": { + "runtime": 19669668, + "read_count": 1420, + "read_length": 8457322, + "write_count": 143, + "write_length": 9331 + }, + "burn_block_hash": "0x00000000000000000004afca18622e18a1f36ff19dc1aece341868c042b7f4ac", + "burn_block_time": 1660639379, + "index_block_hash": "0xd3944c1cf261982ad5d86ad14b1545a2393c0039e378706323927b3a7031a621", + "burn_block_height": 749662, + "parent_block_hash": "0xea7982ba6a5206b9efc2ab2567eedef3babae4d167619bdc74c7e148717dc208", + "parent_microblock": "0x8455c986ef89d09968b96fee0ef5b4625aa3860aa68e70123efa129f48e55c6b", + "matured_miner_rewards": [], + "parent_burn_block_hash": "0x000000000000000000021e9777470811a937006cf47efceadefca2e8031c4b5f", + "parent_index_block_hash": "0x29fe7ba9674b9196fefa28764a35a4603065dc25c9dcf83c56648066f36a8dce", + "parent_burn_block_height": 749661, + "confirmed_microblocks_cost": { + "runtime": 174668984, + "read_count": 12067, + "read_length": 54026355, + "write_count": 1701, + "write_length": 134399 + }, + "parent_microblock_sequence": 0, + "parent_burn_block_timestamp": 1660638853 + }; + + await httpPostRequest({ + host: '127.0.0.1', + port: eventServer.serverAddress.port, + path: '/new_block', + headers: { 'Content-Type': 'application/json' }, + body: Buffer.from(JSON.stringify(payload), 'utf8'), + throwOnNotOK: true, + }); + + const namespaces = await db.getNamespaceList({ includeUnanchored: true }); + expect(namespaces.results).toStrictEqual(['fren']); + + const namespace = await db.getNamespace({ namespace: 'fren', includeUnanchored: true }); + expect(namespace.found).toBe(true); + expect(namespace.result?.namespace_id).toBe('fren'); + expect(namespace.result?.lifetime).toBe(52560); + expect(namespace.result?.status).toBe('ready'); + expect(namespace.result?.ready_block).toBe(2); + }); + + test('/attachments/new with re-orged zonefiles', async () => { + const block1 = new TestBlockBuilder({ + block_height: 1, + index_block_hash: '0x0101', + }) + .addTx() + .addTxBnsNamespace({ namespace_id: 'btc' }) + .addTxBnsName({ name: 'jnj.btc', namespace_id: 'btc' }) + .addTxNftEvent({ + asset_event_type_id: DbAssetEventTypeId.Mint, + value: bnsNameCV('jnj.btc'), + asset_identifier: 'SP000000000000000000002Q6VF78.bns::names', + recipient: 'ST5RRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1ZA', + }) + .build(); + await db.update(block1); + + const block2 = new TestBlockBuilder({ + block_height: 2, + index_block_hash: '0x0200', + parent_index_block_hash: '0x0101' + }) + .addTx({ tx_id: '0x1212' }) + .addTxBnsName({ + name: 'jnj.btc', + namespace_id: 'btc', + status: 'name-update', // Canonical update + tx_id: '0x1212', + zonefile_hash: 
'0x9198e0b61a029671e53bd59aa229e7ae05af35a3' + }) + .build(); + await db.update(block2); + + const block2b = new TestBlockBuilder({ + block_height: 2, + index_block_hash: '0x0201', + parent_index_block_hash: '0x0101' + }) + .addTx({ tx_id: '0x121266' }) + .addTxBnsName({ + name: 'jnj.btc', + namespace_id: 'btc', + status: 'name-update', // Non-canonical update + tx_id: '0x121266', + zonefile_hash: '0xffff' + }) + .build(); + await db.update(block2b); + + const block3 = new TestBlockBuilder({ + block_height: 3, + index_block_hash: '0x0300', + parent_index_block_hash: '0x0200' + }) + .addTx({ tx_id: '0x3333' }) + .build(); + await db.update(block3); + + const payload = [ + { + "tx_id": "0x1212", // Canonical + "content": "0x244f524947494e206a6e6a2e6274632e0a2454544c20333630300a5f687474702e5f74637009494e095552490931300931092268747470733a2f2f676169612e626c6f636b737461636b2e6f72672f6875622f317a38417a79684334326e3854766f4661554c326e7363614347487151515755722f70726f66696c652e6a736f6e220a0a", + "metadata": "0x0c00000004046e616d6502000000036a6e6a096e616d6573706163650200000003627463026f700d0000000d6e616d652d72656769737465720974782d73656e64657205163763c6b37100efa8261e5fc1b1e8c18cd3fed9b6", + "contract_id": "SP000000000000000000002Q6VF78.bns", + "block_height": 17307, + "content_hash": "0x9198e0b61a029671e53bd59aa229e7ae05af35a3", + "attachment_index": 823, + "index_block_hash": "0x0200" + }, + { + "tx_id": "0x121266", // Non-canonical + "content": "0x", + "metadata": "0x0c00000004046e616d6502000000036a6e6a096e616d6573706163650200000003627463026f700d0000000d6e616d652d72656769737465720974782d73656e64657205163763c6b37100efa8261e5fc1b1e8c18cd3fed9b6", + "contract_id": "SP000000000000000000002Q6VF78.bns", + "block_height": 17307, + "content_hash": "0xffff", + "attachment_index": 823, + "index_block_hash": "0x0201" + }, + ]; + + await httpPostRequest({ + host: '127.0.0.1', + port: eventServer.serverAddress.port, + path: '/attachments/new', + headers: { 'Content-Type': 'application/json' }, + body: Buffer.from(JSON.stringify(payload), 'utf8'), + throwOnNotOK: true, + }); + + const name = await db.getName({ name: 'jnj.btc', chainId: ChainID.Mainnet, includeUnanchored: true }); + expect(name.found).toBe(true); + expect(name.result?.zonefile_hash).toBe('9198e0b61a029671e53bd59aa229e7ae05af35a3'); + expect(name.result?.index_block_hash).toBe('0x0200'); + expect(name.result?.tx_id).toBe('0x1212'); + expect(name.result?.status).toBe('name-update'); + }); + + test('/attachments/new with duplicate zonefiles for the same tx', async () => { + const block1 = new TestBlockBuilder({ + block_height: 1, + index_block_hash: '0x0101', + }) + .addTx({ tx_id: '0x1234' }) + .addTxBnsNamespace({ namespace_id: 'btc' }) + .addTxBnsName({ + name: 'jnj.btc', + namespace_id: 'btc', + zonefile_hash: '0x9198e0b61a029671e53bd59aa229e7ae05af35a3' + }) + .addTxNftEvent({ + asset_event_type_id: DbAssetEventTypeId.Mint, + value: bnsNameCV('jnj.btc'), + asset_identifier: 'SP000000000000000000002Q6VF78.bns::names', + recipient: 'ST5RRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1ZA', + }) + .build(); + await db.update(block1); + + const payload = [ + { + "tx_id": "0x1234", + "content": "0x", + "metadata": "0x0c00000004046e616d6502000000036a6e6a096e616d6573706163650200000003627463026f700d0000000d6e616d652d72656769737465720974782d73656e64657205163763c6b37100efa8261e5fc1b1e8c18cd3fed9b6", + "contract_id": "SP000000000000000000002Q6VF78.bns", + "block_height": 1, + "content_hash": "0x9198e0b61a029671e53bd59aa229e7ae05af35a3", + "attachment_index": 823, + 
"index_block_hash": "0x0101" + }, + { + "tx_id": "0x1234", + "content": "0x244f524947494e206a6e6a2e6274632e0a2454544c20333630300a5f687474702e5f74637009494e095552490931300931092268747470733a2f2f676169612e626c6f636b737461636b2e6f72672f6875622f317a38417a79684334326e3854766f4661554c326e7363614347487151515755722f70726f66696c652e6a736f6e220a0a", + "metadata": "0x0c00000004046e616d6502000000036a6e6a096e616d6573706163650200000003627463026f700d0000000d6e616d652d72656769737465720974782d73656e64657205163763c6b37100efa8261e5fc1b1e8c18cd3fed9b6", + "contract_id": "SP000000000000000000002Q6VF78.bns", + "block_height": 1, + "content_hash": "0x9198e0b61a029671e53bd59aa229e7ae05af35a3", // Same zonefile_hash but different content, this should overwrite the entry above + "attachment_index": 823, + "index_block_hash": "0x0101" + }, + { + "tx_id": "0x1234", + "content": "0x244f524947494e206a6e6a2e6274632e0a2454544c20333630300a5f687474702e5f74637009494e095552490931300931092268747470733a2f2f676169612e626c6f636b737461636b2e6f72672f6875622f317a38417a79684334326e3854766f4661554c326e7363614347487151515755722f70726f66696c652e6a736f6e220a0a", + "metadata": "0x0c00000004046e616d6502000000036a6e6a096e616d6573706163650200000003627463026f700d0000000d6e616d652d72656769737465720974782d73656e64657205163763c6b37100efa8261e5fc1b1e8c18cd3fed9b6", + "contract_id": "SP000000000000000000002Q6VF78.bns", + "block_height": 1, + "content_hash": "0x9198e0b61a029671e53bd59aa229e7ae05af35a3", // Also overwrite + "attachment_index": 823, + "index_block_hash": "0x0101" + }, + ]; + + await httpPostRequest({ + host: '127.0.0.1', + port: eventServer.serverAddress.port, + path: '/attachments/new', + headers: { 'Content-Type': 'application/json' }, + body: Buffer.from(JSON.stringify(payload), 'utf8'), + throwOnNotOK: true, + }); + + // To validate table data we'll query it directly. There should only be one zonefile. 
+ const result = await client.query(`SELECT * FROM zonefiles`); + expect(result.rowCount).toBe(1); + expect(result.rows[0].zonefile).toBe('$ORIGIN jnj.btc.\n$TTL 3600\n_http._tcp\tIN\tURI\t10\t1\t"https://gaia.blockstack.org/hub/1z8AzyhC42n8TvoFaUL2nscaCGHqQQWUr/profile.json"\n\n'); + }); + + afterEach(async () => { + await eventServer.closeAsync(); + client.release(); + await db?.close(); + await runMigrations(undefined, 'down'); + }); +}); diff --git a/src/tests-bns/v1-import-tests.ts b/src/tests-bns/v1-import-tests.ts new file mode 100644 index 00000000..47283d22 --- /dev/null +++ b/src/tests-bns/v1-import-tests.ts @@ -0,0 +1,161 @@ +import { PgDataStore, cycleMigrations, runMigrations } from '../datastore/postgres-store'; +import { PoolClient } from 'pg'; +import { ApiServer, startApiServer } from '../api/init'; +import * as supertest from 'supertest'; +import { startEventServer } from '../event-stream/event-server'; +import { Server } from 'net'; +import { ChainID } from '@stacks/transactions'; +import { importV1BnsData } from '../import-v1'; +import * as assert from 'assert'; +import { TestBlockBuilder } from '../test-utils/test-builders'; + +describe('BNS V1 import', () => { + let db: PgDataStore; + let client: PoolClient; + let eventServer: Server; + let api: ApiServer; + + beforeEach(async () => { + process.env.PG_DATABASE = 'postgres'; + await cycleMigrations(); + db = await PgDataStore.connect({ usageName: 'tests' }); + client = await db.pool.connect(); + eventServer = await startEventServer({ datastore: db, chainId: ChainID.Testnet, httpLogLevel: 'silly' }); + api = await startApiServer({ datastore: db, chainId: ChainID.Testnet, httpLogLevel: 'silly' }); + + const block = new TestBlockBuilder().build(); + await db.update(block); + }); + + test('v1-import', async () => { + await importV1BnsData(db, 'src/tests-bns/import-test-files'); + + // Names + const query1 = await supertest(api.server).get(`/v1/names/zumrai.id`); + expect(query1.status).toBe(200); + expect(query1.type).toBe('application/json'); + expect(query1.body).toEqual({ + address: 'SP29EJ0SVM2TRZ3XGVTZPVTKF4SV1VMD8C0GA5SK5', + blockchain: 'stacks', + expire_block: 52595, + last_txid: '', + status: 'name-register', + zonefile: + '$ORIGIN zumrai.id\n$TTL 3600\n_http._tcp IN URI 10 1 "https://gaia.blockstack.org/hub/1EPno1VcdGx89ukN2we4iVpnFtkHzw8i5d/profile.json"\n\n', + zonefile_hash: '853cd126478237bc7392e65091f7ffa5a1556a33', + }); + + const query2 = await supertest(api.server).get(`/v1/names/zumrai.id/zonefile/853cd126478237bc7392e65091f7ffa5a1556a33`); + expect(query2.status).toBe(200); + expect(query2.type).toBe('application/json'); + expect(query2.body).toEqual({ + zonefile: '$ORIGIN zumrai.id\n$TTL 3600\n_http._tcp IN URI 10 1 "https://gaia.blockstack.org/hub/1EPno1VcdGx89ukN2we4iVpnFtkHzw8i5d/profile.json"\n\n', + }); + + const query3 = await supertest(api.server).get(`/v1/names/zumrai.id/zonefile`); + expect(query3.status).toBe(200); + expect(query3.type).toBe('application/json'); + expect(query3.body).toEqual({ + zonefile: '$ORIGIN zumrai.id\n$TTL 3600\n_http._tcp IN URI 10 1 "https://gaia.blockstack.org/hub/1EPno1VcdGx89ukN2we4iVpnFtkHzw8i5d/profile.json"\n\n', + }); + + const query4 = await supertest(api.server).get(`/v1/names/id.blockstack/subdomains`); + expect(query4.status).toBe(200); + expect(query4.type).toBe('application/json'); + expect(query4.body.sort()).toStrictEqual([ + "12312313231.id.blockstack", "aichamez.id.blockstack", "ale082308as.id.blockstack", + "alejandro772.id.blockstack", 
"alkorsandor8_2.id.blockstack", "amir4good.id.blockstack", + "anasa680.id.blockstack", "ancafajardo.id.blockstack", "angelessebastian.id.blockstack", + "blafus3l.id.blockstack", "caomicoje.id.blockstack", "con_adrada34516.id.blockstack", + "cryptichorizon.id.blockstack", "drgenius.id.blockstack", "drifting_dude.id.blockstack", + "enavarrocollin.id.blockstack", "entryist.id.blockstack", "flushreset.id.blockstack", + "harukoscarlet.id.blockstack", "hintonh924.id.blockstack", "johnkinney.id.blockstack", + "jokialternative.id.blockstack", "joren_instance.id.blockstack", "kerodriguez.id.blockstack", + "krishares10.id.blockstack", "liviaelyse.id.blockstack", "luke_mwenya1.id.blockstack", + "milkyymocha.id.blockstack", "mithical.id.blockstack", "mrbotham.id.blockstack", + "mymansgotabeefy1.id.blockstack", "neelyblake996.id.blockstack", "nihal_t_m.id.blockstack", + "okamii63.id.blockstack", "robertascardoso.id.blockstack", "sheridoug.id.blockstack", + "sipapi19.id.blockstack", "slemanb44.id.blockstack", "slimttfu.id.blockstack", + "splevine.id.blockstack", "sportsman66.id.blockstack", "starbvuks.id.blockstack", + "subtly_fresh.id.blockstack", "svirchok.id.blockstack", "theironcook.id.blockstack", + "thingnotok.id.blockstack", "ujku1977.id.blockstack", "yanadda9.id.blockstack", + "yoemmx00.id.blockstack", "zachgaming.id.blockstack" + ].sort()); + + const query5 = await supertest(api.server).get(`/v1/names/`); + expect(query5.status).toBe(200); + expect(query5.type).toBe('application/json'); + expect(query5.body.sort()).toStrictEqual([ + "0.id", "1.id", "10.id", "10x.id", "111111111.id", "123.id", "zinai.id", "zlh.id", + "zone117x.id", "zumminer_crux.id", "zumminer_dev_crux.id", "zumrai.id", + ].sort()); + + // Namespaces + const query6 = await supertest(api.server).get(`/v1/namespaces/`); + expect(query6.status).toBe(200); + expect(query6.type).toBe('application/json'); + expect(query6.body).toEqual({ + namespaces: ["blockstack", "graphite", "helloworld", "id", "podcast"] + }); + + const query7 = await supertest(api.server).get(`/v1/namespaces/id/names`); + expect(query7.status).toBe(200); + expect(query7.type).toBe('application/json'); + expect(query7.body.sort()).toStrictEqual([ + "0.id", "1.id", "10.id", "10x.id", "111111111.id", "123.id", "zinai.id", "zlh.id", + "zone117x.id", "zumminer_crux.id", "zumminer_dev_crux.id", "zumrai.id" + ].sort()); + + // Addresses + const query8 = await supertest(api.server).get(`/v1/addresses/stacks/SP1HPCXTGV31W5659M3WTBEFP5AN55HV4B1Q9T31F`); + expect(query8.status).toBe(200); + expect(query8.type).toBe('application/json'); + expect(query8.body).toEqual({ + names: ["0.id"] + }); + + // Subdomains + const query9 = await supertest(api.server).get(`/v1/names/flushreset.id.blockstack`); + expect(query9.status).toBe(200); + expect(query9.type).toBe('application/json'); + expect(query9.body).toEqual({ + address: 'SP2S2F9TCAT43KEJT02YTG2NXVCPZXS1426T63D9H', + blockchain: 'stacks', + last_txid: '', + resolver: 'https://registrar.blockstack.org', + status: 'registered_subdomain', + zonefile: + '$ORIGIN flushreset.id.blockstack\n$TTL 3600\n_http._tcp IN URI 10 1 "https://gaia.blockstack.org/hub/1HEznKZ7mK5fmibweM7eAk8SwRgJ1bWY92/profile.json"\n\n', + zonefile_hash: '14dc091ebce8ea117e1276d802ee903cc0fdde81', + }); + + const query10 = await supertest(api.server).get(`/v1/names/flushreset.id.blockstack/zonefile/14dc091ebce8ea117e1276d802ee903cc0fdde81`); + expect(query10.status).toBe(200); + expect(query10.type).toBe('application/json'); + expect(query10.body).toEqual({ + 
zonefile: + '$ORIGIN flushreset.id.blockstack\n$TTL 3600\n_http._tcp IN URI 10 1 "https://gaia.blockstack.org/hub/1HEznKZ7mK5fmibweM7eAk8SwRgJ1bWY92/profile.json"\n\n', + }); + + const query11 = await supertest(api.server).get(`/v1/names/flushreset.id.blockstack/zonefile`); + expect(query11.status).toBe(200); + expect(query11.type).toBe('application/json'); + expect(query11.body).toEqual({ + zonefile: + '$ORIGIN flushreset.id.blockstack\n$TTL 3600\n_http._tcp IN URI 10 1 "https://gaia.blockstack.org/hub/1HEznKZ7mK5fmibweM7eAk8SwRgJ1bWY92/profile.json"\n\n', + }); + + const dbquery = await db.getSubdomain({ subdomain: `flushreset.id.blockstack`, includeUnanchored: false }); + assert(dbquery.found) + if (dbquery.result){ + expect(dbquery.result.name).toBe('id.blockstack');} + }); + + afterEach(async () => { + await new Promise(resolve => eventServer.close(() => resolve(true))); + await api.terminate(); + client.release(); + await db?.close(); + await runMigrations(undefined, 'down'); + }); +}); diff --git a/src/tests/bns-helpers-tests.ts b/src/tests/bns-helpers-tests.ts deleted file mode 100644 index 018179e8..00000000 --- a/src/tests/bns-helpers-tests.ts +++ /dev/null @@ -1,95 +0,0 @@ -import { parseNamespaceRawValue, parseNameRawValue, parseZoneFileTxt } from '../bns-helpers'; -import * as zoneFileParser from 'zone-file'; -test('Success: namespace parsed', () => { - const expectedNamespace = { - namespace_id: 'xyz', - address: 'ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH', - base: 1, - coeff: 1, - launched_at: 14, - lifetime: 1, - no_vowel_discount: 1, - nonalpha_discount: 1, - ready_block: 4, - reveal_block: 6, - status: 'ready', - buckets: '1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1', - tx_id: '0x2114c8cda9e829f8b5d3c4163724ae9c4d9142d2bae4a35bffb006408d21c0ab', - index_block_hash: '0xdeadbeef', - }; - - const namespace = parseNamespaceRawValue( - // This value comes from Smart Contract Event (event.contract_event.raw_value) - '0x0c00000003096e616d657370616365020000000378797a0a70726f706572746965730c000000050b6c61756e636865642d61740a010000000000000000000000000000000e086c69666574696d650100000000000000000000000000000001106e616d6573706163652d696d706f7274051abf8e82623c380cd870931d48b525d5e12a4d67820e70726963652d66756e6374696f6e0c0000000504626173650100000000000000000000000000000001076275636b6574730b00000010010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000105636f6566660100000000000000000000000000000001116e6f2d766f77656c2d646973636f756e740100000000000000000000000000000001116e6f6e616c7068612d646973636f756e7401000000000000000000000000000000010b72657665616c65642d61740100000000000000000000000000000006067374617475730d000000057265616479', - 4, - '0x2114c8cda9e829f8b5d3c4163724ae9c4d9142d2bae4a35bffb006408d21c0ab', - 0 - ); - - expect(namespace?.address).toEqual(expectedNamespace.address); - expect(namespace?.namespace_id).toEqual(expectedNamespace.namespace_id); - expect(namespace?.base).toEqual(expectedNamespace.base); - expect(namespace?.coeff).toEqual(expectedNamespace.coeff); - 
expect(namespace?.launched_at).toEqual(expectedNamespace.launched_at); - expect(namespace?.lifetime).toEqual(expectedNamespace.lifetime); - expect(namespace?.no_vowel_discount).toEqual(expectedNamespace.no_vowel_discount); - expect(namespace?.nonalpha_discount).toEqual(expectedNamespace.nonalpha_discount); - expect(namespace?.ready_block).toEqual(expectedNamespace.ready_block); - expect(namespace?.reveal_block).toEqual(expectedNamespace.reveal_block); - expect(namespace?.status).toEqual(expectedNamespace.status); - expect(namespace?.buckets).toEqual(expectedNamespace.buckets); - expect(namespace?.tx_id).toEqual(expectedNamespace.tx_id); -}); - -test('Success: parse name raw value', () => { - const expectedName = { - attachment: { - hash: 'c5217bcb3e52612ff7c835f9bb46a5f86aa73b8d', - metadata: { - name: 'abcdef', - namespace: 'xyz', - tx_sender: { - type: 0, - version: 26, - hash160: 'bf8e82623c380cd870931d48b525d5e12a4d6782', - }, - op: 'name-import', - }, - }, - }; - - const expectedAttachment = expectedName.attachment; - - const name = parseNameRawValue( - // This value comes from Smart Contract Event (event.contract_event.raw_value) - '0x0c000000010a6174746163686d656e740c00000003106174746163686d656e742d696e646578010000000000000000000000000000000004686173680200000014c5217bcb3e52612ff7c835f9bb46a5f86aa73b8d086d657461646174610c00000004046e616d650200000006616263646566096e616d657370616365020000000378797a026f700d0000000b6e616d652d696d706f72740974782d73656e646572051abf8e82623c380cd870931d48b525d5e12a4d6782' - ); - - const attachment = name.attachment; - - expect(attachment.hash).toEqual(expectedAttachment.hash); - expect(attachment.metadata.name).toEqual(expectedAttachment.metadata.name); - expect(attachment.metadata.namespace).toEqual(expectedAttachment.metadata.namespace); - expect(attachment.metadata.op).toEqual(expectedAttachment.metadata.op); - expect(attachment.metadata.tx_sender.version).toEqual( - expectedAttachment.metadata.tx_sender.version - ); - expect(attachment.metadata.tx_sender.hash160).toEqual( - expectedAttachment.metadata.tx_sender.hash160 - ); -}); - -test('Parse TXT', () => { - const subdomain = `$ORIGIN abcdef.xyz - $TTL 3600 - asim IN TXT "owner=ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH" "seqn=0" "parts=1" "zf0=JE9SSUdJTiBhc2ltCiRUVEwgMzYwMApfaHR0cHMuX3RjcCBVUkkgMTAgMSAiaHR0cHM6Ly9nYWlhLmJsb2Nrc3RhY2sub3JnL2h1Yi9TVDJaUlgwSzI3R1cwU1AzR0pDRU1IRDk1VFFHSk1LQjdHOVkwWDFNSC9wcm9maWxlLmpzb24iCg==" - _http._tcp IN URI 10 1 "https://gaia.blockstack.org/hub/1M3325hr1utdv4HhSAfvYKhapzPP9Axhde/profile.json" - _resolver IN URI 10 1 "http://localhost:3000" - `; - - const parsedZoneFile = zoneFileParser.parseZoneFile(subdomain); - const zoneFileTxt = parseZoneFileTxt(parsedZoneFile.txt?.[0].txt as string[]); - expect(zoneFileTxt.owner).toBe('ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH'); - expect(zoneFileTxt.parts).toBe('1'); - expect(zoneFileTxt.seqn).toBe('0'); -}); diff --git a/src/tests/datastore-tests.ts b/src/tests/datastore-tests.ts index 5129e3f1..d5bb322f 100644 --- a/src/tests/datastore-tests.ts +++ b/src/tests/datastore-tests.ts @@ -2761,8 +2761,8 @@ describe('postgres datastore', () => { tx_index: 0, namespace_id: 'abc', address: 'ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH', - base: 1, - coeff: 1, + base: 1n, + coeff: 1n, launched_at: 14, lifetime: 1, no_vowel_discount: 1, @@ -3765,8 +3765,8 @@ describe('postgres datastore', () => { { namespace_id: 'abc', address: 'ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH', - base: 1, - coeff: 1, + base: 1n, + coeff: 1n, launched_at: 14, 
lifetime: 1, no_vowel_discount: 1, @@ -3959,8 +3959,8 @@ describe('postgres datastore', () => { { namespace_id: 'abc', address: 'ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH', - base: 1, - coeff: 1, + base: 1n, + coeff: 1n, launched_at: 14, lifetime: 1, no_vowel_discount: 1, @@ -4481,8 +4481,8 @@ describe('postgres datastore', () => { const namespace: DbBnsNamespace = { namespace_id: 'abc', address: 'ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH', - base: 1, - coeff: 1, + base: 1n, + coeff: 1n, launched_at: dbBlock.block_height, lifetime: 1, no_vowel_discount: 1, @@ -4615,8 +4615,7 @@ describe('postgres datastore', () => { const subdomains: DbBnsSubdomain[] = []; subdomains.push(subdomain); - await db.updateBatchSubdomains( - client, + await db.resolveBnsSubdomains( { index_block_hash: dbBlock.index_block_hash, parent_index_block_hash: dbBlock.parent_index_block_hash, From 2c8632fe8a43b043013d60b14a0c40bd6ad087fa Mon Sep 17 00:00:00 2001 From: CharlieC3 <2747302+CharlieC3@users.noreply.github.com> Date: Fri, 26 Aug 2022 12:14:49 -0400 Subject: [PATCH 09/24] ci: analyze commits using conventional commits --- .github/workflows/ci.yml | 1 + package.json | 8 ++++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 30636308..5ff16592 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -512,6 +512,7 @@ jobs: @semantic-release/changelog @semantic-release/git @semantic-release/exec + conventional-changelog-conventionalcommits - name: Set up Docker Buildx uses: docker/setup-buildx-action@v1 diff --git a/package.json b/package.json index 07911d6d..322e9d1a 100644 --- a/package.json +++ b/package.json @@ -56,8 +56,12 @@ "engineStrict": true, "release": { "plugins": [ - "@semantic-release/commit-analyzer", - "@semantic-release/release-notes-generator", + ["@semantic-release/commit-analyzer", { + "preset": "conventionalcommits" + }], + ["@semantic-release/release-notes-generator", { + "preset": "conventionalcommits" + }], [ "@semantic-release/exec", { From b3338e3a52591a5d8a2d586070dbd9d3680052ad Mon Sep 17 00:00:00 2001 From: semantic-release-bot Date: Fri, 26 Aug 2022 16:22:46 +0000 Subject: [PATCH 10/24] chore(release): 5.0.0-beta.1 [skip ci] MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## [5.0.0-beta.1](https://github.com/hirosystems/stacks-blockchain-api/compare/v4.1.2...v5.0.0-beta.1) (2022-08-26) ### ⚠ BREAKING CHANGES * optimize tables and improve canonical treatment of BNS data (#1287) ### Features * optimize tables and improve canonical treatment of BNS data ([#1287](https://github.com/hirosystems/stacks-blockchain-api/issues/1287)) ([1f64818](https://github.com/hirosystems/stacks-blockchain-api/commit/1f648187b8c701e802a06bac52b077fd10571ff7)) --- CHANGELOG.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 91bb6359..92b74610 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,14 @@ +## [5.0.0-beta.1](https://github.com/hirosystems/stacks-blockchain-api/compare/v4.1.2...v5.0.0-beta.1) (2022-08-26) + + +### ⚠ BREAKING CHANGES + +* optimize tables and improve canonical treatment of BNS data (#1287) + +### Features + +* optimize tables and improve canonical treatment of BNS data ([#1287](https://github.com/hirosystems/stacks-blockchain-api/issues/1287)) ([1f64818](https://github.com/hirosystems/stacks-blockchain-api/commit/1f648187b8c701e802a06bac52b077fd10571ff7)) + ## 
[4.1.2](https://github.com/hirosystems/stacks-blockchain-api/compare/v4.1.1...v4.1.2) (2022-08-18) From 619c176bffddc5570eaf86321825c2f4fadbf43f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafael=20C=C3=A1rdenas?= Date: Fri, 26 Aug 2022 13:25:23 -0500 Subject: [PATCH 11/24] chore: handle duplicate TSV rows during event import (#1297) * chore: return handler for duplicate TSV rows * chore: add log --- src/datastore/postgres-store.ts | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/src/datastore/postgres-store.ts b/src/datastore/postgres-store.ts index 2c9fd6b8..46df66e8 100644 --- a/src/datastore/postgres-store.ts +++ b/src/datastore/postgres-store.ts @@ -1037,11 +1037,24 @@ export class PgDataStore payload jsonb NOT NULL ) ON COMMIT DROP `); + // Use a `temp_raw_tsv` table first to store the raw TSV data as it might come with duplicate + // rows which would trigger the `PRIMARY KEY` constraint in `temp_event_observer_requests`. + // We will "upsert" from the former to the latter before event ingestion. + await client.query(` + CREATE TEMPORARY TABLE temp_raw_tsv + (LIKE temp_event_observer_requests) + ON COMMIT DROP + `); onStatusUpdate?.('Importing raw event requests into temporary table...'); - const importStream = client.query( - pgCopyStreams.from(`COPY temp_event_observer_requests FROM STDIN`) - ); + const importStream = client.query(pgCopyStreams.from(`COPY temp_raw_tsv FROM STDIN`)); await pipelineAsync(readStream, importStream); + onStatusUpdate?.('Removing any duplicate raw event requests...'); + await client.query(` + INSERT INTO temp_event_observer_requests + SELECT * + FROM temp_raw_tsv + ON CONFLICT DO NOTHING; + `); const totalRowCountQuery = await client.query<{ count: string }>( `SELECT COUNT(id) count FROM temp_event_observer_requests` ); From 763d99e8d3df6c0f1446e513b8ab36240534d264 Mon Sep 17 00:00:00 2001 From: semantic-release-bot Date: Fri, 26 Aug 2022 18:54:11 +0000 Subject: [PATCH 13/24] chore(release): 5.0.0-beta.2 [skip ci] ## [5.0.0-beta.2](https://github.com/hirosystems/stacks-blockchain-api/compare/v5.0.0-beta.1...v5.0.0-beta.2) (2022-08-26) ### Bug Fixes * bump version ([3863cce](https://github.com/hirosystems/stacks-blockchain-api/commit/3863cce1a64cf7a4c6cffd4f888c049cfd3ada65)) --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 92b74610..c47581cd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,10 @@ +## [5.0.0-beta.2](https://github.com/hirosystems/stacks-blockchain-api/compare/v5.0.0-beta.1...v5.0.0-beta.2) (2022-08-26) + + +### Bug Fixes + +* bump version ([3863cce](https://github.com/hirosystems/stacks-blockchain-api/commit/3863cce1a64cf7a4c6cffd4f888c049cfd3ada65)) + ## [5.0.0-beta.1](https://github.com/hirosystems/stacks-blockchain-api/compare/v4.1.2...v5.0.0-beta.1) (2022-08-26) From bc59817aa98dd3a978a27b73d14738b64eb823f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafael=20C=C3=A1rdenas?= Date: Wed, 31 Aug 2022 08:37:43 -0500 Subject: [PATCH 14/24] fix: import BNS v1 data during event replay (#1301) * fix: import v1 data during replay * fix: import names first, subdomains last * feat: obtain genesis block data from tsv * fix: v1 import tests * fix: import route * fix: api test * fix: move to for await of * docs: update README to reflect new replay --- readme.md | 66 +++++++++++---- running_an_api.md | 30 +------ running_api_from_source.md | 27 +----- src/datastore/common.ts | 4 +- src/datastore/postgres-store.ts | 8 +- src/event-replay/event-replay.ts | 
20 ++++-
 src/event-replay/helpers.ts      |  50 +++++++++++
 src/import-v1/index.ts           | 137 ++++++++++++++++++-------------
 src/index.ts                     |  17 ----
 src/tests-bns/api.ts             |   4 +-
 src/tests-bns/v1-import-tests.ts |  25 ++++--
 11 files changed, 233 insertions(+), 155 deletions(-)

diff --git a/readme.md b/readme.md
index 45952357..edff1ddb 100644
--- a/readme.md
+++ b/readme.md
@@ -98,19 +98,51 @@ For running offline mode set an environment variable `STACKS_API_MODE=offline`
 
 ## Event Replay
 
-The stacks-node is only able to emit events live as they happen. This poses a problem in the scenario where the stacks-blockchain-api needs to
-be upgraded and its database cannot be migrated to a new schema. One way to handle this upgrade is to wipe the stacks-blockchain-api's database
-and stacks-node working directory, and re-sync from scratch.
+The stacks-node is only able to emit events live as they happen. This poses a problem in the
+scenario where the stacks-blockchain-api needs to be upgraded and its database cannot be migrated to
+a new schema. One way to handle this upgrade is to wipe the stacks-blockchain-api's database and
+stacks-node working directory, and re-sync from scratch.
 
-Alternatively, an event-replay feature is available where the API records the HTTP POST requests from the stacks-node event emitter, then streams
-these events back to itself. Essentially simulating a wipe & full re-sync, but much quicker.
+Alternatively, an event-replay feature is available where the API records the HTTP POST requests
+from the stacks-node event emitter, then streams these events back to itself, essentially simulating
+a wipe & full re-sync, but much quicker.
 
-The feature can be used via program args. For example, if there are breaking changes in the API's sql schema, like adding a new column which requires
-event's to be re-played, the following steps could be ran:
+The feature can be used via program args. For example, if there are breaking changes in the API's
+SQL schema, like adding a new column which requires events to be re-played, the following steps
+can be run:
 
 ### Event Replay Instructions
 
-1. Ensure the API process is not running. When stopping the API, let the process exit gracefully so that any in-progress SQL writes can finish.
+#### V1 BNS Data
+
+**Optional but recommended** - If you want the V1 BNS data, there are a few extra steps:
+
+1. Download BNS data:
+   ```shell
+   curl -L https://storage.googleapis.com/blockstack-v1-migration-data/export-data.tar.gz -o /stacks-node/bns/export-data.tar.gz
+   ```
+1. Extract it:
+   ```shell
+   tar -xzvf ./bns/export-data.tar.gz -C /stacks-node/bns/
+   ```
+1. Each file in `./bns` will have a corresponding `sha256` value. To verify, run a script like the
+   following to check the sha256sum:
+
+   ```bash
+   for file in `ls /stacks-node/bns/* | grep -v sha256 | grep -v .tar.gz`; do
+      if [ $(sha256sum $file | awk {'print $1'}) == $(cat ${file}.sha256 ) ]; then
+         echo "sha256 Matched $file"
+      else
+         echo "sha256 Mismatch $file"
+      fi
+   done
+   ```
+1. Set the data's location as the value of `BNS_IMPORT_DIR` in your `.env` file, as shown below.
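+
+   For example (assuming the data was extracted to `/stacks-node/bns`, per the commands above):
+
+   ```
+   BNS_IMPORT_DIR=/stacks-node/bns
+   ```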
+
+#### Export and Import
+
+1. Ensure the API process is not running. When stopping the API, let the process exit gracefully so
+   that any in-progress SQL writes can finish.
 1. Export event data to disk with the `export-events` command:
 
    ```shell
    node ./lib/index.js export-events --file /tmp/stacks-node-events.tsv
    ```
 1. Update to the new stacks-blockchain-api version.
 1. Perform the event playback using the `import-events` command:
 
-   **WARNING**: This will **drop _all_ tables** from the configured Postgres database, including any tables not automatically added by the API.
+   **WARNING**: This will **drop _all_ tables** from the configured Postgres database, including any
+   tables not automatically added by the API.
 
    ```shell
    node ./lib/index.js import-events --file /tmp/stacks-node-events.tsv --wipe-db --force
    ```
 
    This command has two modes of operation, specified by the `--mode` option:
-   * `archival` (default): The process will import and ingest *all* blockchain events that have happened since the first block.
-   * `pruned`: The import process will ignore some prunable events (mempool, microblocks) until the import block height has reached `chain tip - 256` blocks. This saves a considerable amount of time during import, but sacrifices some historical data. You can use this mode if you're mostly interested in running an API that prioritizes real time information.
+   * `archival` (default): The process will import and ingest *all* blockchain events that have
+     happened since the first block.
+   * `pruned`: The import process will ignore some prunable events (mempool, microblocks) until the
+     import block height has reached `chain tip - 256` blocks. This saves a considerable amount of
+     time during import, but sacrifices some historical data. You can use this mode if you're mostly
+     interested in running an API that prioritizes real-time information.
 
-Alternatively, instead of performing the `export-events` command in step 1, an environmental variable can be set which enables events to be streamed to a file
-as they are received, while the application is running normally. To enable this feature, set the `STACKS_EXPORT_EVENTS_FILE` env var to the file path where
-events should be appended. Example:
+Alternatively, instead of performing the `export-events` command in step 1, an environment
+variable can be set which enables events to be streamed to a file as they are received, while the
+application is running normally. To enable this feature, set the `STACKS_EXPORT_EVENTS_FILE` env var
+to the file path where events should be appended. Example:
 ```
 STACKS_EXPORT_EVENTS_FILE=/tmp/stacks-node-events.tsv
 ```

diff --git a/running_an_api.md b/running_an_api.md
index 0c47a1dd..840931c5 100644
--- a/running_an_api.md
+++ b/running_an_api.md
@@ -78,7 +78,7 @@ Since we'll need to create some files/dirs for persistent data we'll first creat
 We'll be using:
 
 ```bash
-$ mkdir -p ./stacks-node/{persistent-data/postgres,persistent-data/stacks-blockchain,bns,config}
+$ mkdir -p ./stacks-node/{persistent-data/postgres,persistent-data/stacks-blockchain,config}
 $ docker pull blockstack/stacks-blockchain-api \
   && docker pull blockstack/stacks-blockchain \
   && docker pull postgres:alpine
 $ docker network create stacks-blockchain > /dev/null 2>&1
 $ cd ./stacks-node
 ```
 
-**Optional but recommended**: If you need the v1 BNS data, there are going to be a few extra steps.
-
-1. Download the BNS data:
-`curl -L https://storage.googleapis.com/blockstack-v1-migration-data/export-data.tar.gz -o ./bns/export-data.tar.gz`
-2. Extract the data:
-`tar -xzvf ./bns/export-data.tar.gz -C ./bns/`
-3. Each file in `./bns` will have a corresponding `sha256` value.
- -To Verify, run a script like the following to check the sha256sum: - -```bash -for file in `ls ./bns/* | grep -v sha256 | grep -v .tar.gz`; do - if [ $(sha256sum $file | awk {'print $1'}) == $(cat ${file}.sha256 ) ]; then - echo "sha256 Matched $file" - else - echo "sha256 Mismatch $file" - fi -done -``` - ## Postgres The `postgres:alpine` image can be run with default settings, the only requirement is that a password Environment Variable is set for the `postgres` user: `POSTGRES_PASSWORD=postgres` @@ -161,16 +141,9 @@ STACKS_BLOCKCHAIN_API_PORT=3999 STACKS_BLOCKCHAIN_API_HOST=0.0.0.0 STACKS_CORE_RPC_HOST=stacks-blockchain STACKS_CORE_RPC_PORT=20443 -BNS_IMPORT_DIR=/bns-data API_DOCS_URL=https://docs.hiro.so/api ``` -**Note** that here we are importing the bns data with the env var `BNS_IMPORT`. - -To Disable this import, simply comment the line: `#BNS_IMPORT_DIR=/bns-data` - -***If you leave this enabled***: please allow several minutes for the one-time import to complete before continuing. - The other Environment Variables to pay attention to: - `PG_HOST`: Set this to your **postgres** instance. In this guide, we'll be using a container named `postgres`. @@ -184,7 +157,6 @@ docker run -d --rm \ --name stacks-blockchain-api \ --net=stacks-blockchain \ --env-file $(pwd)/.env \ - -v $(pwd)/bns:/bns-data \ -p 3700:3700 \ -p 3999:3999 \ blockstack/stacks-blockchain-api diff --git a/running_api_from_source.md b/running_api_from_source.md index e377fb4f..6a09d9fa 100644 --- a/running_api_from_source.md +++ b/running_api_from_source.md @@ -35,7 +35,7 @@ Since we'll need to create some files/dirs for persistent data, we'll first create a base directory structure and set some permissions: ```bash -$ sudo mkdir -p /stacks-node/{persistent-data/stacks-blockchain,bns,config,binaries} +$ sudo mkdir -p /stacks-node/{persistent-data/stacks-blockchain,config,binaries} $ sudo chown -R $(whoami) /stacks-node $ cd /stacks-node ``` @@ -43,7 +43,7 @@ $ cd /stacks-node ## Install Requirements ```bash -$ PG_VERSION=12 \ +$ PG_VERSION=14 \ && NODE_VERSION=16 \ && sudo apt-get update \ && sudo apt-get install -y \ @@ -65,26 +65,6 @@ $ PG_VERSION=12 \ nodejs ``` -**Optional but recommended** - If you want the V1 BNS data, there are going to be a few extra steps: - -1. Download the BNS data: -`curl -L https://storage.googleapis.com/blockstack-v1-migration-data/export-data.tar.gz -o /stacks-node/bns/export-data.tar.gz` -2. Extract the data: -`tar -xzvf ./bns/export-data.tar.gz -C /stacks-node/bns/` -3. Each file in `./bns` will have a corresponding `sha256` value. - -To Verify, run a script like the following to check the sha256sum: - -```bash -for file in `ls /stacks-node/bns/* | grep -v sha256 | grep -v .tar.gz`; do - if [ $(sha256sum $file | awk {'print $1'}) == $(cat ${file}.sha256 ) ]; then - echo "sha256 Matched $file" - else - echo "sha256 Mismatch $file" - fi -done -``` - ## postgres ### postgres permissions @@ -127,8 +107,6 @@ $ git clone https://github.com/hirosystems/stacks-blockchain-api /stacks-node/st The stacks blockchain api requires several Environment Variables to be set in order to run properly. To reduce complexity, we're going to create a `.env` file that we'll use for these env vars. -** Note: ** to enable BNS names, uncomment `BNS_IMPORT_DIR` in the below `.env` file. 
- Create a new file: `/stacks-node/stacks-blockchain-api/.env` with the following content: ```bash @@ -148,7 +126,6 @@ STACKS_BLOCKCHAIN_API_PORT=3999 STACKS_BLOCKCHAIN_API_HOST=0.0.0.0 STACKS_CORE_RPC_HOST=localhost STACKS_CORE_RPC_PORT=20443 -#BNS_IMPORT_DIR=/stacks-node/bns EOF $ cd /stacks-node/stacks-blockchain-api && nohup node ./lib/index.js & ``` diff --git a/src/datastore/common.ts b/src/datastore/common.ts index 3edf7f87..c7da2119 100644 --- a/src/datastore/common.ts +++ b/src/datastore/common.ts @@ -439,7 +439,7 @@ export interface DataStoreAttachmentData { blockHeight: number; } -export interface DataStoreSubdomainBlockData { +export interface DataStoreBnsBlockData { index_block_hash: string; parent_index_block_hash: string; microblock_hash: string; @@ -449,7 +449,7 @@ export interface DataStoreSubdomainBlockData { export interface DataStoreAttachmentSubdomainData { attachment?: DataStoreAttachmentData; - blockData?: DataStoreSubdomainBlockData; + blockData?: DataStoreBnsBlockData; subdomains?: DbBnsSubdomain[]; } diff --git a/src/datastore/postgres-store.ts b/src/datastore/postgres-store.ts index 46df66e8..7a0193ac 100644 --- a/src/datastore/postgres-store.ts +++ b/src/datastore/postgres-store.ts @@ -101,7 +101,7 @@ import { DbAssetEventTypeId, DbTxGlobalStatus, DataStoreAttachmentData, - DataStoreSubdomainBlockData, + DataStoreBnsBlockData, DataStoreAttachmentSubdomainData, } from './common'; import { @@ -2175,7 +2175,7 @@ export class PgDataStore ); let isCanonical = true; let txIndex = -1; - const blockData: DataStoreSubdomainBlockData = { + const blockData: DataStoreBnsBlockData = { index_block_hash: '', parent_index_block_hash: '', microblock_hash: '', @@ -7222,7 +7222,7 @@ export class PgDataStore // The `names` and `zonefiles` tables only track latest zonefile changes. We need to check // `nft_custody` for the latest name owner, but only for names that were NOT imported from v1 // since they did not generate an NFT event for us to track. - if (nameZonefile.rows[0].registered_at !== 0) { + if (nameZonefile.rows[0].registered_at !== 1) { let value: Buffer; try { value = bnsNameCV(name); @@ -7427,7 +7427,7 @@ export class PgDataStore names WHERE address = $1 - AND registered_at = 0 + AND registered_at = 1 AND canonical = TRUE AND microblock_canonical = TRUE `, diff --git a/src/event-replay/event-replay.ts b/src/event-replay/event-replay.ts index c1ca711d..09c6700d 100644 --- a/src/event-replay/event-replay.ts +++ b/src/event-replay/event-replay.ts @@ -3,7 +3,8 @@ import * as fs from 'fs'; import { cycleMigrations, dangerousDropAllTables, PgDataStore } from '../datastore/postgres-store'; import { startEventServer } from '../event-stream/event-server'; import { getApiConfiguredChainID, httpPostRequest, logger } from '../helpers'; -import { findTsvBlockHeight, getDbBlockHeight } from './helpers'; +import { findBnsGenesisBlockData, findTsvBlockHeight, getDbBlockHeight } from './helpers'; +import { importV1BnsNames, importV1BnsSubdomains, importV1TokenOfferingData } from '../import-v1'; enum EventImportMode { /** @@ -107,6 +108,8 @@ export async function importEventsFromTsv( if (eventImportMode === EventImportMode.pruned) { console.log(`Ignoring all prunable events before block height: ${prunedBlockHeight}`); } + // Look for the TSV's genesis block information for BNS import. 
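+  // (In the TSV this is the block at height 1; its lone token-transfer transaction carries the
+  // genesis events, including every BNS name registration, so its block hashes, tx_id, and
+  // tx_index are reused below to anchor the imported V1 rows.)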
+ const tsvGenesisBlockData = await findBnsGenesisBlockData(resolvedFilePath); const db = await PgDataStore.connect({ usageName: 'import-events', @@ -122,6 +125,18 @@ export async function importEventsFromTsv( httpLogLevel: 'debug', }); + await importV1TokenOfferingData(db); + + // Import V1 BNS names first. Subdomains will be imported after TSV replay is finished in order to + // keep the size of the `subdomains` table small. + if (process.env.BNS_IMPORT_DIR) { + logger.info(`Using BNS export data from: ${process.env.BNS_IMPORT_DIR}`); + await importV1BnsNames(db, process.env.BNS_IMPORT_DIR, tsvGenesisBlockData); + } else { + logger.warn(`Notice: full BNS functionality requires 'BNS_IMPORT_DIR' to be set.`); + } + + // Import TSV chain data const readStream = fs.createReadStream(resolvedFilePath); const rawEventsIterator = PgDataStore.getRawEventRequests(readStream, status => { console.log(status); @@ -163,6 +178,9 @@ export async function importEventsFromTsv( } } await db.finishEventReplay(); + if (process.env.BNS_IMPORT_DIR) { + await importV1BnsSubdomains(db, process.env.BNS_IMPORT_DIR, tsvGenesisBlockData); + } console.log(`Event import and playback successful.`); await eventServer.closeAsync(); await db.close(); diff --git a/src/event-replay/helpers.ts b/src/event-replay/helpers.ts index d6007e51..c03dea42 100644 --- a/src/event-replay/helpers.ts +++ b/src/event-replay/helpers.ts @@ -1,6 +1,15 @@ +import * as fs from 'fs'; +import * as readline from 'readline'; +import { decodeTransaction, TxPayloadTypeID } from 'stacks-encoding-native-js'; +import { DataStoreBnsBlockData } from '../datastore/common'; import { PgDataStore } from '../datastore/postgres-store'; import { ReverseFileStream } from './reverse-file-stream'; +export type BnsGenesisBlock = DataStoreBnsBlockData & { + tx_id: string; + tx_index: number; +}; + /** * Traverse a TSV file in reverse to find the last received `/new_block` node message and return * the `block_height` reported by that event. Even though the block produced by that event might @@ -26,6 +35,47 @@ export async function findTsvBlockHeight(filePath: string): Promise { return blockHeight; } +/** + * Traverse a TSV file to find the genesis block and extract its data so we can use it during V1 BNS + * import. + * @param filePath - TSV path + * @returns Genesis block data + */ +export async function findBnsGenesisBlockData(filePath: string): Promise { + const rl = readline.createInterface({ + input: fs.createReadStream(filePath), + crlfDelay: Infinity, + }); + for await (const line of rl) { + const columns = line.split('\t'); + const eventName = columns[2]; + if (eventName === '/new_block') { + const payload = JSON.parse(columns[3]); + // Look for block 1 + if (payload.block_height === 1) { + for (const tx of payload.transactions) { + const decodedTx = decodeTransaction(tx.raw_tx); + // Look for the only token transfer transaction in the genesis block. This is the one + // that contains all the events, including all BNS name registrations. + if (decodedTx.payload.type_id === TxPayloadTypeID.TokenTransfer) { + rl.close(); + return { + index_block_hash: payload.index_block_hash, + parent_index_block_hash: payload.parent_index_block_hash, + microblock_hash: payload.parent_microblock, + microblock_sequence: payload.parent_microblock_sequence, + microblock_canonical: true, + tx_id: decodedTx.tx_id, + tx_index: tx.tx_index, + }; + } + } + } + } + } + throw new Error('BNS genesis block data not found'); +} + /** * Get the current block height from the DB. 
We won't use the `getChainTip` method since that * adds some conversions from block hashes into strings that we're not interested in. We also can't diff --git a/src/import-v1/index.ts b/src/import-v1/index.ts index fa433d0d..cd70ca35 100644 --- a/src/import-v1/index.ts +++ b/src/import-v1/index.ts @@ -7,7 +7,6 @@ import * as path from 'path'; import * as zlib from 'zlib'; import { bitcoinToStacksAddress } from 'stacks-encoding-native-js'; import * as split2 from 'split2'; - import { DbBnsName, DbBnsNamespace, @@ -24,15 +23,8 @@ import { logger, REPO_DIR, } from '../helpers'; - import { PoolClient } from 'pg'; - -const IMPORT_FILES = [ - 'chainstate.txt', - 'name_zonefiles.txt', - 'subdomains.csv', - 'subdomain_zonefiles.txt', -]; +import { BnsGenesisBlock } from '../event-replay/helpers'; const finished = util.promisify(stream.finished); const pipeline = util.promisify(stream.pipeline); @@ -87,20 +79,20 @@ class ChainProcessor extends stream.Writable { namespace: Map; db: PgDataStore; client: PoolClient; - emptyBlockData = { - index_block_hash: '', - parent_index_block_hash: '', - microblock_hash: '', - microblock_sequence: I32_MAX, - microblock_canonical: true, - } as const; + genesisBlock: BnsGenesisBlock; - constructor(client: PoolClient, db: PgDataStore, zhashes: Map) { + constructor( + client: PoolClient, + db: PgDataStore, + zhashes: Map, + genesisBlock: BnsGenesisBlock + ) { super(); this.zhashes = zhashes; this.namespace = new Map(); this.client = client; this.db = db; + this.genesisBlock = genesisBlock; logger.info(`${this.tag}: importer starting`); } @@ -159,16 +151,16 @@ class ChainProcessor extends stream.Writable { name: parts[0], address: parts[1], namespace_id: ns, - registered_at: 0, + registered_at: 1, expire_block: namespace.lifetime, zonefile: zonefile, zonefile_hash: zonefileHash, - tx_id: '', - tx_index: 0, + tx_id: this.genesisBlock.tx_id, + tx_index: this.genesisBlock.tx_index, canonical: true, status: 'name-register', }; - await this.db.updateNames(this.client, this.emptyBlockData, obj); + await this.db.updateNames(this.client, this.genesisBlock, obj); this.rowCount += 1; if (obj.zonefile === '') { logger.verbose( @@ -182,20 +174,20 @@ class ChainProcessor extends stream.Writable { const obj: DbBnsNamespace = { namespace_id: parts[0], address: parts[1], - reveal_block: 0, - ready_block: 0, + reveal_block: 1, + ready_block: 1, buckets: parts[2], base: BigInt(parts[3]), coeff: BigInt(parts[4]), nonalpha_discount: parseInt(parts[5], 10), no_vowel_discount: parseInt(parts[6], 10), lifetime: parseInt(parts[7], 10), - tx_id: '', - tx_index: 0, + tx_id: this.genesisBlock.tx_id, + tx_index: this.genesisBlock.tx_index, canonical: true, }; this.namespace.set(obj.namespace_id, obj); - await this.db.updateNamespaces(this.client, this.emptyBlockData, obj); + await this.db.updateNamespaces(this.client, this.genesisBlock, obj); this.rowCount += 1; } } @@ -239,9 +231,13 @@ function btcToStxAddress(btcAddress: string) { } class SubdomainTransform extends stream.Transform { - constructor() { + genesisBlock: BnsGenesisBlock; + + constructor(genesisBlock: BnsGenesisBlock) { super({ objectMode: true, highWaterMark: SUBDOMAIN_BATCH_SIZE }); + this.genesisBlock = genesisBlock; } + _transform(data: string, _encoding: string, callback: stream.TransformCallback) { const parts = data.split(','); if (parts[0] !== 'zonefile_hash') { @@ -258,8 +254,8 @@ class SubdomainTransform extends stream.Transform { fully_qualified_subdomain: parts[2], owner: btcToStxAddress(parts[3]), //convert btc address 
to stx, block_height: 1, // burn_block_height: parseInt(parts[4], 10) - tx_index: 0, - tx_id: '', + tx_index: this.genesisBlock.tx_index, + tx_id: this.genesisBlock.tx_id, parent_zonefile_index: parseInt(parts[5], 10), zonefile_offset: parseInt(parts[6], 10), resolver: parts[7], @@ -309,12 +305,12 @@ async function valid(fileName: string): Promise { return true; } -async function* readSubdomains(importDir: string) { +async function* readSubdomains(importDir: string, genesisBlock: BnsGenesisBlock) { const metaIter = asyncIterableToGenerator( stream.pipeline( fs.createReadStream(path.join(importDir, 'subdomains.csv')), new LineReaderStream({ highWaterMark: SUBDOMAIN_BATCH_SIZE }), - new SubdomainTransform(), + new SubdomainTransform(genesisBlock), error => { if (error) { console.error('Error reading subdomains.csv'); @@ -396,13 +392,7 @@ class StxVestingTransform extends stream.Transform { } } -export async function importV1BnsData(db: PgDataStore, importDir: string) { - const configState = await db.getConfigState(); - if (configState.bns_names_onchain_imported && configState.bns_subdomains_imported) { - logger.verbose('Stacks 1.0 BNS data is already imported'); - return; - } - +async function validateBnsImportDir(importDir: string, importFiles: string[]) { try { const statResult = fs.statSync(importDir); if (!statResult.isDirectory()) { @@ -413,18 +403,29 @@ export async function importV1BnsData(db: PgDataStore, importDir: string) { throw error; } - logger.info('Stacks 1.0 BNS data import started'); - logger.info(`Using BNS export data from: ${importDir}`); - // validate contents with their .sha256 files // check if the files we need can be read - for (const fname of IMPORT_FILES) { + for (const fname of importFiles) { if (!(await valid(path.join(importDir, fname)))) { const errMsg = `Cannot read import file due to sha256 mismatch: ${fname}`; logError(errMsg); throw new Error(errMsg); } } +} + +export async function importV1BnsNames( + db: PgDataStore, + importDir: string, + genesisBlock: BnsGenesisBlock +) { + const configState = await db.getConfigState(); + if (configState.bns_names_onchain_imported) { + logger.verbose('Stacks 1.0 BNS names are already imported'); + return; + } + await validateBnsImportDir(importDir, ['chainstate.txt', 'name_zonefiles.txt']); + logger.info('Stacks 1.0 BNS name import started'); const client = await db.pool.connect(); try { @@ -433,26 +434,53 @@ export async function importV1BnsData(db: PgDataStore, importDir: string) { await pipeline( fs.createReadStream(path.join(importDir, 'chainstate.txt')), new LineReaderStream({ highWaterMark: 100 }), - new ChainProcessor(client, db, zhashes) + new ChainProcessor(client, db, zhashes, genesisBlock) ); - - const blockData = { - index_block_hash: '', - parent_index_block_hash: '', - microblock_hash: '', - microblock_sequence: I32_MAX, - microblock_canonical: true, + const updatedConfigState: DbConfigState = { + ...configState, + bns_names_onchain_imported: true, }; + await db.updateConfigState(updatedConfigState, client); + await client.query('COMMIT'); + } catch (error) { + await client.query('ROLLBACK'); + throw error; + } finally { + client.release(); + } + logger.info('Stacks 1.0 BNS name import completed'); +} + +export async function importV1BnsSubdomains( + db: PgDataStore, + importDir: string, + genesisBlock: BnsGenesisBlock +) { + const configState = await db.getConfigState(); + if (configState.bns_subdomains_imported) { + logger.verbose('Stacks 1.0 BNS subdomains are already imported'); + return; + } + await 
validateBnsImportDir(importDir, ['subdomains.csv', 'subdomain_zonefiles.txt']); + logger.info('Stacks 1.0 BNS subdomain import started'); + + const client = await db.pool.connect(); + try { + await client.query('BEGIN'); let subdomainsImported = 0; - const subdomainIter = readSubdomains(importDir); + const subdomainIter = readSubdomains(importDir, genesisBlock); for await (const subdomainBatch of asyncBatchIterate( subdomainIter, SUBDOMAIN_BATCH_SIZE, false )) { - await db.updateBatchSubdomains(client, [{ blockData, subdomains: subdomainBatch }]); - await db.updateBatchZonefiles(client, [{ blockData, subdomains: subdomainBatch }]); + await db.updateBatchSubdomains(client, [ + { blockData: genesisBlock, subdomains: subdomainBatch }, + ]); + await db.updateBatchZonefiles(client, [ + { blockData: genesisBlock, subdomains: subdomainBatch }, + ]); subdomainsImported += subdomainBatch.length; if (subdomainsImported % 10_000 === 0) { logger.info(`Subdomains imported: ${subdomainsImported}`); @@ -462,7 +490,6 @@ export async function importV1BnsData(db: PgDataStore, importDir: string) { const updatedConfigState: DbConfigState = { ...configState, - bns_names_onchain_imported: true, bns_subdomains_imported: true, }; await db.updateConfigState(updatedConfigState, client); @@ -474,7 +501,7 @@ export async function importV1BnsData(db: PgDataStore, importDir: string) { client.release(); } - logger.info('Stacks 1.0 BNS data import completed'); + logger.info('Stacks 1.0 BNS subdomain import completed'); } /** A passthrough stream which hashes the data as it passes through. */ diff --git a/src/index.ts b/src/index.ts index 57085f27..56ec77f4 100644 --- a/src/index.ts +++ b/src/index.ts @@ -18,7 +18,6 @@ import { startEventServer } from './event-stream/event-server'; import { StacksCoreRpcClient } from './core-rpc/client'; import { createServer as createPrometheusServer } from '@promster/server'; import { registerShutdownConfig } from './shutdown-handler'; -import { importV1TokenOfferingData, importV1BnsData } from './import-v1'; import { OfflineDummyStore } from './datastore/offline-dummy-store'; import { Socket } from 'net'; import * as getopts from 'getopts'; @@ -123,23 +122,7 @@ async function init(): Promise { }); if (apiMode !== StacksApiMode.readOnly) { - if (db instanceof PgDataStore) { - if (isProdEnv) { - await importV1TokenOfferingData(db); - } else { - logger.warn( - `Notice: skipping token offering data import because of non-production NODE_ENV` - ); - } - if (isProdEnv && !process.env.BNS_IMPORT_DIR) { - logger.warn(`Notice: full BNS functionality requires 'BNS_IMPORT_DIR' to be set.`); - } else if (process.env.BNS_IMPORT_DIR) { - await importV1BnsData(db, process.env.BNS_IMPORT_DIR); - } - } - const configuredChainID = getApiConfiguredChainID(); - const eventServer = await startEventServer({ datastore: db, chainId: configuredChainID, diff --git a/src/tests-bns/api.ts b/src/tests-bns/api.ts index deee6a52..7ace87af 100644 --- a/src/tests-bns/api.ts +++ b/src/tests-bns/api.ts @@ -414,7 +414,7 @@ describe('BNS API tests', () => { .build(); await db.update(block); - // Register another name in block 0 (imported from v1, so no nft_event produced) + // Register another name in block 1 (imported from v1, so no nft_event produced) const dbName2: DbBnsName = { name: 'imported.btc', address: address, @@ -422,7 +422,7 @@ describe('BNS API tests', () => { expire_block: 10000, zonefile: 'test-zone-file', zonefile_hash: 'zonefileHash', - registered_at: 0, + registered_at: 1, canonical: true, tx_id: '', 
tx_index: 0, diff --git a/src/tests-bns/v1-import-tests.ts b/src/tests-bns/v1-import-tests.ts index 47283d22..4e38a63e 100644 --- a/src/tests-bns/v1-import-tests.ts +++ b/src/tests-bns/v1-import-tests.ts @@ -5,15 +5,18 @@ import * as supertest from 'supertest'; import { startEventServer } from '../event-stream/event-server'; import { Server } from 'net'; import { ChainID } from '@stacks/transactions'; -import { importV1BnsData } from '../import-v1'; +import { importV1BnsNames, importV1BnsSubdomains } from '../import-v1'; import * as assert from 'assert'; import { TestBlockBuilder } from '../test-utils/test-builders'; +import { DataStoreBlockUpdateData } from '../datastore/common'; +import { BnsGenesisBlock } from '../event-replay/helpers'; describe('BNS V1 import', () => { let db: PgDataStore; let client: PoolClient; let eventServer: Server; let api: ApiServer; + let block: DataStoreBlockUpdateData; beforeEach(async () => { process.env.PG_DATABASE = 'postgres'; @@ -23,12 +26,22 @@ describe('BNS V1 import', () => { eventServer = await startEventServer({ datastore: db, chainId: ChainID.Testnet, httpLogLevel: 'silly' }); api = await startApiServer({ datastore: db, chainId: ChainID.Testnet, httpLogLevel: 'silly' }); - const block = new TestBlockBuilder().build(); + block = new TestBlockBuilder().addTx().build(); await db.update(block); }); test('v1-import', async () => { - await importV1BnsData(db, 'src/tests-bns/import-test-files'); + const genesis: BnsGenesisBlock = { + index_block_hash: block.block.index_block_hash, + parent_index_block_hash: block.block.parent_index_block_hash, + microblock_canonical: true, + microblock_hash: block.block.parent_microblock_hash, + microblock_sequence: block.block.parent_microblock_sequence, + tx_id: block.txs[0].tx.tx_id, + tx_index: block.txs[0].tx.tx_index, + }; + await importV1BnsNames(db, 'src/tests-bns/import-test-files', genesis); + await importV1BnsSubdomains(db, 'src/tests-bns/import-test-files', genesis); // Names const query1 = await supertest(api.server).get(`/v1/names/zumrai.id`); @@ -37,8 +50,8 @@ describe('BNS V1 import', () => { expect(query1.body).toEqual({ address: 'SP29EJ0SVM2TRZ3XGVTZPVTKF4SV1VMD8C0GA5SK5', blockchain: 'stacks', - expire_block: 52595, - last_txid: '', + expire_block: 52596, + last_txid: '0x1234', status: 'name-register', zonefile: '$ORIGIN zumrai.id\n$TTL 3600\n_http._tcp IN URI 10 1 "https://gaia.blockstack.org/hub/1EPno1VcdGx89ukN2we4iVpnFtkHzw8i5d/profile.json"\n\n', @@ -121,7 +134,7 @@ describe('BNS V1 import', () => { expect(query9.body).toEqual({ address: 'SP2S2F9TCAT43KEJT02YTG2NXVCPZXS1426T63D9H', blockchain: 'stacks', - last_txid: '', + last_txid: '0x1234', resolver: 'https://registrar.blockstack.org', status: 'registered_subdomain', zonefile: From 9da4d8a8e6d7beeb188b37dc4117638ff74a5f55 Mon Sep 17 00:00:00 2001 From: semantic-release-bot Date: Wed, 31 Aug 2022 13:43:57 +0000 Subject: [PATCH 15/24] chore(release): 5.0.0-beta.3 [skip ci] ## [5.0.0-beta.3](https://github.com/hirosystems/stacks-blockchain-api/compare/v5.0.0-beta.2...v5.0.0-beta.3) (2022-08-31) ### Bug Fixes * import BNS v1 data during event replay ([#1301](https://github.com/hirosystems/stacks-blockchain-api/issues/1301)) ([bc59817](https://github.com/hirosystems/stacks-blockchain-api/commit/bc59817aa98dd3a978a27b73d14738b64eb823f9)) --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c47581cd..fe04656b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,10 @@ +## 
[5.0.0-beta.3](https://github.com/hirosystems/stacks-blockchain-api/compare/v5.0.0-beta.2...v5.0.0-beta.3) (2022-08-31) + + +### Bug Fixes + +* import BNS v1 data during event replay ([#1301](https://github.com/hirosystems/stacks-blockchain-api/issues/1301)) ([bc59817](https://github.com/hirosystems/stacks-blockchain-api/commit/bc59817aa98dd3a978a27b73d14738b64eb823f9)) + ## [5.0.0-beta.2](https://github.com/hirosystems/stacks-blockchain-api/compare/v5.0.0-beta.1...v5.0.0-beta.2) (2022-08-26) From 03a1896cff8937a5f39a8b75e5adf51a6344592c Mon Sep 17 00:00:00 2001 From: CharlieC3 <2747302+CharlieC3@users.noreply.github.com> Date: Tue, 30 Aug 2022 11:26:22 -0400 Subject: [PATCH 16/24] fix: add postgres connection error checking for ECONNRESET code --- src/datastore/postgres-store.ts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/datastore/postgres-store.ts b/src/datastore/postgres-store.ts index 7a0193ac..c275f7e8 100644 --- a/src/datastore/postgres-store.ts +++ b/src/datastore/postgres-store.ts @@ -313,6 +313,8 @@ function isPgConnectionError(error: any): string | false { return 'Postgres connection ETIMEDOUT'; } else if (error.code === 'ENOTFOUND') { return 'Postgres connection ENOTFOUND'; + } else if (error.code === 'ECONNRESET') { + return 'Postgres connection ECONNRESET'; } else if (error.message) { const msg = (error as Error).message.toLowerCase(); if (msg.includes('database system is starting up')) { From 00e71975db2dd1fc4f4a2b2e6f0ca7998db2378f Mon Sep 17 00:00:00 2001 From: semantic-release-bot Date: Wed, 31 Aug 2022 15:16:57 +0000 Subject: [PATCH 17/24] chore(release): 5.0.0-beta.4 [skip ci] ## [5.0.0-beta.4](https://github.com/hirosystems/stacks-blockchain-api/compare/v5.0.0-beta.3...v5.0.0-beta.4) (2022-08-31) ### Bug Fixes * add postgres connection error checking for ECONNRESET code ([03a1896](https://github.com/hirosystems/stacks-blockchain-api/commit/03a1896cff8937a5f39a8b75e5adf51a6344592c)) --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fe04656b..c5ba0261 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,10 @@ +## [5.0.0-beta.4](https://github.com/hirosystems/stacks-blockchain-api/compare/v5.0.0-beta.3...v5.0.0-beta.4) (2022-08-31) + + +### Bug Fixes + +* add postgres connection error checking for ECONNRESET code ([03a1896](https://github.com/hirosystems/stacks-blockchain-api/commit/03a1896cff8937a5f39a8b75e5adf51a6344592c)) + ## [5.0.0-beta.3](https://github.com/hirosystems/stacks-blockchain-api/compare/v5.0.0-beta.2...v5.0.0-beta.3) (2022-08-31) From cd381a95b4d0d3f4bb08e447500153c3f652eff6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafael=20C=C3=A1rdenas?= Date: Wed, 31 Aug 2022 12:07:00 -0500 Subject: [PATCH 18/24] fix: detect name transfers and renewals in special circumstances (#1303) * fix: take new owner from nft event * fix: remove check on nft_custody view as it is no longer required * fix: name-renewal with no zonefile hash * fix: import v1 data during replay * fix: headers already sent in redirect * fix: import names first, subdomains last --- src/api/routes/bns/names.ts | 1 - src/datastore/postgres-store.ts | 109 +++++----- src/event-stream/bns/bns-helpers.ts | 73 +++++-- src/event-stream/core-node-message.ts | 2 +- src/event-stream/event-server.ts | 65 ++++-- src/tests-bns/event-server-tests.ts | 289 ++++++++++++++++++++++++++ 6 files changed, 456 insertions(+), 83 deletions(-) diff --git a/src/api/routes/bns/names.ts b/src/api/routes/bns/names.ts index dc416fb8..67bd12be 100644 --- 
a/src/api/routes/bns/names.ts +++ b/src/api/routes/bns/names.ts @@ -98,7 +98,6 @@ export function createBnsNamesRouter(db: DataStore, chainId: ChainID): express.R return; } res.redirect(`${resolverResult.result}/v1/names${req.url}`); - next(); return; } res.status(404).json({ error: `cannot find subdomain ${name}` }); diff --git a/src/datastore/postgres-store.ts b/src/datastore/postgres-store.ts index c275f7e8..03b507df 100644 --- a/src/datastore/postgres-store.ts +++ b/src/datastore/postgres-store.ts @@ -1491,12 +1491,12 @@ export class PgDataStore for (const smartContract of entry.smartContracts) { await this.updateSmartContract(client, entry.tx, smartContract); } - for (const bnsName of entry.names) { - await this.updateNames(client, entry.tx, bnsName); - } for (const namespace of entry.namespaces) { await this.updateNamespaces(client, entry.tx, namespace); } + for (const bnsName of entry.names) { + await this.updateNames(client, entry.tx, bnsName); + } } await this.refreshNftCustody(client, batchedTxData); await this.refreshMaterializedView(client, 'chain_tip'); @@ -1701,12 +1701,12 @@ export class PgDataStore for (const smartContract of entry.smartContracts) { await this.updateSmartContract(client, entry.tx, smartContract); } - for (const bnsName of entry.names) { - await this.updateNames(client, entry.tx, bnsName); - } for (const namespace of entry.namespaces) { await this.updateNamespaces(client, entry.tx, namespace); } + for (const bnsName of entry.names) { + await this.updateNames(client, entry.tx, bnsName); + } } } @@ -6896,8 +6896,59 @@ export class PgDataStore status, canonical, } = bnsName; - // inserting remaining names information in names table - const validZonefileHash = this.validateZonefileHash(zonefile_hash); + // Try to figure out the name's expiration block based on its namespace's lifetime. However, if + // the name was only transferred, keep the expiration from the last register/renewal we had. + let expireBlock = expire_block; + if (status === 'name-transfer') { + const prevExpiration = await client.query<{ expire_block: number }>( + `SELECT expire_block + FROM names + WHERE name = $1 + AND canonical = TRUE AND microblock_canonical = TRUE + ORDER BY registered_at DESC, microblock_sequence DESC, tx_index DESC + LIMIT 1`, + [name] + ); + if (prevExpiration.rowCount > 0) { + expireBlock = prevExpiration.rows[0].expire_block; + } + } else { + const namespaceLifetime = await client.query<{ lifetime: number }>( + `SELECT lifetime + FROM namespaces + WHERE namespace_id = $1 + AND canonical = true AND microblock_canonical = true + ORDER BY namespace_id, ready_block DESC, microblock_sequence DESC, tx_index DESC + LIMIT 1`, + [namespace_id] + ); + if (namespaceLifetime.rowCount > 0) { + expireBlock = registered_at + namespaceLifetime.rows[0].lifetime; + } + } + // If we didn't receive a zonefile, keep the last valid one. 
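+    // (An empty zonefile_hash marks that case; the newest canonical row in `zonefiles` for this
+    // name, joined through `names`, then supplies both the zonefile and its hash.)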
+ let finalZonefile = zonefile; + let finalZonefileHash = zonefile_hash; + if (finalZonefileHash === '') { + const lastZonefile = await client.query<{ zonefile: string; zonefile_hash: string }>( + ` + SELECT z.zonefile, z.zonefile_hash + FROM zonefiles AS z + INNER JOIN names AS n USING (name, tx_id, index_block_hash) + WHERE z.name = $1 + AND n.canonical = TRUE + AND n.microblock_canonical = TRUE + ORDER BY n.registered_at DESC, n.microblock_sequence DESC, n.tx_index DESC + LIMIT 1 + `, + [name] + ); + if (lastZonefile.rowCount > 0) { + finalZonefile = lastZonefile.rows[0].zonefile; + finalZonefileHash = lastZonefile.rows[0].zonefile_hash; + } + } + const validZonefileHash = this.validateZonefileHash(finalZonefileHash); await client.query( ` INSERT INTO zonefiles (name, zonefile, zonefile_hash, tx_id, index_block_hash) @@ -6907,26 +6958,12 @@ export class PgDataStore `, [ name, - zonefile, + finalZonefile, validZonefileHash, hexToBuffer(tx_id), hexToBuffer(blockData.index_block_hash), ] ); - // Try to figure out the name's expiration block based on its namespace's lifetime. - const namespaceLifetime = await client.query<{ lifetime: number }>( - `SELECT lifetime - FROM namespaces - WHERE namespace_id = $1 - AND canonical = true AND microblock_canonical = true - ORDER BY namespace_id, ready_block DESC, microblock_sequence DESC, tx_index DESC - LIMIT 1`, - [namespace_id] - ); - const expireBlock = - namespaceLifetime.rowCount > 0 - ? registered_at + namespaceLifetime.rows[0].lifetime - : expire_block; await client.query( ` INSERT INTO names( @@ -7221,32 +7258,6 @@ export class PgDataStore if (nameZonefile.rowCount === 0) { return; } - // The `names` and `zonefiles` tables only track latest zonefile changes. We need to check - // `nft_custody` for the latest name owner, but only for names that were NOT imported from v1 - // since they did not generate an NFT event for us to track. - if (nameZonefile.rows[0].registered_at !== 1) { - let value: Buffer; - try { - value = bnsNameCV(name); - } catch (error) { - return; - } - const nameCustody = await client.query<{ recipient: string }>( - ` - SELECT recipient - FROM ${includeUnanchored ? 
'nft_custody_unanchored' : 'nft_custody'} - WHERE asset_identifier = $1 AND value = $2 - `, - [getBnsSmartContractId(chainId), value] - ); - if (nameCustody.rowCount === 0) { - return; - } - return { - ...nameZonefile.rows[0], - address: nameCustody.rows[0].recipient, - }; - } return nameZonefile.rows[0]; }); if (queryResult) { diff --git a/src/event-stream/bns/bns-helpers.ts b/src/event-stream/bns/bns-helpers.ts index a73edfe7..485119f6 100644 --- a/src/event-stream/bns/bns-helpers.ts +++ b/src/event-stream/bns/bns-helpers.ts @@ -1,6 +1,11 @@ -import { ChainID, ClarityType, hexToCV } from '@stacks/transactions'; +import { BufferCV, ChainID, ClarityType, hexToCV, StringAsciiCV } from '@stacks/transactions'; import { hexToBuffer, hexToUtf8String } from '../../helpers'; -import { CoreNodeParsedTxMessage } from '../../event-stream/core-node-message'; +import { + CoreNodeEvent, + CoreNodeEventType, + CoreNodeParsedTxMessage, + NftTransferEvent, +} from '../../event-stream/core-node-message'; import { getCoreNodeEndpoint } from '../../core-rpc/client'; import { StacksMainnet, StacksTestnet } from '@stacks/network'; import { URIType } from 'zone-file/dist/zoneFile'; @@ -244,10 +249,48 @@ function isEventFromBnsContract(event: SmartContractEvent): boolean { ); } +export function parseNameRenewalWithNoZonefileHashFromContractCall( + tx: CoreNodeParsedTxMessage, + chainId: ChainID +): DbBnsName | undefined { + const payload = tx.parsed_tx.payload; + if ( + payload.type_id === TxPayloadTypeID.ContractCall && + payload.function_name === 'name-renewal' && + getBnsContractID(chainId) === `${payload.address}.${payload.contract_name}` && + payload.function_args.length === 5 && + hexToCV(payload.function_args[4].hex).type === ClarityType.OptionalNone + ) { + const namespace = (hexToCV(payload.function_args[0].hex) as BufferCV).buffer.toString('utf8'); + const name = (hexToCV(payload.function_args[1].hex) as BufferCV).buffer.toString('utf8'); + return { + name: `${name}.${namespace}`, + namespace_id: namespace, + // NOTE: We're not using the `new_owner` argument here because there's a bug in the BNS + // contract that doesn't actually transfer the name to the given principal: + // https://github.com/stacks-network/stacks-blockchain/issues/2680, maybe this will be fixed + // in Stacks 2.1 + address: tx.sender_address, + // expire_block will be calculated upon DB insert based on the namespace's lifetime. + expire_block: 0, + registered_at: tx.block_height, + // Since we received no zonefile_hash, the previous one will be reused when writing to DB. + zonefile_hash: '', + zonefile: '', + tx_id: tx.parsed_tx.tx_id, + tx_index: tx.core_tx.tx_index, + status: 'name-renewal', + canonical: true, + }; + } +} + export function parseNameFromContractEvent( event: SmartContractEvent, tx: CoreNodeParsedTxMessage, - blockHeight: number + txEvents: CoreNodeEvent[], + blockHeight: number, + chainId: ChainID ): DbBnsName | undefined { if (!isEventFromBnsContract(event)) { return; @@ -259,19 +302,21 @@ export function parseNameFromContractEvent( return; } let name_address = attachment.attachment.metadata.tx_sender.address; - // Is this a `name-transfer` contract call? If so, record the new owner. 
- if ( - attachment.attachment.metadata.op === 'name-transfer' && - tx.parsed_tx.payload.type_id === TxPayloadTypeID.ContractCall && - tx.parsed_tx.payload.function_args.length >= 3 && - tx.parsed_tx.payload.function_args[2].type_id === ClarityTypeID.PrincipalStandard - ) { - const decoded = decodeClarityValue(tx.parsed_tx.payload.function_args[2].hex); - const principal = decoded as ClarityValuePrincipalStandard; - name_address = principal.address; + // Is this a `name-transfer`? If so, look for the new owner in an `nft_transfer` event bundled in + // the same transaction. + if (attachment.attachment.metadata.op === 'name-transfer') { + for (const txEvent of txEvents) { + if ( + txEvent.type === CoreNodeEventType.NftTransferEvent && + txEvent.nft_transfer_event.asset_identifier === `${getBnsContractID(chainId)}::names` + ) { + name_address = txEvent.nft_transfer_event.recipient; + break; + } + } } const name: DbBnsName = { - name: attachment.attachment.metadata.name.concat('.', attachment.attachment.metadata.namespace), + name: `${attachment.attachment.metadata.name}.${attachment.attachment.metadata.namespace}`, namespace_id: attachment.attachment.metadata.namespace, address: name_address, // expire_block will be calculated upon DB insert based on the namespace's lifetime. diff --git a/src/event-stream/core-node-message.ts b/src/event-stream/core-node-message.ts index e8dd76a8..1a7452a5 100644 --- a/src/event-stream/core-node-message.ts +++ b/src/event-stream/core-node-message.ts @@ -76,7 +76,7 @@ export interface StxLockEvent extends CoreNodeEventBase { }; } -interface NftTransferEvent extends CoreNodeEventBase { +export interface NftTransferEvent extends CoreNodeEventBase { type: CoreNodeEventType.NftTransferEvent; nft_transfer_event: { /** Fully qualified asset ID, e.g. 
"ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH.contract-name.asset-name" */ diff --git a/src/event-stream/event-server.ts b/src/event-stream/event-server.ts index 18f1acd0..306b3298 100644 --- a/src/event-stream/event-server.ts +++ b/src/event-stream/event-server.ts @@ -63,7 +63,11 @@ import { } from 'stacks-encoding-native-js'; import { ChainID } from '@stacks/transactions'; import { BnsContractIdentifier } from './bns/bns-constants'; -import { parseNameFromContractEvent, parseNamespaceFromContractEvent } from './bns/bns-helpers'; +import { + parseNameFromContractEvent, + parseNameRenewalWithNoZonefileHashFromContractCall, + parseNamespaceFromContractEvent, +} from './bns/bns-helpers'; async function handleRawEventRequest( eventPath: string, @@ -199,10 +203,15 @@ async function handleMicroblockMessage( }); const updateData: DataStoreMicroblockUpdateData = { microblocks: dbMicroblocks, - txs: parseDataStoreTxEventData(parsedTxs, msg.events, { - block_height: -1, // TODO: fill during initial db insert - index_block_hash: '', - }), + txs: parseDataStoreTxEventData( + parsedTxs, + msg.events, + { + block_height: -1, // TODO: fill during initial db insert + index_block_hash: '', + }, + chainId + ), }; await db.updateMicroblocks(updateData); } @@ -299,7 +308,7 @@ async function handleBlockMessage( block: dbBlock, microblocks: dbMicroblocks, minerRewards: dbMinerRewards, - txs: parseDataStoreTxEventData(parsedTxs, msg.events, msg), + txs: parseDataStoreTxEventData(parsedTxs, msg.events, msg, chainId), }; await db.update(dbData); @@ -311,7 +320,8 @@ function parseDataStoreTxEventData( blockData: { block_height: number; index_block_hash: string; - } + }, + chainId: ChainID ): DataStoreTxEventData[] { const dbData: DataStoreTxEventData[] = parsedTxs.map(tx => { const dbTx: DataStoreBlockUpdateData['txs'][number] = { @@ -325,16 +335,29 @@ function parseDataStoreTxEventData( names: [], namespaces: [], }; - if (tx.parsed_tx.payload.type_id === TxPayloadTypeID.SmartContract) { - const contractId = `${tx.sender_address}.${tx.parsed_tx.payload.contract_name}`; - dbTx.smartContracts.push({ - tx_id: tx.core_tx.txid, - contract_id: contractId, - block_height: blockData.block_height, - source_code: tx.parsed_tx.payload.code_body, - abi: JSON.stringify(tx.core_tx.contract_abi), - canonical: true, - }); + switch (tx.parsed_tx.payload.type_id) { + case TxPayloadTypeID.SmartContract: + const contractId = `${tx.sender_address}.${tx.parsed_tx.payload.contract_name}`; + dbTx.smartContracts.push({ + tx_id: tx.core_tx.txid, + contract_id: contractId, + block_height: blockData.block_height, + source_code: tx.parsed_tx.payload.code_body, + abi: JSON.stringify(tx.core_tx.contract_abi), + canonical: true, + }); + break; + case TxPayloadTypeID.ContractCall: + // Name renewals can happen without a zonefile_hash. In that case, the BNS contract does NOT + // emit a `name-renewal` contract log, causing us to miss this event. This function catches + // those cases. 
+ const name = parseNameRenewalWithNoZonefileHashFromContractCall(tx, chainId); + if (name) { + dbTx.names.push(name); + } + break; + default: + break; } return dbTx; }); @@ -372,7 +395,13 @@ function parseDataStoreTxEventData( if (!parsedTx) { throw new Error(`Unexpected missing tx during BNS parsing by tx_id ${event.txid}`); } - const name = parseNameFromContractEvent(event, parsedTx, blockData.block_height); + const name = parseNameFromContractEvent( + event, + parsedTx, + events, + blockData.block_height, + chainId + ); if (name) { dbTx.names.push(name); } diff --git a/src/tests-bns/event-server-tests.ts b/src/tests-bns/event-server-tests.ts index 1596da79..c8ea8c61 100644 --- a/src/tests-bns/event-server-tests.ts +++ b/src/tests-bns/event-server-tests.ts @@ -129,6 +129,295 @@ describe('BNS event server tests', () => { expect(namespace.result?.ready_block).toBe(2); }); + test('name-transfer called by a contract other than BNS', async () => { + const block = new TestBlockBuilder({ + block_height: 1, + block_hash: '0x09458029b7c0e43e015bd3202c0f9512c2b394e0481bfd2bdd096ae7b5b862f2', + index_block_hash: '0xad9403fc8d8eaef47816555cac51dca9d934384aa9b2581f9b9085509b2af915', + burn_block_height: 743853, + burn_block_hash: '0x00000000000000000008b9d65609c6b39bb89d7da35433e4b287835d7112d6d4', + burn_block_time: 1657123396, + }) + .addTx({ + tx_id: '0x1234', + sender_address: 'SPP117ENNNDQVQ1G3E0N1AP178GXBTC2YNQ3H7J' + }) + .addTxBnsNamespace({ + namespace_id: 'btc', + lifetime: 1000 + }) + .addTxBnsName({ + name: 'dayslikewater.btc', + namespace_id: 'btc', + zonefile_hash: 'b472a266d0bd89c13706a4132ccfb16f7c3b9fcb', + address: 'SPP117ENNNDQVQ1G3E0N1AP178GXBTC2YNQ3H7J' + }) + .addTxNftEvent({ + asset_event_type_id: DbAssetEventTypeId.Mint, + value: bnsNameCV('dayslikewater.btc'), + asset_identifier: 'SP000000000000000000002Q6VF78.bns::names', + recipient: 'SPP117ENNNDQVQ1G3E0N1AP178GXBTC2YNQ3H7J', + }) + .build(); + await db.update(block); + const microblock = new TestMicroblockStreamBuilder() + .addMicroblock({ + microblock_hash: '0xccdd11fef1792979bc54a9b686e9cc4fc3d64f2a9b2d8ee9d34fe27bfab783a4', + microblock_sequence: 0, + parent_index_block_hash: '0xad9403fc8d8eaef47816555cac51dca9d934384aa9b2581f9b9085509b2af915' + }) + .build(); + await db.updateMicroblocks(microblock); + + const name1 = await db.getName({ + name: 'dayslikewater.btc', + includeUnanchored: true, + chainId: ChainID.Mainnet + }); + expect(name1.found).toBe(true); + expect(name1.result?.namespace_id).toBe('btc'); + expect(name1.result?.tx_id).toBe('0x1234'); + expect(name1.result?.status).toBe('name-register'); + expect(name1.result?.expire_block).toBe(1001); + expect(name1.result?.address).toBe('SPP117ENNNDQVQ1G3E0N1AP178GXBTC2YNQ3H7J'); + + const payload = { + "events": [ + { + "txid": "0xa75ebee2c824c4943bf8494b101ea7ee7d44191b4a8f761582ce99ef28befb19", + "type": "contract_event", + "committed": true, + "event_index": 74, + "contract_event": { + "topic": "print", + "raw_value": "0x0c000000010a6174746163686d656e740c00000003106174746163686d656e742d696e646578010000000000000000000000000000e52b04686173680200000014b472a266d0bd89c13706a4132ccfb16f7c3b9fcb086d657461646174610c00000004046e616d65020000000d646179736c696b657761746572096e616d6573706163650200000003627463026f700d0000000d6e616d652d7472616e736665720974782d73656e6465720516016084eead6adbeee180dc0a855609d10eaf4c17", + "contract_identifier": "SP000000000000000000002Q6VF78.bns" + } + }, + { + "txid": "0xa75ebee2c824c4943bf8494b101ea7ee7d44191b4a8f761582ce99ef28befb19", + 
"type": "nft_transfer_event", + "committed": true, + "event_index": 73, + "nft_transfer_event": { + "sender": "SPP117ENNNDQVQ1G3E0N1AP178GXBTC2YNQ3H7J", + "raw_value": "0x0c00000002046e616d65020000000d646179736c696b657761746572096e616d6573706163650200000003627463", + "recipient": "SP1TY00PDWJVNVEX7H7KJGS2K2YXHTQMY8C0G1NVP", + "asset_identifier": "SP000000000000000000002Q6VF78.bns::names" + } + }, + { + "txid": "0xa75ebee2c824c4943bf8494b101ea7ee7d44191b4a8f761582ce99ef28befb19", + "type": "stx_transfer_event", + "committed": true, + "event_index": 71, + "stx_transfer_event": { + "amount": "2500", + "sender": "SP2KAF9RF86PVX3NEE27DFV1CQX0T4WGR41X3S45C.bns-marketplace-v3", + "recipient": "SP2KAF9RF86PVX3NEE27DFV1CQX0T4WGR41X3S45C" + } + } + ], + "block_hash": "0x7d18920cc47f731f186fb9f731d2e8d5029bbab6d73fd012ac3e10637a8e4a37", + "miner_txid": "0xbed35e9e7eb7f98583c87743d3860ab63f2506f7f1efe24740cd37f7708de0b4", + "block_height": 2, + "transactions": [ + { + "txid": "0xa75ebee2c824c4943bf8494b101ea7ee7d44191b4a8f761582ce99ef28befb19", + "raw_tx": "0x00000000010400016084eead6adbeee180dc0a855609d10eaf4c1700000000000000020000000000000bb80000e452e9d87e94a2a4364e89af3ab44b3ce1117afb6505721ff5b801294e1280f0616ee4d21a6ef9bcca1ea15ac65477e79df3427f7fd6c41c80938f8cca6d2cd0030200000002000316a6a7a70f41adbe8eae708ed7ec2cbf41a272182012626e732d6d61726b6574706c6163652d76330500000000000186a0020216016084eead6adbeee180dc0a855609d10eaf4c1716000000000000000000000000000000000000000003626e73056e616d65730c00000002046e616d65020000000d646179736c696b657761746572096e616d6573706163650200000003627463100216a6a7a70f41adbe8eae708ed7ec2cbf41a272182012626e732d6d61726b6574706c6163652d76330a6163636570742d626964000000030200000003627463020000000d646179736c696b6577617465720a0200000014b472a266d0bd89c13706a4132ccfb16f7c3b9fcb", + "status": "success", + "tx_index": 25, + "raw_result": "0x0703", + "contract_abi": null, + "execution_cost": { + "runtime": 381500, + "read_count": 42, + "read_length": 96314, + "write_count": 9, + "write_length": 359 + }, + "microblock_hash": null, + "microblock_sequence": null, + "microblock_parent_hash": null + } + ], + "anchored_cost": { + "runtime": 44194708, + "read_count": 4105, + "read_length": 11476905, + "write_count": 546, + "write_length": 47312 + }, + "burn_block_hash": "0x00000000000000000005e28a41cdb7461953b9424b4fd44a9211a145a1c0346d", + "burn_block_time": 1657125225, + "index_block_hash": "0xb70205d38a8666cbd071239b4ec28ae7d12a2c32341118d7c6d4d1e22f56014e", + "burn_block_height": 743854, + "parent_block_hash": "0x09458029b7c0e43e015bd3202c0f9512c2b394e0481bfd2bdd096ae7b5b862f2", + "parent_microblock": "0xccdd11fef1792979bc54a9b686e9cc4fc3d64f2a9b2d8ee9d34fe27bfab783a4", + "matured_miner_rewards": [], + "parent_burn_block_hash": "0x00000000000000000008b9d65609c6b39bb89d7da35433e4b287835d7112d6d4", + "parent_index_block_hash": "0xad9403fc8d8eaef47816555cac51dca9d934384aa9b2581f9b9085509b2af915", + "parent_burn_block_height": 743853, + "confirmed_microblocks_cost": { + "runtime": 48798, + "read_count": 10, + "read_length": 40042, + "write_count": 3, + "write_length": 19 + }, + "parent_microblock_sequence": 0, + "parent_burn_block_timestamp": 1657123396 + }; + + await httpPostRequest({ + host: '127.0.0.1', + port: eventServer.serverAddress.port, + path: '/new_block', + headers: { 'Content-Type': 'application/json' }, + body: Buffer.from(JSON.stringify(payload), 'utf8'), + throwOnNotOK: true, + }); + + const name2 = await db.getName({ + name: 'dayslikewater.btc', + includeUnanchored: true, + 
chainId: ChainID.Mainnet + }); + expect(name2.found).toBe(true); + expect(name2.result?.namespace_id).toBe('btc'); + expect(name2.result?.tx_id).toBe('0xa75ebee2c824c4943bf8494b101ea7ee7d44191b4a8f761582ce99ef28befb19'); + expect(name2.result?.status).toBe('name-transfer'); + expect(name2.result?.expire_block).toBe(1001); // Unchanged as it was not renewed + expect(name2.result?.address).toBe('SP1TY00PDWJVNVEX7H7KJGS2K2YXHTQMY8C0G1NVP'); + }); + + test('name-renewal called with no zonefile_hash', async () => { + const block = new TestBlockBuilder({ + block_height: 1, + block_hash: '0xf81ef7f114213b9034a4378345a931a97c781fab398c3d7a2053f0d0bf48d311', + index_block_hash: '0xaec282925b5096c0bd98588d25a97e134bcc4f19b6600859fa267cf0ee4eaf2d', + burn_block_height: 726955, + burn_block_hash: '0x00000000000000000001523f01cb4304d39527454d2eec79817b50c033a5c5d9', + burn_block_time: 1647068146, + }) + .addTx({ + tx_id: '0x1234', + sender_address: 'SP3GWTV1SMF9HDS4VY5NMM833CHH266W4YBASVYMZ' + }) + .addTxBnsNamespace({ + namespace_id: 'id', + lifetime: 1000 + }) + .addTxBnsName({ + name: 'friedger.id', + namespace_id: 'id', + zonefile_hash: 'b472a266d0bd89c13706a4132ccfb16f7c3b9fcb', + address: 'SP3GWTV1SMF9HDS4VY5NMM833CHH266W4YBASVYMZ' + }) + .addTxNftEvent({ + asset_event_type_id: DbAssetEventTypeId.Mint, + value: bnsNameCV('friedger.id'), + asset_identifier: 'SP000000000000000000002Q6VF78.bns::names', + recipient: 'SP3GWTV1SMF9HDS4VY5NMM833CHH266W4YBASVYMZ', + }) + .build(); + await db.update(block); + const microblock = new TestMicroblockStreamBuilder() + .addMicroblock({ + microblock_hash: '0x640362ec47c40de3337491993e42efe60d05187431633ab03c3f5d33e70d1f8e', + microblock_sequence: 0, + parent_index_block_hash: '0xaec282925b5096c0bd98588d25a97e134bcc4f19b6600859fa267cf0ee4eaf2d' + }) + .build(); + await db.updateMicroblocks(microblock); + + const name1 = await db.getName({ + name: 'friedger.id', + includeUnanchored: true, + chainId: ChainID.Mainnet + }); + expect(name1.found).toBe(true); + expect(name1.result?.namespace_id).toBe('id'); + expect(name1.result?.tx_id).toBe('0x1234'); + expect(name1.result?.status).toBe('name-register'); + expect(name1.result?.expire_block).toBe(1001); + expect(name1.result?.address).toBe('SP3GWTV1SMF9HDS4VY5NMM833CHH266W4YBASVYMZ'); + + const payload = { + "events": [], + "block_hash": "0xaaee893667244adcb8581abac372f1f8c385d402b71e8e8b4ac91e8066024fd5", + "miner_txid": "0x6ff493c6b98b9cff0638c7c5276af8e627b8ed779965a5f1c11bbc0810834b3e", + "block_height": 2, + "transactions": [ + { + "txid": "0xf037c8da8210e2a348bbecd3bc44901de875d3774c5fce49cb75d95f2dc2ca4d", + "raw_tx": "0x00000000010500e1cd6c39a3d316e49bf16b4a20636462231b84f200000000000000000000000000000000000094f2c8529dcb8a55a5cfd4434c68cae9cd54f26f01c656369585db3ba364150a4fead679adf35cf5ba1026656b3873daf3380f48ec6dcc175ada868e531decf5001d04c185cad28a3f5299d3fcbcbcbe66b2e1e227000000000000000000000000000186a0000064cc0eb565e85c0d4110c9a760c8fdad21999409f89320e355f326c144b8ada4268244f80734170cea96f683d2431b59f07f276a10efc80793d4dceef8feb2310302000000000216000000000000000000000000000000000000000003626e730c6e616d652d72656e6577616c000000050200000002696402000000086672696564676572010000000000000000000000000001a72a0909", + "status": "success", + "tx_index": 2, + "raw_result": "0x0703", + "contract_abi": null, + "execution_cost": { + "runtime": 184253, + "read_count": 11, + "read_length": 43250, + "write_count": 1, + "write_length": 143 + }, + "microblock_hash": null, + "microblock_sequence": null, + 
"microblock_parent_hash": null + } + ], + "anchored_cost": { + "runtime": 28375070, + "read_count": 8888, + "read_length": 1085153, + "write_count": 593, + "write_length": 156284 + }, + "burn_block_hash": "0x0000000000000000000552fb5fd8c08ad8f1ef30c239369a8a3380ec1566047a", + "burn_block_time": 1647068392, + "index_block_hash": "0x9ff46918054b1aa94571a60e14921a56977f26af2adcbf4a7f64138566feba48", + "burn_block_height": 726956, + "parent_block_hash": "0xf81ef7f114213b9034a4378345a931a97c781fab398c3d7a2053f0d0bf48d311", + "parent_microblock": "0x640362ec47c40de3337491993e42efe60d05187431633ab03c3f5d33e70d1f8e", + "matured_miner_rewards": [], + "parent_burn_block_hash": "0x00000000000000000001523f01cb4304d39527454d2eec79817b50c033a5c5d9", + "parent_index_block_hash": "0xaec282925b5096c0bd98588d25a97e134bcc4f19b6600859fa267cf0ee4eaf2d", + "parent_burn_block_height": 726955, + "confirmed_microblocks_cost": { + "runtime": 360206, + "read_count": 38, + "read_length": 95553, + "write_count": 8, + "write_length": 378 + }, + "parent_microblock_sequence": 0, + "parent_burn_block_timestamp": 1647068146 + }; + + await httpPostRequest({ + host: '127.0.0.1', + port: eventServer.serverAddress.port, + path: '/new_block', + headers: { 'Content-Type': 'application/json' }, + body: Buffer.from(JSON.stringify(payload), 'utf8'), + throwOnNotOK: true, + }); + + const name2 = await db.getName({ + name: 'friedger.id', + includeUnanchored: true, + chainId: ChainID.Mainnet + }); + expect(name2.found).toBe(true); + expect(name2.result?.namespace_id).toBe('id'); + expect(name2.result?.tx_id).toBe('0xf037c8da8210e2a348bbecd3bc44901de875d3774c5fce49cb75d95f2dc2ca4d'); + expect(name2.result?.status).toBe('name-renewal'); + expect(name2.result?.expire_block).toBe(1002); // Updated correctly + expect(name2.result?.address).toBe('SP3GWTV1SMF9HDS4VY5NMM833CHH266W4YBASVYMZ'); + }); + test('/attachments/new with re-orged zonefiles', async () => { const block1 = new TestBlockBuilder({ block_height: 1, From 040eb7e8c29b454adea537fcbe32e8308f5678a5 Mon Sep 17 00:00:00 2001 From: semantic-release-bot Date: Wed, 31 Aug 2022 17:13:09 +0000 Subject: [PATCH 19/24] chore(release): 5.0.0-beta.5 [skip ci] ## [5.0.0-beta.5](https://github.com/hirosystems/stacks-blockchain-api/compare/v5.0.0-beta.4...v5.0.0-beta.5) (2022-08-31) ### Bug Fixes * detect name transfers and renewals in special circumstances ([#1303](https://github.com/hirosystems/stacks-blockchain-api/issues/1303)) ([cd381a9](https://github.com/hirosystems/stacks-blockchain-api/commit/cd381a95b4d0d3f4bb08e447500153c3f652eff6)) --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c5ba0261..ebbb5e96 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,10 @@ +## [5.0.0-beta.5](https://github.com/hirosystems/stacks-blockchain-api/compare/v5.0.0-beta.4...v5.0.0-beta.5) (2022-08-31) + + +### Bug Fixes + +* detect name transfers and renewals in special circumstances ([#1303](https://github.com/hirosystems/stacks-blockchain-api/issues/1303)) ([cd381a9](https://github.com/hirosystems/stacks-blockchain-api/commit/cd381a95b4d0d3f4bb08e447500153c3f652eff6)) + ## [5.0.0-beta.4](https://github.com/hirosystems/stacks-blockchain-api/compare/v5.0.0-beta.3...v5.0.0-beta.4) (2022-08-31) From bbf4b2d2b8c7f6ed30bfda6eaa430d5c2e84cdf5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafael=20C=C3=A1rdenas?= Date: Thu, 1 Sep 2022 08:21:02 -0500 Subject: [PATCH 20/24] feat: add indexes for index_block_hash on BNS tables (#1304) * feat: add bns indexes 
for index_block_hash * fix: go back to default log level before importing subdomains --- src/event-replay/event-replay.ts | 3 ++- src/migrations/1608030374841_namespaces.ts | 1 + src/migrations/1608030374842_names.ts | 1 + src/migrations/1610030345948_subdomains.ts | 1 + 4 files changed, 5 insertions(+), 1 deletion(-) diff --git a/src/event-replay/event-replay.ts b/src/event-replay/event-replay.ts index 09c6700d..f0022d6a 100644 --- a/src/event-replay/event-replay.ts +++ b/src/event-replay/event-replay.ts @@ -2,7 +2,7 @@ import * as path from 'path'; import * as fs from 'fs'; import { cycleMigrations, dangerousDropAllTables, PgDataStore } from '../datastore/postgres-store'; import { startEventServer } from '../event-stream/event-server'; -import { getApiConfiguredChainID, httpPostRequest, logger } from '../helpers'; +import { defaultLogLevel, getApiConfiguredChainID, httpPostRequest, logger } from '../helpers'; import { findBnsGenesisBlockData, findTsvBlockHeight, getDbBlockHeight } from './helpers'; import { importV1BnsNames, importV1BnsSubdomains, importV1TokenOfferingData } from '../import-v1'; @@ -179,6 +179,7 @@ export async function importEventsFromTsv( } await db.finishEventReplay(); if (process.env.BNS_IMPORT_DIR) { + logger.level = defaultLogLevel; await importV1BnsSubdomains(db, process.env.BNS_IMPORT_DIR, tsvGenesisBlockData); } console.log(`Event import and playback successful.`); diff --git a/src/migrations/1608030374841_namespaces.ts b/src/migrations/1608030374841_namespaces.ts index 1ac9280d..0c5694c4 100644 --- a/src/migrations/1608030374841_namespaces.ts +++ b/src/migrations/1608030374841_namespaces.ts @@ -91,6 +91,7 @@ export async function up(pgm: MigrationBuilder): Promise { }, }); + pgm.createIndex('namespaces', 'index_block_hash'); pgm.createIndex('namespaces', [ { name: 'ready_block', sort: 'DESC' }, { name: 'microblock_sequence', sort: 'DESC' }, diff --git a/src/migrations/1608030374842_names.ts b/src/migrations/1608030374842_names.ts index 7d0feaf3..b745a769 100644 --- a/src/migrations/1608030374842_names.ts +++ b/src/migrations/1608030374842_names.ts @@ -84,6 +84,7 @@ export async function up(pgm: MigrationBuilder): Promise { }); pgm.createIndex('names', 'namespace_id'); + pgm.createIndex('names', 'index_block_hash'); pgm.createIndex('names', [ { name: 'registered_at', sort: 'DESC' }, { name: 'microblock_sequence', sort: 'DESC' }, diff --git a/src/migrations/1610030345948_subdomains.ts b/src/migrations/1610030345948_subdomains.ts index bcd24daa..541a454c 100644 --- a/src/migrations/1610030345948_subdomains.ts +++ b/src/migrations/1610030345948_subdomains.ts @@ -85,6 +85,7 @@ export async function up(pgm: MigrationBuilder): Promise { }); pgm.createIndex('subdomains', 'name'); + pgm.createIndex('subdomains', 'index_block_hash'); pgm.createIndex('subdomains', [ { name: 'block_height', sort: 'DESC' }, { name: 'microblock_sequence', sort: 'DESC' }, From ed8b6f5839c0c28339bb123e0411902ec78a0209 Mon Sep 17 00:00:00 2001 From: semantic-release-bot Date: Thu, 1 Sep 2022 13:36:31 +0000 Subject: [PATCH 21/24] chore(release): 5.0.0-beta.6 [skip ci] ## [5.0.0-beta.6](https://github.com/hirosystems/stacks-blockchain-api/compare/v5.0.0-beta.5...v5.0.0-beta.6) (2022-09-01) ### Features * add indexes for index_block_hash on BNS tables ([#1304](https://github.com/hirosystems/stacks-blockchain-api/issues/1304)) ([bbf4b2d](https://github.com/hirosystems/stacks-blockchain-api/commit/bbf4b2d2b8c7f6ed30bfda6eaa430d5c2e84cdf5)) --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) 
diff --git a/CHANGELOG.md b/CHANGELOG.md index ebbb5e96..8f7fea14 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,10 @@ +## [5.0.0-beta.6](https://github.com/hirosystems/stacks-blockchain-api/compare/v5.0.0-beta.5...v5.0.0-beta.6) (2022-09-01) + + +### Features + +* add indexes for index_block_hash on BNS tables ([#1304](https://github.com/hirosystems/stacks-blockchain-api/issues/1304)) ([bbf4b2d](https://github.com/hirosystems/stacks-blockchain-api/commit/bbf4b2d2b8c7f6ed30bfda6eaa430d5c2e84cdf5)) + ## [5.0.0-beta.5](https://github.com/hirosystems/stacks-blockchain-api/compare/v5.0.0-beta.4...v5.0.0-beta.5) (2022-08-31) From 6a129369c6d9fcdc79b5a7ad288d37784cbe77cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafael=20C=C3=A1rdenas?= Date: Wed, 7 Sep 2022 08:30:27 -0500 Subject: [PATCH 22/24] fix: filter BNS processing for successful txs only (#1309) --- src/event-stream/bns/bns-helpers.ts | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/event-stream/bns/bns-helpers.ts b/src/event-stream/bns/bns-helpers.ts index 485119f6..1166353b 100644 --- a/src/event-stream/bns/bns-helpers.ts +++ b/src/event-stream/bns/bns-helpers.ts @@ -243,6 +243,7 @@ export function getBnsContractID(chainId: ChainID) { function isEventFromBnsContract(event: SmartContractEvent): boolean { return ( + event.committed === true && event.contract_event.topic === printTopic && (event.contract_event.contract_identifier === BnsContractIdentifier.mainnet || event.contract_event.contract_identifier === BnsContractIdentifier.testnet) @@ -255,6 +256,7 @@ export function parseNameRenewalWithNoZonefileHashFromContractCall( ): DbBnsName | undefined { const payload = tx.parsed_tx.payload; if ( + tx.core_tx.status === 'success' && payload.type_id === TxPayloadTypeID.ContractCall && payload.function_name === 'name-renewal' && getBnsContractID(chainId) === `${payload.address}.${payload.contract_name}` && @@ -292,7 +294,7 @@ export function parseNameFromContractEvent( blockHeight: number, chainId: ChainID ): DbBnsName | undefined { - if (!isEventFromBnsContract(event)) { + if (tx.core_tx.status !== 'success' || !isEventFromBnsContract(event)) { return; } let attachment: Attachment; @@ -338,7 +340,7 @@ export function parseNamespaceFromContractEvent( tx: CoreNodeParsedTxMessage, blockHeight: number ): DbBnsNamespace | undefined { - if (!isEventFromBnsContract(event)) { + if (tx.core_tx.status !== 'success' || !isEventFromBnsContract(event)) { return; } // Look for a `namespace-ready` BNS print event. 
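Note on the change above: the success-filtering introduced in #1309 reduces to a single predicate combining the transaction status, the event commit flag, and the BNS boot-contract check. Below is a minimal, self-contained sketch of that pattern, assuming simplified stand-in types (`ParsedTx` and `SmartContractEvent` here are hypothetical reductions of the API's real event-stream types), and with the testnet contract identifier assumed to mirror `BnsContractIdentifier.testnet`; it is an illustration of the guard, not the API's actual implementation.

```typescript
// Minimal sketch of the BNS event guard from #1309. Types are simplified
// stand-ins for the API's event-stream types; the testnet boot-contract id
// below is an assumption.
interface ParsedTx {
  core_tx: { status: string };
}

interface SmartContractEvent {
  committed: boolean;
  contract_event: {
    topic: string;
    contract_identifier: string;
  };
}

const printTopic = 'print';
const bnsMainnetId = 'SP000000000000000000002Q6VF78.bns';
const bnsTestnetId = 'ST000000000000000000002AMW42H.bns'; // assumed testnet boot address

function shouldProcessBnsEvent(tx: ParsedTx, event: SmartContractEvent): boolean {
  // Only committed `print` events emitted by the BNS boot contract during a
  // successful transaction may mutate names/namespaces state; failed calls,
  // uncommitted events, and third-party contracts are filtered out.
  return (
    tx.core_tx.status === 'success' &&
    event.committed &&
    event.contract_event.topic === printTopic &&
    (event.contract_event.contract_identifier === bnsMainnetId ||
      event.contract_event.contract_identifier === bnsTestnetId)
  );
}
```

The patch applies this filtering in both `parseNameFromContractEvent` and `parseNamespaceFromContractEvent`, so name and namespace operations from failed or rolled-back transactions never reach the datastore.
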
From 1a70fa708fce5d1bccce7c20dbe89096a43cb868 Mon Sep 17 00:00:00 2001 From: semantic-release-bot Date: Wed, 7 Sep 2022 13:36:20 +0000 Subject: [PATCH 23/24] chore(release): 5.0.0-beta.7 [skip ci] ## [5.0.0-beta.7](https://github.com/hirosystems/stacks-blockchain-api/compare/v5.0.0-beta.6...v5.0.0-beta.7) (2022-09-07) ### Bug Fixes * filter BNS processing for successful txs only ([#1309](https://github.com/hirosystems/stacks-blockchain-api/issues/1309)) ([6a12936](https://github.com/hirosystems/stacks-blockchain-api/commit/6a129369c6d9fcdc79b5a7ad288d37784cbe77cc)) --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8f7fea14..7e83be19 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,10 @@ +## [5.0.0-beta.7](https://github.com/hirosystems/stacks-blockchain-api/compare/v5.0.0-beta.6...v5.0.0-beta.7) (2022-09-07) + + +### Bug Fixes + +* filter BNS processing for successful txs only ([#1309](https://github.com/hirosystems/stacks-blockchain-api/issues/1309)) ([6a12936](https://github.com/hirosystems/stacks-blockchain-api/commit/6a129369c6d9fcdc79b5a7ad288d37784cbe77cc)) + ## [5.0.0-beta.6](https://github.com/hirosystems/stacks-blockchain-api/compare/v5.0.0-beta.5...v5.0.0-beta.6) (2022-09-01) From ad35ed346652c79521a6b9c9944a2236d65debb2 Mon Sep 17 00:00:00 2001 From: semantic-release-bot Date: Wed, 7 Sep 2022 13:52:25 +0000 Subject: [PATCH 24/24] chore(release): 5.0.0 [skip ci] MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## [5.0.0](https://github.com/hirosystems/stacks-blockchain-api/compare/v4.1.2...v5.0.0) (2022-09-07) ### ⚠ BREAKING CHANGES * optimize tables and improve canonical treatment of BNS data (#1287) ### Features * add indexes for index_block_hash on BNS tables ([#1304](https://github.com/hirosystems/stacks-blockchain-api/issues/1304)) ([bbf4b2d](https://github.com/hirosystems/stacks-blockchain-api/commit/bbf4b2d2b8c7f6ed30bfda6eaa430d5c2e84cdf5)) * optimize tables and improve canonical treatment of BNS data ([#1287](https://github.com/hirosystems/stacks-blockchain-api/issues/1287)) ([1f64818](https://github.com/hirosystems/stacks-blockchain-api/commit/1f648187b8c701e802a06bac52b077fd10571ff7)) ### Bug Fixes * add postgres connection error checking for ECONNRESET code ([03a1896](https://github.com/hirosystems/stacks-blockchain-api/commit/03a1896cff8937a5f39a8b75e5adf51a6344592c)) * bump version ([3863cce](https://github.com/hirosystems/stacks-blockchain-api/commit/3863cce1a64cf7a4c6cffd4f888c049cfd3ada65)) * detect name transfers and renewals in special circumstances ([#1303](https://github.com/hirosystems/stacks-blockchain-api/issues/1303)) ([cd381a9](https://github.com/hirosystems/stacks-blockchain-api/commit/cd381a95b4d0d3f4bb08e447500153c3f652eff6)) * filter BNS processing for successful txs only ([#1309](https://github.com/hirosystems/stacks-blockchain-api/issues/1309)) ([6a12936](https://github.com/hirosystems/stacks-blockchain-api/commit/6a129369c6d9fcdc79b5a7ad288d37784cbe77cc)) * import BNS v1 data during event replay ([#1301](https://github.com/hirosystems/stacks-blockchain-api/issues/1301)) ([bc59817](https://github.com/hirosystems/stacks-blockchain-api/commit/bc59817aa98dd3a978a27b73d14738b64eb823f9)) --- CHANGELOG.md | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7e83be19..eb44683e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,24 @@ +## 
[5.0.0](https://github.com/hirosystems/stacks-blockchain-api/compare/v4.1.2...v5.0.0) (2022-09-07) + + +### ⚠ BREAKING CHANGES + +* optimize tables and improve canonical treatment of BNS data (#1287) + +### Features + +* add indexes for index_block_hash on BNS tables ([#1304](https://github.com/hirosystems/stacks-blockchain-api/issues/1304)) ([bbf4b2d](https://github.com/hirosystems/stacks-blockchain-api/commit/bbf4b2d2b8c7f6ed30bfda6eaa430d5c2e84cdf5)) +* optimize tables and improve canonical treatment of BNS data ([#1287](https://github.com/hirosystems/stacks-blockchain-api/issues/1287)) ([1f64818](https://github.com/hirosystems/stacks-blockchain-api/commit/1f648187b8c701e802a06bac52b077fd10571ff7)) + + +### Bug Fixes + +* add postgres connection error checking for ECONNRESET code ([03a1896](https://github.com/hirosystems/stacks-blockchain-api/commit/03a1896cff8937a5f39a8b75e5adf51a6344592c)) +* bump version ([3863cce](https://github.com/hirosystems/stacks-blockchain-api/commit/3863cce1a64cf7a4c6cffd4f888c049cfd3ada65)) +* detect name transfers and renewals in special circumstances ([#1303](https://github.com/hirosystems/stacks-blockchain-api/issues/1303)) ([cd381a9](https://github.com/hirosystems/stacks-blockchain-api/commit/cd381a95b4d0d3f4bb08e447500153c3f652eff6)) +* filter BNS processing for successful txs only ([#1309](https://github.com/hirosystems/stacks-blockchain-api/issues/1309)) ([6a12936](https://github.com/hirosystems/stacks-blockchain-api/commit/6a129369c6d9fcdc79b5a7ad288d37784cbe77cc)) +* import BNS v1 data during event replay ([#1301](https://github.com/hirosystems/stacks-blockchain-api/issues/1301)) ([bc59817](https://github.com/hirosystems/stacks-blockchain-api/commit/bc59817aa98dd3a978a27b73d14738b64eb823f9)) + ## [5.0.0-beta.7](https://github.com/hirosystems/stacks-blockchain-api/compare/v5.0.0-beta.6...v5.0.0-beta.7) (2022-09-07)
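
As a companion to the "detect name transfers and renewals in special circumstances" entry above: a BNS `name-renewal` call that omits the zonefile hash produces no `name-renewal` print event, so the event server has to recognize it from the contract-call payload itself (see `parseNameRenewalWithNoZonefileHashFromContractCall` in the event-server and bns-helpers changes earlier in this series). A minimal sketch of that predicate follows, assuming simplified stand-in types and enum values; the mainnet contract id is hardcoded here for brevity, whereas the real code compares against `getBnsContractID(chainId)` and also decodes the Clarity `name`/`namespace` arguments.

```typescript
// Minimal sketch of detecting a BNS `name-renewal` contract call that carries
// no zonefile hash (#1303). Types and enum values are simplified stand-ins;
// the actual parsing lives in src/event-stream/bns/bns-helpers.ts.
enum TxPayloadTypeID {
  SmartContract = 1,
  ContractCall = 2,
}

interface ContractCallPayload {
  type_id: TxPayloadTypeID;
  address: string;
  contract_name: string;
  function_name: string;
}

const bnsMainnetId = 'SP000000000000000000002Q6VF78.bns';

function isNameRenewalWithNoZonefileHash(
  txStatus: string,
  payload: ContractCallPayload,
  zonefileHashArg: string | undefined
): boolean {
  // The BNS contract emits a `name-renewal` print event only when a new
  // zonefile hash is supplied, so renewals without one must be caught by
  // inspecting the call itself: a successful tx calling `name-renewal`
  // directly on the BNS boot contract, with the zonefile-hash argument unset.
  return (
    txStatus === 'success' &&
    payload.type_id === TxPayloadTypeID.ContractCall &&
    payload.function_name === 'name-renewal' &&
    `${payload.address}.${payload.contract_name}` === bnsMainnetId &&
    zonefileHashArg === undefined
  );
}
```

When this predicate matches, a `name-renewal` record is pushed with the renewed expiration, which is what the `friedger.id` test above asserts: with a 1000-block lifetime, `expire_block` advances from 1001 to 1002 after the renewal lands at block height 2.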