mirror of https://github.com/alexgo-io/stacks-blockchain-api.git (synced 2026-01-12 16:53:19 +08:00)

Commit: chore: merge master

.github/workflows/ci.yml (vendored, 1 change)
@@ -512,6 +512,7 @@ jobs:
            @semantic-release/changelog
            @semantic-release/git
            @semantic-release/exec
+           conventional-changelog-conventionalcommits

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
CHANGELOG.md (81 changes)

@@ -1,3 +1,84 @@
## [5.0.0](https://github.com/hirosystems/stacks-blockchain-api/compare/v4.1.2...v5.0.0) (2022-09-07)

### ⚠ BREAKING CHANGES

* optimize tables and improve canonical treatment of BNS data (#1287)

### Features

* add indexes for index_block_hash on BNS tables ([#1304](https://github.com/hirosystems/stacks-blockchain-api/issues/1304)) ([bbf4b2d](https://github.com/hirosystems/stacks-blockchain-api/commit/bbf4b2d2b8c7f6ed30bfda6eaa430d5c2e84cdf5))
* optimize tables and improve canonical treatment of BNS data ([#1287](https://github.com/hirosystems/stacks-blockchain-api/issues/1287)) ([1f64818](https://github.com/hirosystems/stacks-blockchain-api/commit/1f648187b8c701e802a06bac52b077fd10571ff7))

### Bug Fixes

* add postgres connection error checking for ECONNRESET code ([03a1896](https://github.com/hirosystems/stacks-blockchain-api/commit/03a1896cff8937a5f39a8b75e5adf51a6344592c))
* bump version ([3863cce](https://github.com/hirosystems/stacks-blockchain-api/commit/3863cce1a64cf7a4c6cffd4f888c049cfd3ada65))
* detect name transfers and renewals in special circumstances ([#1303](https://github.com/hirosystems/stacks-blockchain-api/issues/1303)) ([cd381a9](https://github.com/hirosystems/stacks-blockchain-api/commit/cd381a95b4d0d3f4bb08e447500153c3f652eff6))
* filter BNS processing for successful txs only ([#1309](https://github.com/hirosystems/stacks-blockchain-api/issues/1309)) ([6a12936](https://github.com/hirosystems/stacks-blockchain-api/commit/6a129369c6d9fcdc79b5a7ad288d37784cbe77cc))
* import BNS v1 data during event replay ([#1301](https://github.com/hirosystems/stacks-blockchain-api/issues/1301)) ([bc59817](https://github.com/hirosystems/stacks-blockchain-api/commit/bc59817aa98dd3a978a27b73d14738b64eb823f9))

## [5.0.0-beta.7](https://github.com/hirosystems/stacks-blockchain-api/compare/v5.0.0-beta.6...v5.0.0-beta.7) (2022-09-07)

### Bug Fixes

* filter BNS processing for successful txs only ([#1309](https://github.com/hirosystems/stacks-blockchain-api/issues/1309)) ([6a12936](https://github.com/hirosystems/stacks-blockchain-api/commit/6a129369c6d9fcdc79b5a7ad288d37784cbe77cc))

## [5.0.0-beta.6](https://github.com/hirosystems/stacks-blockchain-api/compare/v5.0.0-beta.5...v5.0.0-beta.6) (2022-09-01)

### Features

* add indexes for index_block_hash on BNS tables ([#1304](https://github.com/hirosystems/stacks-blockchain-api/issues/1304)) ([bbf4b2d](https://github.com/hirosystems/stacks-blockchain-api/commit/bbf4b2d2b8c7f6ed30bfda6eaa430d5c2e84cdf5))

## [5.0.0-beta.5](https://github.com/hirosystems/stacks-blockchain-api/compare/v5.0.0-beta.4...v5.0.0-beta.5) (2022-08-31)

### Bug Fixes

* detect name transfers and renewals in special circumstances ([#1303](https://github.com/hirosystems/stacks-blockchain-api/issues/1303)) ([cd381a9](https://github.com/hirosystems/stacks-blockchain-api/commit/cd381a95b4d0d3f4bb08e447500153c3f652eff6))

## [5.0.0-beta.4](https://github.com/hirosystems/stacks-blockchain-api/compare/v5.0.0-beta.3...v5.0.0-beta.4) (2022-08-31)

### Bug Fixes

* add postgres connection error checking for ECONNRESET code ([03a1896](https://github.com/hirosystems/stacks-blockchain-api/commit/03a1896cff8937a5f39a8b75e5adf51a6344592c))

## [5.0.0-beta.3](https://github.com/hirosystems/stacks-blockchain-api/compare/v5.0.0-beta.2...v5.0.0-beta.3) (2022-08-31)

### Bug Fixes

* import BNS v1 data during event replay ([#1301](https://github.com/hirosystems/stacks-blockchain-api/issues/1301)) ([bc59817](https://github.com/hirosystems/stacks-blockchain-api/commit/bc59817aa98dd3a978a27b73d14738b64eb823f9))

## [5.0.0-beta.2](https://github.com/hirosystems/stacks-blockchain-api/compare/v5.0.0-beta.1...v5.0.0-beta.2) (2022-08-26)

### Bug Fixes

* bump version ([3863cce](https://github.com/hirosystems/stacks-blockchain-api/commit/3863cce1a64cf7a4c6cffd4f888c049cfd3ada65))

## [5.0.0-beta.1](https://github.com/hirosystems/stacks-blockchain-api/compare/v4.1.2...v5.0.0-beta.1) (2022-08-26)

### ⚠ BREAKING CHANGES

* optimize tables and improve canonical treatment of BNS data (#1287)

### Features

* optimize tables and improve canonical treatment of BNS data ([#1287](https://github.com/hirosystems/stacks-blockchain-api/issues/1287)) ([1f64818](https://github.com/hirosystems/stacks-blockchain-api/commit/1f648187b8c701e802a06bac52b077fd10571ff7))

## [4.1.2](https://github.com/hirosystems/stacks-blockchain-api/compare/v4.1.1...v4.1.2) (2022-08-18)

### Bug Fixes

* refresh materialized views concurrently ([#1270](https://github.com/hirosystems/stacks-blockchain-api/issues/1270)) ([057c541](https://github.com/hirosystems/stacks-blockchain-api/commit/057c541b8c31402b6ff823cce0e3ed435ebe74a8))

## [4.1.1](https://github.com/hirosystems/stacks-blockchain-api/compare/v4.1.0...v4.1.1) (2022-08-03)
@@ -0,0 +1,10 @@
{
  "network_identifier": {
    "blockchain": "stacks",
    "network": "mainnet"
  },
  "account_identifier": {
    "address": "SP2W6477BT5CRWVC5D5RFNPNAR8R2NW63SMMCAWMC",
    "metadata": {}
  }
}
@@ -0,0 +1,11 @@
{
  "network_identifier": {
    "blockchain": "stacks",
    "network": "mainnet"
  },
  "public_key": {
    "hex_bytes": "025c13b2fc2261956d8a4ad07d481b1a3b2cbf93a24f992249a61c3a1c4de79c51",
    "curve_type": "secp256k1"
  },
  "metadata": {}
}
@@ -0,0 +1,9 @@
[
  {
    "network_identifier": {
      "blockchain": "stacks",
      "network": "mainnet"
    },
    "metadata": {}
  }
]
docs/api/rosetta/rosetta-block-request-body.example.json (new file, 10 lines)

@@ -0,0 +1,10 @@
{
  "network_identifier": {
    "blockchain": "stacks",
    "network": "mainnet"
  },
  "block_identifier": {
    "index": 71107,
    "hash": "0xce7e16561150f3a379845f4e96c3dd8f8396e397495821c9eec6b429391c529c"
  }
}
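These Rosetta example bodies are request payloads for the corresponding Rosetta endpoints. As a usage sketch (not part of this diff): assuming a locally running stacks-blockchain-api on its default port 3999 with Rosetta mounted under `/rosetta/v1`, the block request above could be posted like this:

```shell
# Sketch: POST the example body to the Rosetta block endpoint.
# Host, port, and the /rosetta/v1 prefix are assumptions, not shown in this diff.
curl -s -X POST http://localhost:3999/rosetta/v1/block \
  -H 'Content-Type: application/json' \
  -d @docs/api/rosetta/rosetta-block-request-body.example.json
```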
@@ -0,0 +1,12 @@
{
  "network_identifier": {
    "blockchain": "stacks",
    "network": "mainnet"
  },
  "block_identifier": {
    "hash": "0xce7e16561150f3a379845f4e96c3dd8f8396e397495821c9eec6b429391c529c"
  },
  "transaction_identifier": {
    "hash": "0x49354cc7b18dc5296c945a8e89f7d758dac14f1ab38d4c33dfe45ec1765ab339"
  }
}
@@ -0,0 +1,34 @@
{
  "network_identifier": {
    "blockchain": "stacks",
    "network": "mainnet"
  },
  "options": {
    "sender_address": "SP3Y0BBCCCBFAMYCYN3F35CX9MH1J2GATP53JX3FA",
    "type": "stack_stx",
    "status": "success",
    "token_transfer_recipient_address": "string",
    "amount": "500000",
    "symbol": "STX",
    "decimals": 6,
    "gas_limit": 0,
    "gas_price": 0,
    "suggested_fee_multiplier": 0,
    "max_fee": "12380898",
    "fee": "fee",
    "size": 260,
    "memo": "test.memo",
    "number_of_cycles": 0,
    "contract_address": "SP112T7BYCNEDCZ9TCYXCXFNJG9WXX5Q5SG6DSBAM",
    "contract_name": "STX transfer",
    "burn_block_height": 0,
    "delegate_to": "cb3df38053d132895220b9ce471f6b676db5b9bf0b4adefb55f2118ece2478df01.STB44HYPYAT2BB2QE513NSP81HTMYWBJP02HPGK6",
    "pox_addr": "1Xik14zRm29UsyS6DjhYg4iZeZqsDa8D3"
  },
  "public_keys": [
    {
      "hex_bytes": "publicKey",
      "curve_type": "secp256k1"
    }
  ]
}
@@ -0,0 +1,7 @@
{
  "network_identifier": {
    "blockchain": "stacks",
    "network": "mainnet"
  },
  "signed_transaction": "0x80800000000400539886f96611ba3ba6cef9618f8c78118b37c5be000000000000000000000000000000b400017a33a91515ef48608a99c6adecd2eb258e11534a1acf66348f5678c8e2c8f83d243555ed67a0019d3500df98563ca31321c1a675b43ef79f146e322fe08df75103020000000000051a1ae3f911d8f1d46d7416bfbe4b593fd41eac19cb000000000007a12000000000000000000000000000000000000000000000000000000000000000000000"
}
@@ -0,0 +1,9 @@
{
  "network_identifier": {
    "blockchain": "stacks",
    "network": "mainnet"
  },
  "transaction_identifier": {
    "hash": "0xe6761e6ce26b366e1db70da31096f0de47f623e70f0b495b20f658b03bd21cea"
  }
}
@@ -0,0 +1,26 @@
{
  "network_identifier": {
    "blockchain": "stacks",
    "network": "mainnet"
  },
  "unsigned_transaction": "00000000010400539886f96611ba3ba6cef9618f8c78118b37c5be0000000000000000000000000000006400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003020000000000051ab71a091b4b8b7661a661c620966ab6573bc2dcd3000000000007a12074657374207472616e73616374696f6e000000000000000000000000000000000000",
  "signatures": [
    {
      "signing_payload": {
        "address": "string",
        "account_identifier": {
          "address": "STB44HYPYAT2BB2QE513NSP81HTMYWBJP02HPGK6",
          "metadata": {}
        },
        "hex_bytes": "string",
        "signature_type": "ecdsa"
      },
      "public_key": {
        "hex_bytes": "025c13b2fc2261956d8a4ad07d481b1a3b2cbf93a24f992249a61c3a1c4de79c51",
        "curve_type": "secp256k1"
      },
      "signature_type": "ecdsa",
      "hex_bytes": "string"
    }
  ]
}
@@ -0,0 +1,7 @@
{
  "network_identifier": {
    "blockchain": "stacks",
    "network": "mainnet"
  },
  "signed_transaction": "0x80800000000400539886f96611ba3ba6cef9618f8c78118b37c5be000000000000000000000000000000b400017a33a91515ef48608a99c6adecd2eb258e11534a1acf66348f5678c8e2c8f83d243555ed67a0019d3500df98563ca31321c1a675b43ef79f146e322fe08df75103020000000000051a1ae3f911d8f1d46d7416bfbe4b593fd41eac19cb000000000007a12000000000000000000000000000000000000000000000000000000000000000000000"
}
@@ -0,0 +1,27 @@
{
  "network_identifier": {
    "blockchain": "stacks",
    "network": "mainnet"
  },
  "operations": [
    {
      "operation_identifier": {
        "index": 0,
        "network_index": 0
      },
      "related_operations": [
        {
          "index": 0,
          "network_index": 0
        }
      ],
      "type": "stack_stx",
      "status": "success",
      "account": {
        "address": "STB44HYPYAT2BB2QE513NSP81HTMYWBJP02HPGK6",
        "metadata": {}
      }

    }
  ]
}
@@ -0,0 +1 @@
"0x0100000000000000000000000000000095"
@@ -0,0 +1,6 @@
{
  "sender": "STM9EQRAB3QAKF8NKTP15WJT7VHH4EWG3DJB4W29",
  "arguments": [
    "0x0100000000000000000000000000000095"
  ]
}
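The last two snippets look like a read-only contract-call argument and its request body. As a hedged sketch of how such a body is used: the core node exposes read-only calls at `POST /v2/contracts/call-read/{address}/{contract}/{function}` (proxied by the API); the address, contract, and function below are placeholders:

```shell
# Sketch: invoke a read-only Clarity function with the example body.
# <address>, <contract>, and <function> are hypothetical placeholders.
curl -s -X POST \
  "http://localhost:3999/v2/contracts/call-read/<address>/<contract>/<function>" \
  -H 'Content-Type: application/json' \
  -d '{"sender": "STM9EQRAB3QAKF8NKTP15WJT7VHH4EWG3DJB4W29", "arguments": ["0x0100000000000000000000000000000095"]}'
```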
(File diff suppressed because it is too large.)
@@ -56,8 +56,12 @@
   "engineStrict": true,
   "release": {
     "plugins": [
-      "@semantic-release/commit-analyzer",
-      "@semantic-release/release-notes-generator",
+      ["@semantic-release/commit-analyzer", {
+        "preset": "conventionalcommits"
+      }],
+      ["@semantic-release/release-notes-generator", {
+        "preset": "conventionalcommits"
+      }],
       [
         "@semantic-release/exec",
         {
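The release configuration above switches the commit analyzer and release-notes generator to the `conventionalcommits` preset, matching the `conventional-changelog-conventionalcommits` plugin added to the CI workflow. A quick way to sanity-check the notes this preset would generate, assuming semantic-release is available locally:

```shell
# Dry run: computes the next version and prints release notes without publishing anything.
npx semantic-release --dry-run
```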
readme.md (66 changes)

@@ -98,19 +98,51 @@ For running offline mode set an environment variable `STACKS_API_MODE=offline`
## Event Replay

The stacks-node is only able to emit events live as they happen. This poses a problem in the
scenario where the stacks-blockchain-api needs to be upgraded and its database cannot be migrated to
a new schema. One way to handle this upgrade is to wipe the stacks-blockchain-api's database and
stacks-node working directory, and re-sync from scratch.

Alternatively, an event-replay feature is available where the API records the HTTP POST requests
from the stacks-node event emitter, then streams these events back to itself, essentially simulating
a wipe and full re-sync, but much quicker.

The feature can be used via program args. For example, if there are breaking changes in the API's
sql schema, like adding a new column which requires events to be re-played, the following steps
could be run:

### Event Replay Instructions

#### V1 BNS Data

**Optional but recommended** - If you want the V1 BNS data, there are going to be a few extra steps:

1. Download BNS data:
   ```shell
   curl -L https://storage.googleapis.com/blockstack-v1-migration-data/export-data.tar.gz -o /stacks-node/bns/export-data.tar.gz
   ```
1. Extract it:
   ```shell
   tar -xzvf ./bns/export-data.tar.gz -C /stacks-node/bns/
   ```
1. Each file in `./bns` will have a corresponding `sha256` value. To verify, run a script like the
   following to check the sha256sum (a one-pipeline equivalent is shown after this list):

   ```bash
   for file in `ls /stacks-node/bns/* | grep -v sha256 | grep -v .tar.gz`; do
     if [ $(sha256sum $file | awk {'print $1'}) == $(cat ${file}.sha256 ) ]; then
       echo "sha256 Matched $file"
     else
       echo "sha256 Mismatch $file"
     fi
   done
   ```
1. Set the data's location as the value of `BNS_IMPORT_DIR` in your `.env` file.
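The verification loop in step 3 can also be written as a single `sha256sum -c` pipeline. A minimal equivalent, assuming (as the script above implies) that each `.sha256` file contains a bare digest:

```shell
# Build "digest  filename" lines and let sha256sum verify them in one pass.
for f in /stacks-node/bns/*; do
  case "$f" in *.sha256|*.tar.gz) continue ;; esac
  echo "$(cat "${f}.sha256")  ${f}"
done | sha256sum -c -
```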
#### Export and Import

1. Ensure the API process is not running. When stopping the API, let the process exit gracefully so
   that any in-progress SQL writes can finish.
1. Export event data to disk with the `export-events` command:

   ```shell
   node ./lib/index.js export-events --file /tmp/stacks-node-events.tsv
   ```
1. Update to the new stacks-blockchain-api version.
1. Perform the event playback using the `import-events` command:

   **WARNING**: This will **drop _all_ tables** from the configured Postgres database, including any
   tables not automatically added by the API.

   ```shell
   node ./lib/index.js import-events --file /tmp/stacks-node-events.tsv --wipe-db --force
   ```

   This command has two modes of operation, specified by the `--mode` option:

   * `archival` (default): The process will import and ingest *all* blockchain events that have
     happened since the first block.
   * `pruned`: The import process will ignore some prunable events (mempool, microblocks) until the
     import block height has reached `chain tip - 256` blocks. This saves a considerable amount of
     time during import, but sacrifices some historical data. You can use this mode if you're mostly
     interested in running an API that prioritizes real-time information (see the example after this
     list).
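For example, selecting the pruned mode is the same `import-events` invocation as above with the `--mode` flag added:

```shell
node ./lib/index.js import-events --file /tmp/stacks-node-events.tsv --wipe-db --force --mode pruned
```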
Alternatively, instead of performing the `export-events` command in step 1, an environment variable
can be set which enables events to be streamed to a file as they are received, while the application
is running normally. To enable this feature, set the `STACKS_EXPORT_EVENTS_FILE` env var to the file
path where events should be appended. Example:

```
STACKS_EXPORT_EVENTS_FILE=/tmp/stacks-node-events.tsv
```
@@ -78,7 +78,7 @@ Since we'll need to create some files/dirs for persistent data we'll first creat

We'll be using:

```bash
-$ mkdir -p ./stacks-node/{persistent-data/postgres,persistent-data/stacks-blockchain,bns,config}
+$ mkdir -p ./stacks-node/{persistent-data/postgres,persistent-data/stacks-blockchain,config}
$ docker pull blockstack/stacks-blockchain-api \
  && docker pull blockstack/stacks-blockchain \
  && docker pull postgres:14-alpine
@@ -86,26 +86,6 @@ $ docker network create stacks-blockchain > /dev/null 2>&1
$ cd ./stacks-node
```

-**Optional but recommended**: If you need the v1 BNS data, there are going to be a few extra steps.
-
-1. Download the BNS data:
-   `curl -L https://storage.googleapis.com/blockstack-v1-migration-data/export-data.tar.gz -o ./bns/export-data.tar.gz`
-2. Extract the data:
-   `tar -xzvf ./bns/export-data.tar.gz -C ./bns/`
-3. Each file in `./bns` will have a corresponding `sha256` value.
-
-   To Verify, run a script like the following to check the sha256sum:
-
-   ```bash
-   for file in `ls ./bns/* | grep -v sha256 | grep -v .tar.gz`; do
-     if [ $(sha256sum $file | awk {'print $1'}) == $(cat ${file}.sha256 ) ]; then
-       echo "sha256 Matched $file"
-     else
-       echo "sha256 Mismatch $file"
-     fi
-   done
-   ```

## Postgres

The `postgres:14-alpine` image can be run with default settings, the only requirement is that a password Environment Variable is set for the `postgres` user: `POSTGRES_PASSWORD=postgres`
@@ -161,16 +141,9 @@ STACKS_BLOCKCHAIN_API_PORT=3999
STACKS_BLOCKCHAIN_API_HOST=0.0.0.0
STACKS_CORE_RPC_HOST=stacks-blockchain
STACKS_CORE_RPC_PORT=20443
-BNS_IMPORT_DIR=/bns-data
API_DOCS_URL=https://docs.hiro.so/api
```

-**Note** that here we are importing the bns data with the env var `BNS_IMPORT`.
-
-To Disable this import, simply comment the line: `#BNS_IMPORT_DIR=/bns-data`
-
-***If you leave this enabled***: please allow several minutes for the one-time import to complete before continuing.

The other Environment Variables to pay attention to:

- `PG_HOST`: Set this to your **postgres** instance. In this guide, we'll be using a container named `postgres`.
@@ -184,7 +157,6 @@ docker run -d --rm \
  --name stacks-blockchain-api \
  --net=stacks-blockchain \
  --env-file $(pwd)/.env \
- -v $(pwd)/bns:/bns-data \
  -p 3700:3700 \
  -p 3999:3999 \
  blockstack/stacks-blockchain-api
@@ -35,7 +35,7 @@ Since we'll need to create some files/dirs for persistent data,
we'll first create a base directory structure and set some permissions:

```bash
-$ sudo mkdir -p /stacks-node/{persistent-data/stacks-blockchain,bns,config,binaries}
+$ sudo mkdir -p /stacks-node/{persistent-data/stacks-blockchain,config,binaries}
$ sudo chown -R $(whoami) /stacks-node
$ cd /stacks-node
```
@@ -65,26 +65,6 @@ $ PG_VERSION=14 \
    nodejs
```

-**Optional but recommended** - If you want the V1 BNS data, there are going to be a few extra steps:
-
-1. Download the BNS data:
-   `curl -L https://storage.googleapis.com/blockstack-v1-migration-data/export-data.tar.gz -o /stacks-node/bns/export-data.tar.gz`
-2. Extract the data:
-   `tar -xzvf ./bns/export-data.tar.gz -C /stacks-node/bns/`
-3. Each file in `./bns` will have a corresponding `sha256` value.
-
-   To Verify, run a script like the following to check the sha256sum:
-
-   ```bash
-   for file in `ls /stacks-node/bns/* | grep -v sha256 | grep -v .tar.gz`; do
-     if [ $(sha256sum $file | awk {'print $1'}) == $(cat ${file}.sha256 ) ]; then
-       echo "sha256 Matched $file"
-     else
-       echo "sha256 Mismatch $file"
-     fi
-   done
-   ```

## postgres

### postgres permissions
@@ -127,8 +107,6 @@ $ git clone https://github.com/hirosystems/stacks-blockchain-api /stacks-node/st
The stacks blockchain api requires several Environment Variables to be set in order to run properly.
To reduce complexity, we're going to create a `.env` file that we'll use for these env vars.

-** Note: ** to enable BNS names, uncomment `BNS_IMPORT_DIR` in the below `.env` file.
-
Create a new file: `/stacks-node/stacks-blockchain-api/.env` with the following content:

```bash
@@ -148,7 +126,6 @@ STACKS_BLOCKCHAIN_API_PORT=3999
STACKS_BLOCKCHAIN_API_HOST=0.0.0.0
STACKS_CORE_RPC_HOST=localhost
STACKS_CORE_RPC_PORT=20443
-#BNS_IMPORT_DIR=/stacks-node/bns
EOF
$ cd /stacks-node/stacks-blockchain-api && nohup node ./lib/index.js &
```
@@ -3,13 +3,20 @@ import { asyncHandler } from '../../async-handler';
import { PgStore } from '../../../datastore/pg-store';
import { isUnanchoredRequest } from '../../query-helpers';
import { ChainID } from '@stacks/transactions';
+import {
+  getETagCacheHandler,
+  setETagCacheHeaders,
+} from '../../../api/controllers/cache-controller';

const SUPPORTED_BLOCKCHAINS = ['stacks'];

export function createBnsAddressesRouter(db: PgStore, chainId: ChainID): express.Router {
  const router = express.Router();
+  const cacheHandler = getETagCacheHandler(db);

  router.get(
    '/:blockchain/:address',
+    cacheHandler,
    asyncHandler(async (req, res, next) => {
      // Retrieves a list of names owned by the address provided.
      const { blockchain, address } = req.params;
@@ -23,6 +30,7 @@ export function createBnsAddressesRouter(db: PgStore, chainId: ChainID): express
        includeUnanchored,
        chainId,
      });
+      setETagCacheHeaders(res);
      if (namesByAddress.found) {
        res.json({ names: namesByAddress.result });
      } else {
@@ -3,92 +3,85 @@ import { asyncHandler } from '../../async-handler';
import { PgStore } from '../../../datastore/pg-store';
import { parsePagingQueryInput } from '../../../api/pagination';
import { isUnanchoredRequest } from '../../query-helpers';
-import { bnsBlockchain, BnsErrors } from '../../../bns-constants';
+import { bnsBlockchain, BnsErrors } from '../../../event-stream/bns/bns-constants';
import { BnsGetNameInfoResponse } from '@stacks/stacks-blockchain-api-types';
import { ChainID } from '@stacks/transactions';
+import {
+  getETagCacheHandler,
+  setETagCacheHeaders,
+} from '../../../api/controllers/cache-controller';

export function createBnsNamesRouter(db: PgStore, chainId: ChainID): express.Router {
  const router = express.Router();
+  const cacheHandler = getETagCacheHandler(db);

  router.get(
    '/:name/zonefile/:zoneFileHash',
+    cacheHandler,
    asyncHandler(async (req, res, next) => {
      // Fetches the historical zonefile specified by the username and zone hash.
      const { name, zoneFileHash } = req.params;
      const includeUnanchored = isUnanchoredRequest(req, res, next);
-      let nameFound = false;
-      const nameQuery = await db.getName({ name: name, includeUnanchored, chainId: chainId });
-      nameFound = nameQuery.found;
-      if (!nameFound) {
-        const subdomainQuery = await db.getSubdomain({ subdomain: name, includeUnanchored });
-        nameFound = subdomainQuery.found;
-      }
-
-      if (nameFound) {
-        const zonefile = await db.getHistoricalZoneFile({ name: name, zoneFileHash: zoneFileHash });
-        if (zonefile.found) {
-          res.json(zonefile.result);
-        } else {
-          res.status(404).json({ error: 'No such zonefile' });
-        }
+      const zonefile = await db.getHistoricalZoneFile({
+        name: name,
+        zoneFileHash: zoneFileHash,
+        includeUnanchored,
+      });
+      if (zonefile.found) {
+        setETagCacheHeaders(res);
+        res.json(zonefile.result);
      } else {
-        res.status(400).json({ error: 'Invalid name or subdomain' });
+        res.status(404).json({ error: 'No such name or zonefile' });
      }
    })
  );

  router.get(
    '/:name/subdomains',
+    cacheHandler,
    asyncHandler(async (req, res, next) => {
      const { name } = req.params;
      const includeUnanchored = isUnanchoredRequest(req, res, next);
      const subdomainsList = await db.getSubdomainsListInName({ name, includeUnanchored });
+      setETagCacheHeaders(res);
      res.json(subdomainsList.results);
    })
  );

  router.get(
    '/:name/zonefile',
+    cacheHandler,
    asyncHandler(async (req, res, next) => {
      // Fetch a user’s raw zone file. This only works for RFC-compliant zone files. This method returns an error for names that have non-standard zone files.
      const { name } = req.params;
      const includeUnanchored = isUnanchoredRequest(req, res, next);
-      let nameFound = false;
-      const nameQuery = await db.getName({ name: name, includeUnanchored, chainId: chainId });
-      nameFound = nameQuery.found;
-      if (!nameFound) {
-        const subdomainQuery = await db.getSubdomain({ subdomain: name, includeUnanchored });
-        nameFound = subdomainQuery.found;
-      }
-
-      if (nameFound) {
-        const zonefile = await db.getLatestZoneFile({ name: name, includeUnanchored });
-        if (zonefile.found) {
-          res.json(zonefile.result);
-        } else {
-          res.status(404).json({ error: 'No zone file for name' });
-        }
+      const zonefile = await db.getLatestZoneFile({ name: name, includeUnanchored });
+      if (zonefile.found) {
+        setETagCacheHeaders(res);
+        res.json(zonefile.result);
      } else {
-        res.status(400).json({ error: 'Invalid name or subdomain' });
+        res.status(404).json({ error: 'No such name or zonefile does not exist' });
      }
    })
  );

  router.get(
    '/',
+    cacheHandler,
    asyncHandler(async (req, res, next) => {
      const page = parsePagingQueryInput(req.query.page ?? 0);
      const includeUnanchored = isUnanchoredRequest(req, res, next);
      const { results } = await db.getNamesList({ page, includeUnanchored });
      if (results.length === 0 && req.query.page) {
        res.status(400).json(BnsErrors.InvalidPageNumber);
+      } else {
+        setETagCacheHeaders(res);
+        res.json(results);
      }
-      res.json(results);
    })
  );

  router.get(
    '/:name',
+    cacheHandler,
    asyncHandler(async (req, res, next) => {
      const { name } = req.params;
      const includeUnanchored = isUnanchoredRequest(req, res, next);
@@ -105,7 +98,6 @@ export function createBnsNamesRouter(db: PgStore, chainId: ChainID): express.Rou
        return;
      }
      res.redirect(`${resolverResult.result}/v1/names${req.url}`);
-      next();
      return;
    }
    res.status(404).json({ error: `cannot find subdomain ${name}` });
@@ -149,6 +141,7 @@ export function createBnsNamesRouter(db: PgStore, chainId: ChainID): express.Rou
      const response = Object.fromEntries(
        Object.entries(nameInfoResponse).filter(([_, v]) => v != null)
      );
+      setETagCacheHeaders(res);
      res.json(response);
    })
  );
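As a usage sketch for the refactored routes above: this router is mounted under `/v1/names` (implied by the redirect to `/v1/names${req.url}` in the same file), so the zonefile endpoints can be exercised like this, where `<name>` and `<zonefile-hash>` are placeholders:

```shell
# Latest zonefile for a name:
curl -s "http://localhost:3999/v1/names/<name>/zonefile"
# A specific historical zonefile by hash:
curl -s "http://localhost:3999/v1/names/<name>/zonefile/<zonefile-hash>"
```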
@@ -3,20 +3,27 @@ import { asyncHandler } from '../../async-handler';
import { PgStore } from '../../../datastore/pg-store';
import { parsePagingQueryInput } from '../../../api/pagination';
import { isUnanchoredRequest } from '../../query-helpers';
-import { BnsErrors } from '../../../bns-constants';
+import { BnsErrors } from '../../../event-stream/bns/bns-constants';
import { BnsGetAllNamespacesResponse } from '@stacks/stacks-blockchain-api-types';
+import {
+  getETagCacheHandler,
+  setETagCacheHeaders,
+} from '../../../api/controllers/cache-controller';

export function createBnsNamespacesRouter(db: PgStore): express.Router {
  const router = express.Router();
+  const cacheHandler = getETagCacheHandler(db);

  router.get(
    '/',
+    cacheHandler,
    asyncHandler(async (req, res, next) => {
      const includeUnanchored = isUnanchoredRequest(req, res, next);
      const { results } = await db.getNamespaceList({ includeUnanchored });
      const response: BnsGetAllNamespacesResponse = {
        namespaces: results,
      };
+      setETagCacheHeaders(res);
      res.json(response);
      return;
    })

@@ -24,6 +31,7 @@ export function createBnsNamespacesRouter(db: PgStore): express.Router {

  router.get(
    '/:tld/names',
+    cacheHandler,
    asyncHandler(async (req, res, next) => {
      const { tld } = req.params;
      const page = parsePagingQueryInput(req.query.page ?? 0);

@@ -39,8 +47,10 @@ export function createBnsNamespacesRouter(db: PgStore): express.Router {
      });
      if (results.length === 0 && req.query.page) {
        res.status(400).json(BnsErrors.InvalidPageNumber);
+      } else {
+        setETagCacheHeaders(res);
+        res.json(results);
      }
-      res.json(results);
    })
  );
@@ -10,13 +10,13 @@ import {
  ClarityType,
  ChainID,
} from '@stacks/transactions';
-import { GetStacksNetwork, getBnsContractID } from './../../../bns-helpers';
import {
  BnsGetNamePriceResponse,
  BnsGetNamespacePriceResponse,
} from '@stacks/stacks-blockchain-api-types';
import { isValidPrincipal, logger } from './../../../helpers';
import { PgStore } from '../../../datastore/pg-store';
+import { getBnsContractID, GetStacksNetwork } from '../../../event-stream/bns/bns-helpers';

export function createBnsPriceRouter(db: PgStore, chainId: ChainID): express.Router {
  const router = express.Router();
@@ -419,6 +419,31 @@ export interface DataStoreTxEventData {
  namespaces: DbBnsNamespace[];
}

+export interface DataStoreAttachmentData {
+  op: string;
+  name: string;
+  namespace: string;
+  zonefile: string;
+  zonefileHash: string;
+  txId: string;
+  indexBlockHash: string;
+  blockHeight: number;
+}
+
+export interface DataStoreBnsBlockData {
+  index_block_hash: string;
+  parent_index_block_hash: string;
+  microblock_hash: string;
+  microblock_sequence: number;
+  microblock_canonical: boolean;
+}
+
+export interface DataStoreAttachmentSubdomainData {
+  attachment?: DataStoreAttachmentData;
+  blockData?: DataStoreBnsBlockData;
+  subdomains?: DbBnsSubdomain[];
+}
+
export interface DbSearchResult {
  entity_type: 'standard_address' | 'contract_address' | 'block_hash' | 'tx_id' | 'mempool_tx_id';
  entity_id: string;
@@ -463,6 +488,7 @@ export interface DbInboundStxTransfer {
export interface DbBnsZoneFile {
  zonefile: string;
}

export interface DbBnsNamespace {
  id?: number;
  namespace_id: string;

@@ -471,8 +497,8 @@ export interface DbBnsNamespace {
  reveal_block: number;
  ready_block: number;
  buckets: string;
-  base: number;
-  coeff: number;
+  base: bigint;
+  coeff: bigint;
  nonalpha_discount: number;
  no_vowel_discount: number;
  lifetime: number;
@@ -1117,8 +1143,8 @@ export interface BnsNamespaceInsertValues {
  reveal_block: number;
  ready_block: number;
  buckets: string;
-  base: number;
-  coeff: number;
+  base: bigint;
+  coeff: bigint;
  nonalpha_discount: number;
  no_vowel_discount: number;
  lifetime: number;

@@ -1134,8 +1160,11 @@ export interface BnsNamespaceInsertValues {
}

export interface BnsZonefileInsertValues {
+  name: string;
  zonefile: string;
  zonefile_hash: string;
+  tx_id: PgBytea;
+  index_block_hash: PgBytea;
}

export interface FaucetRequestInsertValues {
@@ -62,6 +62,7 @@ export async function* getRawEventRequests(
  onStatusUpdate?.('Importing raw event requests into temporary table...');
  const importStream = client.query(pgCopyStreams.from(`COPY temp_raw_tsv FROM STDIN`));
  await pipelineAsync(readStream, importStream);
+  onStatusUpdate?.('Removing any duplicate raw event requests...');
  await client.query(`
    INSERT INTO temp_event_observer_requests
    SELECT *
@@ -208,22 +208,6 @@ export class PgStore {
    };
  }

-  async getTxStrict(args: { txId: string; indexBlockHash: string }): Promise<FoundOrNot<DbTx>> {
-    const result = await this.sql<ContractTxQueryResult[]>`
-      SELECT ${unsafeCols(this.sql, [...TX_COLUMNS, abiColumn()])}
-      FROM txs
-      WHERE tx_id = ${args.txId} AND index_block_hash = ${args.indexBlockHash}
-      ORDER BY canonical DESC, microblock_canonical DESC, block_height DESC
-      LIMIT 1
-    `;
-    if (result.length === 0) {
-      return { found: false } as const;
-    }
-    const row = result[0];
-    const tx = parseTxQueryResult(row);
-    return { found: true, result: tx };
-  }
-
  async getBlockWithMetadata<TWithTxs extends boolean, TWithMicroblocks extends boolean>(
    blockIdentifer: BlockIdentifier,
    metadata?: DbGetBlockWithMetadataOpts<TWithTxs, TWithMicroblocks>
@@ -3167,7 +3151,7 @@ export class PgStore {
      FROM namespaces
      WHERE canonical = true AND microblock_canonical = true
      AND ready_block <= ${maxBlockHeight}
-      ORDER BY namespace_id, ready_block DESC, tx_index DESC
+      ORDER BY namespace_id, ready_block DESC, microblock_sequence DESC, tx_index DESC
    `;
  });
  const results = queryResult.map(r => r.namespace_id);
@@ -3194,7 +3178,7 @@ export class PgStore {
      WHERE namespace_id = ${namespace}
      AND registered_at <= ${maxBlockHeight}
      AND canonical = true AND microblock_canonical = true
-      ORDER BY name, registered_at DESC, tx_index DESC
+      ORDER BY name, registered_at DESC, microblock_sequence DESC, tx_index DESC
      LIMIT 100
      OFFSET ${offset}
    `;
@@ -3218,7 +3202,7 @@ export class PgStore {
      WHERE namespace_id = ${namespace}
      AND ready_block <= ${maxBlockHeight}
      AND canonical = true AND microblock_canonical = true
-      ORDER BY namespace_id, ready_block DESC, tx_index DESC
+      ORDER BY namespace_id, ready_block DESC, microblock_sequence DESC, tx_index DESC
      LIMIT 1
    `;
  });
@@ -3247,40 +3231,19 @@ export class PgStore {
    const queryResult = await this.sql.begin(async sql => {
      const maxBlockHeight = await this.getMaxBlockHeight(sql, { includeUnanchored });
      const nameZonefile = await sql<(DbBnsName & { tx_id: string; index_block_hash: string })[]>`
-        SELECT DISTINCT ON (names.name) names.name, names.*, zonefiles.zonefile
-        FROM names
-        LEFT JOIN zonefiles ON names.zonefile_hash = zonefiles.zonefile_hash
-        WHERE name = ${name}
-        AND registered_at <= ${maxBlockHeight}
-        AND canonical = true AND microblock_canonical = true
-        ORDER BY name, registered_at DESC, tx_index DESC
+        SELECT n.*, z.zonefile
+        FROM names AS n
+        LEFT JOIN zonefiles AS z USING (name, tx_id, index_block_hash)
+        WHERE n.name = ${name}
+        AND n.registered_at <= ${maxBlockHeight}
+        AND n.canonical = true
+        AND n.microblock_canonical = true
+        ORDER BY n.registered_at DESC, n.microblock_sequence DESC, n.tx_index DESC
        LIMIT 1
      `;
      if (nameZonefile.length === 0) {
        return;
      }
-      // The `names` and `zonefiles` tables only track latest zonefile changes. We need to check
-      // `nft_custody` for the latest name owner, but only for names that were NOT imported from v1
-      // since they did not generate an NFT event for us to track.
-      if (nameZonefile[0].registered_at !== 0) {
-        let value: string;
-        try {
-          value = bnsNameCV(name);
-        } catch (error) {
-          return;
-        }
-        const nameCustody = await sql<{ recipient: string }[]>`
-          SELECT recipient
-          FROM ${includeUnanchored ? sql`nft_custody_unanchored` : sql`nft_custody`}
-          WHERE asset_identifier = ${getBnsSmartContractId(chainId)} AND value = ${value}
-        `;
-        if (nameCustody.length === 0) {
-          return;
-        }
-        return {
-          ...nameZonefile[0],
-          address: nameCustody[0].recipient,
-        };
-      }
      return nameZonefile[0];
    });
    if (queryResult) {
@@ -3299,21 +3262,48 @@ export class PgStore {
  async getHistoricalZoneFile(args: {
    name: string;
    zoneFileHash: string;
+    includeUnanchored: boolean;
  }): Promise<FoundOrNot<DbBnsZoneFile>> {
-    const validZonefileHash = validateZonefileHash(args.zoneFileHash);
-    const queryResult = await this.sql<{ zonefile: string }[]>`
-      SELECT zonefile
-      FROM names
-      LEFT JOIN zonefiles ON zonefiles.zonefile_hash = names.zonefile_hash
-      WHERE name = ${args.name}
-      AND names.zonefile_hash = ${validZonefileHash}
-      UNION ALL
-      SELECT zonefile
-      FROM subdomains
-      LEFT JOIN zonefiles ON zonefiles.zonefile_hash = subdomains.zonefile_hash
-      WHERE fully_qualified_subdomain = ${args.name}
-      AND subdomains.zonefile_hash = ${validZonefileHash}
-    `;
+    const queryResult = await this.sql.begin(async sql => {
+      const maxBlockHeight = await this.getMaxBlockHeight(sql, {
+        includeUnanchored: args.includeUnanchored,
+      });
+      const validZonefileHash = validateZonefileHash(args.zoneFileHash);
+      // Depending on the kind of name we got, use the correct table to pivot on canonical chain
+      // state to get the zonefile. We can't pivot on the `txs` table because some names/subdomains
+      // were imported from Stacks v1 and they don't have an associated tx.
+      const isSubdomain = args.name.split('.').length > 2;
+      if (isSubdomain) {
+        return sql<{ zonefile: string }[]>`
+          SELECT zonefile
+          FROM zonefiles AS z
+          INNER JOIN subdomains AS s ON
+            s.fully_qualified_subdomain = z.name
+            AND s.tx_id = z.tx_id
+            AND s.index_block_hash = z.index_block_hash
+          WHERE z.name = ${args.name}
+            AND z.zonefile_hash = ${validZonefileHash}
+            AND s.canonical = TRUE
+            AND s.microblock_canonical = TRUE
+            AND s.block_height <= ${maxBlockHeight}
+          ORDER BY s.block_height DESC, s.microblock_sequence DESC, s.tx_index DESC
+          LIMIT 1
+        `;
+      } else {
+        return sql<{ zonefile: string }[]>`
+          SELECT zonefile
+          FROM zonefiles AS z
+          INNER JOIN names AS n USING (name, tx_id, index_block_hash)
+          WHERE z.name = ${args.name}
+            AND z.zonefile_hash = ${validZonefileHash}
+            AND n.canonical = TRUE
+            AND n.microblock_canonical = TRUE
+            AND n.registered_at <= ${maxBlockHeight}
+          ORDER BY n.registered_at DESC, n.microblock_sequence DESC, n.tx_index DESC
+          LIMIT 1
+        `;
+      }
+    });
    if (queryResult.length > 0) {
      return {
        found: true,
@@ -3332,45 +3322,39 @@ export class PgStore {
  }): Promise<FoundOrNot<DbBnsZoneFile>> {
    const queryResult = await this.sql.begin(async sql => {
      const maxBlockHeight = await this.getMaxBlockHeight(sql, { includeUnanchored });
-      const zonefileHashResult = await sql<{ name: string; zonefile: string }[]>`
-        SELECT name, zonefile_hash as zonefile FROM (
-          (
-            SELECT DISTINCT ON (name) name, zonefile_hash
-            FROM names
-            WHERE name = ${name}
-            AND registered_at <= ${maxBlockHeight}
-            AND canonical = true AND microblock_canonical = true
-            ORDER BY name, registered_at DESC, tx_index DESC
-            LIMIT 1
-          )
-          UNION ALL (
-            SELECT DISTINCT ON (fully_qualified_subdomain) fully_qualified_subdomain as name, zonefile_hash
-            FROM subdomains
-            WHERE fully_qualified_subdomain = ${name}
-            AND block_height <= ${maxBlockHeight}
-            AND canonical = true AND microblock_canonical = true
-            ORDER BY fully_qualified_subdomain, block_height DESC, tx_index DESC
-            LIMIT 1
-          )
-        ) results
-        LIMIT 1
-      `;
-      if (zonefileHashResult.length === 0) {
-        return zonefileHashResult;
-      }
+      // Depending on the kind of name we got, use the correct table to pivot on canonical chain
+      // state to get the zonefile. We can't pivot on the `txs` table because some names/subdomains
+      // were imported from Stacks v1 and they don't have an associated tx.
+      const isSubdomain = name.split('.').length > 2;
+      if (isSubdomain) {
+        return sql<{ zonefile: string }[]>`
+          SELECT zonefile
+          FROM zonefiles AS z
+          INNER JOIN subdomains AS s ON
+            s.fully_qualified_subdomain = z.name
+            AND s.tx_id = z.tx_id
+            AND s.index_block_hash = z.index_block_hash
+          WHERE z.name = ${name}
+            AND s.canonical = TRUE
+            AND s.microblock_canonical = TRUE
+            AND s.block_height <= ${maxBlockHeight}
+          ORDER BY s.block_height DESC, s.microblock_sequence DESC, s.tx_index DESC
+          LIMIT 1
+        `;
+      } else {
+        return sql<{ zonefile: string }[]>`
+          SELECT zonefile
+          FROM zonefiles AS z
+          INNER JOIN names AS n USING (name, tx_id, index_block_hash)
+          WHERE z.name = ${name}
+            AND n.canonical = TRUE
+            AND n.microblock_canonical = TRUE
+            AND n.registered_at <= ${maxBlockHeight}
+          ORDER BY n.registered_at DESC, n.microblock_sequence DESC, n.tx_index DESC
+          LIMIT 1
+        `;
+      }
-      const zonefileHash = zonefileHashResult[0].zonefile;
-      const zonefileResult = await sql<{ zonefile: string }[]>`
-        SELECT zonefile
-        FROM zonefiles
-        WHERE zonefile_hash = ${zonefileHash}
-      `;
-      if (zonefileResult.length === 0) {
-        return zonefileHashResult;
-      }
-      zonefileHashResult[0].zonefile = zonefileResult[0].zonefile;
-      return zonefileHashResult;
    });

    if (queryResult.length > 0) {
      return {
        found: true,
@@ -3426,7 +3410,7 @@ export class PgStore {
      names
    WHERE
      address = ${address}
-      AND registered_at = 0
+      AND registered_at = 1
      AND canonical = TRUE
      AND microblock_canonical = TRUE
  `;
@@ -3478,8 +3462,10 @@ export class PgStore {
    return await sql<{ fully_qualified_subdomain: string }[]>`
      SELECT DISTINCT ON (fully_qualified_subdomain) fully_qualified_subdomain
      FROM subdomains
-      WHERE name = ${name} AND block_height <= ${maxBlockHeight}
-      AND canonical = true AND microblock_canonical = true
+      WHERE name = ${name}
+        AND block_height <= ${maxBlockHeight}
+        AND canonical = true
+        AND microblock_canonical = true
      ORDER BY fully_qualified_subdomain, block_height DESC, microblock_sequence DESC, tx_index DESC
    `;
  });
@@ -3502,7 +3488,7 @@ export class PgStore {
      FROM subdomains
      WHERE block_height <= ${maxBlockHeight}
      AND canonical = true AND microblock_canonical = true
-      ORDER BY fully_qualified_subdomain, block_height DESC, tx_index DESC
+      ORDER BY fully_qualified_subdomain, block_height DESC, microblock_sequence DESC, tx_index DESC
      LIMIT 100
      OFFSET ${offset}
    `;
@@ -3520,7 +3506,7 @@ export class PgStore {
      FROM names
      WHERE canonical = true AND microblock_canonical = true
      AND registered_at <= ${maxBlockHeight}
-      ORDER BY name, registered_at DESC, tx_index DESC
+      ORDER BY name, registered_at DESC, microblock_sequence DESC, tx_index DESC
      LIMIT 100
      OFFSET ${offset}
    `;
@@ -3538,32 +3524,22 @@ export class PgStore {
  }): Promise<FoundOrNot<DbBnsSubdomain & { index_block_hash: string }>> {
    const queryResult = await this.sql.begin(async sql => {
      const maxBlockHeight = await this.getMaxBlockHeight(sql, { includeUnanchored });
-      const subdomainResult = await sql<
-        (DbBnsSubdomain & { tx_id: string; index_block_hash: string })[]
-      >`
-        SELECT DISTINCT ON(subdomains.fully_qualified_subdomain) subdomains.fully_qualified_subdomain, *
-        FROM subdomains
-        WHERE canonical = true AND microblock_canonical = true
-        AND block_height <= ${maxBlockHeight}
-        AND fully_qualified_subdomain = ${subdomain}
-        ORDER BY fully_qualified_subdomain, block_height DESC, tx_index DESC
+      return await sql<(DbBnsSubdomain & { tx_id: Buffer; index_block_hash: Buffer })[]>`
+        SELECT s.*, z.zonefile
+        FROM subdomains AS s
+        LEFT JOIN zonefiles AS z
+          ON z.name = s.fully_qualified_subdomain
+          AND z.tx_id = s.tx_id
+          AND z.index_block_hash = s.index_block_hash
+        WHERE s.canonical = true
+          AND s.microblock_canonical = true
+          AND s.block_height <= ${maxBlockHeight}
+          AND s.fully_qualified_subdomain = ${subdomain}
+        ORDER BY s.block_height DESC, s.microblock_sequence DESC, s.tx_index DESC
        LIMIT 1
      `;
-      if (subdomainResult.length === 0 || !subdomainResult[0].zonefile_hash) {
-        return subdomainResult;
-      }
-      const zonefileHash = subdomainResult[0].zonefile_hash;
-      const zonefileResult = await sql`
-        SELECT zonefile
-        FROM zonefiles
-        WHERE zonefile_hash = ${zonefileHash}
-      `;
-      if (zonefileResult.length === 0) {
-        return subdomainResult;
-      }
-      subdomainResult[0].zonefile = zonefileResult[0].zonefile;
-      return subdomainResult;
    });
-    if (queryResult.length > 0) {
+    if (queryResult.length > 0 && !queryResult[0].zonefile_hash) {
      return {
        found: true,
        result: {
@@ -3582,7 +3558,7 @@ export class PgStore {
      FROM subdomains
      WHERE canonical = true AND microblock_canonical = true
      AND name = ${args.name}
-      ORDER BY name, block_height DESC, tx_index DESC
+      ORDER BY name, block_height DESC, microblock_sequence DESC, tx_index DESC
      LIMIT 1
    `;
    if (queryResult.length > 0) {
@@ -1,5 +1,5 @@
import * as fs from 'fs';
-import { logger, logError, getOrAdd, batchIterate, isProdEnv } from '../helpers';
+import { logger, logError, getOrAdd, batchIterate, isProdEnv, I32_MAX } from '../helpers';
import {
  DbBlock,
  DbTx,

@@ -55,6 +55,9 @@ import {
  TxQueryResult,
  UpdatedEntities,
  BlockQueryResult,
+  DataStoreAttachmentData,
+  DataStoreAttachmentSubdomainData,
+  DataStoreBnsBlockData,
} from './common';
import { ClarityAbi } from '@stacks/transactions';
import {

@@ -75,6 +78,8 @@ import { connectPostgres, PgServer, PgSqlClient } from './connection';
import { runMigrations } from './migrations';
import { getPgClientConfig } from './connection-legacy';
import { isProcessableTokenMetadata } from '../token-metadata/helpers';
+import * as zoneFileParser from 'zone-file';
+import { parseResolver, parseZoneFileTxt } from '../event-stream/bns/bns-helpers';

class MicroblockGapError extends Error {
  constructor(message: string) {
@@ -351,12 +356,12 @@ export class PgWriteStore extends PgStore {
        for (const smartContract of entry.smartContracts) {
          await this.updateSmartContract(sql, entry.tx, smartContract);
        }
-        for (const bnsName of entry.names) {
-          await this.updateNames(sql, entry.tx, bnsName);
-        }
        for (const namespace of entry.namespaces) {
          await this.updateNamespaces(sql, entry.tx, namespace);
        }
+        for (const bnsName of entry.names) {
+          await this.updateNames(sql, entry.tx, bnsName);
+        }
      }
      await this.refreshNftCustody(sql, batchedTxData);
      await this.refreshMaterializedView(sql, 'chain_tip');
@@ -762,79 +767,101 @@ export class PgWriteStore extends PgStore {
    }
  }

-  async updateBatchSubdomains(
-    sql: PgSqlClient,
-    blockData: {
-      index_block_hash: string;
-      parent_index_block_hash: string;
-      microblock_hash: string;
-      microblock_sequence: number;
-      microblock_canonical: boolean;
-    },
-    subdomains: DbBnsSubdomain[]
-  ) {
-    const subdomainValues: BnsSubdomainInsertValues[] = [];
-    const zonefileValues: BnsZonefileInsertValues[] = [];
-    for (const subdomain of subdomains) {
-      let txIndex = subdomain.tx_index;
-      if (txIndex === -1) {
-        const txQuery = await sql<{ tx_index: number }[]>`
-          SELECT tx_index from txs
-          WHERE tx_id = ${subdomain.tx_id}
-          AND index_block_hash = ${blockData.index_block_hash}
-          AND block_height = ${subdomain.block_height}
-          LIMIT 1
-        `;
-        if (txQuery.length === 0) {
-          logger.warn(`Could not find tx index for subdomain entry: ${JSON.stringify(subdomain)}`);
-          txIndex = 0;
-        } else {
-          txIndex = txQuery[0].tx_index;
-        }
-      }
-      subdomainValues.push({
-        name: subdomain.name,
-        namespace_id: subdomain.namespace_id,
-        fully_qualified_subdomain: subdomain.fully_qualified_subdomain,
-        owner: subdomain.owner,
-        zonefile_hash: validateZonefileHash(subdomain.zonefile_hash),
-        parent_zonefile_hash: subdomain.parent_zonefile_hash,
-        parent_zonefile_index: subdomain.parent_zonefile_index,
-        block_height: subdomain.block_height,
-        tx_index: txIndex,
-        zonefile_offset: subdomain.zonefile_offset,
-        resolver: subdomain.resolver,
-        canonical: subdomain.canonical,
-        tx_id: subdomain.tx_id,
-        index_block_hash: blockData.index_block_hash,
-        parent_index_block_hash: blockData.parent_index_block_hash,
-        microblock_hash: blockData.microblock_hash,
-        microblock_sequence: blockData.microblock_sequence,
-        microblock_canonical: blockData.microblock_canonical,
-      });
-      zonefileValues.push({
-        zonefile: subdomain.zonefile,
-        zonefile_hash: validateZonefileHash(subdomain.zonefile_hash),
-      });
-    }
-    try {
-      const bnsRes = await sql`
-        INSERT INTO subdomains ${sql(subdomainValues)}
-      `;
-      if (bnsRes.count !== subdomains.length) {
-        throw new Error(`Expected ${subdomains.length} inserts, got ${bnsRes.count} for BNS`);
-      }
-      const zonefilesRes = await sql`
-        INSERT INTO zonefiles ${sql(zonefileValues)}
-      `;
-      if (zonefilesRes.count !== subdomains.length) {
-        throw new Error(
-          `Expected ${subdomains.length} inserts, got ${zonefilesRes.count} for zonefiles`
-        );
-      }
-    } catch (e: any) {
-      logError(`subdomain errors ${e.message}`, e);
-      throw e;
-    }
-  }
+  async updateBatchZonefiles(
+    sql: PgSqlClient,
+    data: DataStoreAttachmentSubdomainData[]
+  ): Promise<void> {
+    const zonefileValues: BnsZonefileInsertValues[] = [];
+    for (const dataItem of data) {
+      if (dataItem.subdomains && dataItem.blockData) {
+        for (const subdomain of dataItem.subdomains) {
+          zonefileValues.push({
+            name: subdomain.fully_qualified_subdomain,
+            zonefile: subdomain.zonefile,
+            zonefile_hash: validateZonefileHash(subdomain.zonefile_hash),
+            tx_id: subdomain.tx_id,
+            index_block_hash: dataItem.blockData.index_block_hash,
+          });
+        }
+      }
+      if (dataItem.attachment) {
+        zonefileValues.push({
+          name: `${dataItem.attachment.name}.${dataItem.attachment.namespace}`,
+          zonefile: Buffer.from(dataItem.attachment.zonefile, 'hex').toString(),
+          zonefile_hash: validateZonefileHash(dataItem.attachment.zonefileHash),
+          tx_id: dataItem.attachment.txId,
+          index_block_hash: dataItem.attachment.indexBlockHash,
+        });
+      }
+    }
+    if (zonefileValues.length === 0) {
+      return;
+    }
+    const result = await sql`
+      INSERT INTO zonefiles ${sql(zonefileValues)}
+      ON CONFLICT ON CONSTRAINT unique_name_zonefile_hash_tx_id_index_block_hash DO
+        UPDATE SET zonefile = EXCLUDED.zonefile
+    `;
+    if (result.count !== zonefileValues.length) {
+      throw new Error(`Expected ${result.count} zonefile inserts, got ${zonefileValues.length}`);
+    }
+  }
+
+  async updateBatchSubdomains(
+    sql: PgSqlClient,
+    data: DataStoreAttachmentSubdomainData[]
+  ): Promise<void> {
+    const subdomainValues: BnsSubdomainInsertValues[] = [];
+    for (const dataItem of data) {
+      if (dataItem.subdomains && dataItem.blockData) {
+        for (const subdomain of dataItem.subdomains) {
+          subdomainValues.push({
+            name: subdomain.name,
+            namespace_id: subdomain.namespace_id,
+            fully_qualified_subdomain: subdomain.fully_qualified_subdomain,
+            owner: subdomain.owner,
+            zonefile_hash: validateZonefileHash(subdomain.zonefile_hash),
+            parent_zonefile_hash: subdomain.parent_zonefile_hash,
+            parent_zonefile_index: subdomain.parent_zonefile_index,
+            block_height: subdomain.block_height,
+            tx_index: subdomain.tx_index,
+            zonefile_offset: subdomain.zonefile_offset,
+            resolver: subdomain.resolver,
+            canonical: subdomain.canonical,
+            tx_id: subdomain.tx_id,
+            index_block_hash: dataItem.blockData.index_block_hash,
+            parent_index_block_hash: dataItem.blockData.parent_index_block_hash,
+            microblock_hash: dataItem.blockData.microblock_hash,
+            microblock_sequence: dataItem.blockData.microblock_sequence,
+            microblock_canonical: dataItem.blockData.microblock_canonical,
+          });
+        }
+      }
+    }
+    if (subdomainValues.length === 0) {
+      return;
+    }
+    const result = await sql`
+      INSERT INTO subdomains ${sql(subdomainValues)}
+      ON CONFLICT ON CONSTRAINT unique_fully_qualified_subdomain_tx_id_index_block_hash_microblock_hash DO
+        UPDATE SET
+          name = EXCLUDED.name,
+          namespace_id = EXCLUDED.namespace_id,
+          owner = EXCLUDED.owner,
+          zonefile_hash = EXCLUDED.zonefile_hash,
+          parent_zonefile_hash = EXCLUDED.parent_zonefile_hash,
+          parent_zonefile_index = EXCLUDED.parent_zonefile_index,
+          block_height = EXCLUDED.block_height,
+          tx_index = EXCLUDED.tx_index,
+          zonefile_offset = EXCLUDED.zonefile_offset,
+          resolver = EXCLUDED.resolver,
+          canonical = EXCLUDED.canonical,
+          parent_index_block_hash = EXCLUDED.parent_index_block_hash,
+          microblock_sequence = EXCLUDED.microblock_sequence,
+          microblock_canonical = EXCLUDED.microblock_canonical
+    `;
+    if (result.count !== subdomainValues.length) {
+      throw new Error(`Expected ${subdomainValues.length} subdomain inserts, got ${result.count}`);
+    }
+  }
@@ -850,7 +877,8 @@ export class PgWriteStore extends PgStore {
): Promise<void> {
if (data.length == 0) return;
await this.sql.begin(async sql => {
await this.updateBatchSubdomains(sql, blockData, data);
await this.updateBatchSubdomains(sql, [{ blockData, subdomains: data }]);
await this.updateBatchZonefiles(sql, [{ blockData, subdomains: data }]);
});
}

@@ -970,6 +998,90 @@ export class PgWriteStore extends PgStore {
`;
}
async updateAttachments(attachments: DataStoreAttachmentData[]): Promise<void> {
await this.sql.begin(async sql => {
// Each attachment will batch insert zonefiles for name and all subdomains that apply.
for (const attachment of attachments) {
const subdomainData: DataStoreAttachmentSubdomainData[] = [];
if (attachment.op === 'name-update') {
// If this is a zonefile update, break it down into subdomains and update all of them. We
// must find the correct transaction that registered the zonefile in the first place and
// associate it with each entry.
const zonefile = Buffer.from(attachment.zonefile, 'hex').toString();
const zoneFileContents = zoneFileParser.parseZoneFile(zonefile);
const zoneFileTxt = zoneFileContents.txt;
if (zoneFileTxt && zoneFileTxt.length > 0) {
const dbTx = await sql<TxQueryResult[]>`
SELECT ${sql(TX_COLUMNS)} FROM txs
WHERE tx_id = ${attachment.txId} AND index_block_hash = ${attachment.indexBlockHash}
ORDER BY canonical DESC, microblock_canonical DESC, block_height DESC
LIMIT 1
`;
let isCanonical = true;
let txIndex = -1;
const blockData: DataStoreBnsBlockData = {
index_block_hash: '',
parent_index_block_hash: '',
microblock_hash: '',
microblock_sequence: I32_MAX,
microblock_canonical: true,
};
if (dbTx.rowCount > 0) {
const parsedDbTx = parseTxQueryResult(dbTx.rows[0]);
isCanonical = parsedDbTx.canonical;
txIndex = parsedDbTx.tx_index;
blockData.index_block_hash = parsedDbTx.index_block_hash;
blockData.parent_index_block_hash = parsedDbTx.parent_index_block_hash;
blockData.microblock_hash = parsedDbTx.microblock_hash;
blockData.microblock_sequence = parsedDbTx.microblock_sequence;
blockData.microblock_canonical = parsedDbTx.microblock_canonical;
} else {
logger.warn(
`Could not find transaction ${attachment.txId} associated with attachment`
);
}
const subdomains: DbBnsSubdomain[] = [];
for (let i = 0; i < zoneFileTxt.length; i++) {
const zoneFile = zoneFileTxt[i];
const parsedTxt = parseZoneFileTxt(zoneFile.txt);
if (parsedTxt.owner === '') continue; // if the TXT record has no owner, skip it
const subdomain: DbBnsSubdomain = {
name: attachment.name.concat('.', attachment.namespace),
namespace_id: attachment.namespace,
fully_qualified_subdomain: zoneFile.name.concat(
'.',
attachment.name,
'.',
attachment.namespace
),
owner: parsedTxt.owner,
zonefile_hash: parsedTxt.zoneFileHash,
zonefile: parsedTxt.zoneFile,
tx_id: attachment.txId,
tx_index: txIndex,
canonical: isCanonical,
parent_zonefile_hash: attachment.zonefileHash.slice(2),
parent_zonefile_index: 0,
block_height: attachment.blockHeight,
zonefile_offset: 1,
resolver: zoneFileContents.uri ? parseResolver(zoneFileContents.uri) : '',
};
subdomains.push(subdomain);
}
subdomainData.push({ blockData, subdomains, attachment: attachment });
}
}
await this.updateBatchSubdomains(sql, subdomainData);
await this.updateBatchZonefiles(sql, subdomainData);
// Update the name's zonefile as well.
await this.updateBatchZonefiles(sql, [{ attachment }]);
}
});
for (const txId of attachments.map(a => a.txId)) {
await this.notifier?.sendName({ nameInfo: txId });
}
}
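
// Illustrative only (not part of this change): the rough shape of one
// `DataStoreAttachmentData` record consumed above, matching the fields the
// `/attachments/new` handler maps further below. All values here are made up.
//
//   const attachment: DataStoreAttachmentData = {
//     op: 'name-update',
//     name: 'alice',
//     namespace: 'btc',
//     zonefile: '24303a...', // hex-encoded zonefile body
//     zonefileHash: '0xb472a266d0bd89c13706a4132ccfb16f7c3b9fcb',
//     txId: '0x5f3e...',
//     indexBlockHash: '0x9eb0...',
//     blockHeight: 2500,
//   };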

async updateMicroCanonical(
sql: PgSqlClient,
blockData: {
@@ -1060,19 +1172,6 @@ export class PgWriteStore extends PgStore {
};
}

async updateZoneContent(zonefile: string, zonefile_hash: string, tx_id: string): Promise<void> {
await this.sql.begin(async sql => {
// inserting zonefile into zonefiles table
const validZonefileHash = validateZonefileHash(zonefile_hash);
await sql`
UPDATE zonefiles
SET zonefile = ${zonefile}
WHERE zonefile_hash = ${validZonefileHash}
`;
});
await this.notifier?.sendName({ nameInfo: tx_id });
}

async updateBurnchainRewards({
burnchainBlockHash,
burnchainBlockHeight,
@@ -1300,25 +1399,90 @@ export class PgWriteStore extends PgStore {
},
bnsName: DbBnsName
) {
const validZonefileHash = validateZonefileHash(bnsName.zonefile_hash);
const {
name,
address,
registered_at,
expire_block,
zonefile,
zonefile_hash,
namespace_id,
tx_id,
tx_index,
status,
canonical,
} = bnsName;
// Try to figure out the name's expiration block based on its namespace's lifetime. However, if
// the name was only transferred, keep the expiration from the last register/renewal we had.
let expireBlock = expire_block;
if (status === 'name-transfer') {
const prevExpiration = await sql<{ expire_block: number }[]>`
SELECT expire_block
FROM names
WHERE name = ${name}
AND canonical = TRUE AND microblock_canonical = TRUE
ORDER BY registered_at DESC, microblock_sequence DESC, tx_index DESC
LIMIT 1
`;
if (prevExpiration.length > 0) {
expireBlock = prevExpiration[0].expire_block;
}
} else {
const namespaceLifetime = await sql<{ lifetime: number }[]>`
SELECT lifetime
FROM namespaces
WHERE namespace_id = ${namespace_id}
AND canonical = true AND microblock_canonical = true
ORDER BY namespace_id, ready_block DESC, microblock_sequence DESC, tx_index DESC
LIMIT 1
`;
if (namespaceLifetime.length > 0) {
expireBlock = registered_at + namespaceLifetime[0].lifetime;
}
}
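// Worked example for the logic above (hypothetical numbers): a name first
// registered at block 1000 in a namespace whose `lifetime` is 52595 gets
// expireBlock = 1000 + 52595 = 53595. A later `name-transfer` takes the first
// branch instead and keeps the previously stored expiration rather than
// restarting the clock.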
// If we didn't receive a zonefile, keep the last valid one.
let finalZonefile = zonefile;
let finalZonefileHash = zonefile_hash;
if (finalZonefileHash === '') {
const lastZonefile = await sql<{ zonefile: string; zonefile_hash: string }[]>`
SELECT z.zonefile, z.zonefile_hash
FROM zonefiles AS z
INNER JOIN names AS n USING (name, tx_id, index_block_hash)
WHERE z.name = ${name}
AND n.canonical = TRUE
AND n.microblock_canonical = TRUE
ORDER BY n.registered_at DESC, n.microblock_sequence DESC, n.tx_index DESC
LIMIT 1
`;
if (lastZonefile.length > 0) {
finalZonefile = lastZonefile[0].zonefile;
finalZonefileHash = lastZonefile[0].zonefile_hash;
}
}
const validZonefileHash = validateZonefileHash(finalZonefileHash);
const zonefileValues: BnsZonefileInsertValues = {
zonefile: bnsName.zonefile,
name: name,
zonefile: finalZonefile,
zonefile_hash: validZonefileHash,
tx_id: tx_id,
index_block_hash: blockData.index_block_hash,
};
await sql`
INSERT INTO zonefiles ${sql(zonefileValues)}
ON CONFLICT ON CONSTRAINT unique_name_zonefile_hash_tx_id_index_block_hash DO
UPDATE SET zonefile = EXCLUDED.zonefile
`;
const nameValues: BnsNameInsertValues = {
name: bnsName.name,
address: bnsName.address,
registered_at: bnsName.registered_at,
expire_block: bnsName.expire_block,
name: name,
address: address,
registered_at: registered_at,
expire_block: expireBlock,
zonefile_hash: validZonefileHash,
namespace_id: bnsName.namespace_id,
tx_index: bnsName.tx_index,
tx_id: bnsName.tx_id,
status: bnsName.status ?? null,
canonical: bnsName.canonical,
namespace_id: namespace_id,
tx_index: tx_index,
tx_id: tx_id,
status: status ?? null,
canonical: canonical,
index_block_hash: blockData.index_block_hash,
parent_index_block_hash: blockData.parent_index_block_hash,
microblock_hash: blockData.microblock_hash,
@@ -1327,6 +1491,19 @@ export class PgWriteStore extends PgStore {
};
await sql`
INSERT INTO names ${sql(nameValues)}
ON CONFLICT ON CONSTRAINT unique_name_tx_id_index_block_hash_microblock_hash DO
UPDATE SET
address = EXCLUDED.address,
registered_at = EXCLUDED.registered_at,
expire_block = EXCLUDED.expire_block,
zonefile_hash = EXCLUDED.zonefile_hash,
namespace_id = EXCLUDED.namespace_id,
tx_index = EXCLUDED.tx_index,
status = EXCLUDED.status,
canonical = EXCLUDED.canonical,
parent_index_block_hash = EXCLUDED.parent_index_block_hash,
microblock_sequence = EXCLUDED.microblock_sequence,
microblock_canonical = EXCLUDED.microblock_canonical
`;
}

@@ -1365,6 +1542,24 @@ export class PgWriteStore extends PgStore {
};
await sql`
INSERT INTO namespaces ${sql(values)}
ON CONFLICT ON CONSTRAINT unique_namespace_id_tx_id_index_block_hash_microblock_hash DO
UPDATE SET
launched_at = EXCLUDED.launched_at,
address = EXCLUDED.address,
reveal_block = EXCLUDED.reveal_block,
ready_block = EXCLUDED.ready_block,
buckets = EXCLUDED.buckets,
base = EXCLUDED.base,
coeff = EXCLUDED.coeff,
nonalpha_discount = EXCLUDED.nonalpha_discount,
no_vowel_discount = EXCLUDED.no_vowel_discount,
lifetime = EXCLUDED.lifetime,
status = EXCLUDED.status,
tx_index = EXCLUDED.tx_index,
canonical = EXCLUDED.canonical,
parent_index_block_hash = EXCLUDED.parent_index_block_hash,
microblock_sequence = EXCLUDED.microblock_sequence,
microblock_canonical = EXCLUDED.microblock_canonical
`;
}

@@ -1604,12 +1799,12 @@ export class PgWriteStore extends PgStore {
for (const smartContract of entry.smartContracts) {
await this.updateSmartContract(sql, entry.tx, smartContract);
}
for (const bnsName of entry.names) {
await this.updateNames(sql, entry.tx, bnsName);
}
for (const namespace of entry.namespaces) {
await this.updateNamespaces(sql, entry.tx, namespace);
}
for (const bnsName of entry.names) {
await this.updateNames(sql, entry.tx, bnsName);
}
}
}

@@ -1,15 +1,16 @@
import * as path from 'path';
import * as fs from 'fs';
import { startEventServer } from '../event-stream/event-server';
import { getApiConfiguredChainID, httpPostRequest, logger } from '../helpers';
import { findTsvBlockHeight, getDbBlockHeight } from './helpers';
import { PgWriteStore } from '../datastore/pg-write-store';
import { cycleMigrations, dangerousDropAllTables } from '../datastore/migrations';
import { defaultLogLevel, getApiConfiguredChainID, httpPostRequest, logger } from '../helpers';
import { findBnsGenesisBlockData, findTsvBlockHeight, getDbBlockHeight } from './helpers';
import { importV1BnsNames, importV1BnsSubdomains, importV1TokenOfferingData } from '../import-v1';
import {
containsAnyRawEventRequests,
exportRawEventRequests,
getRawEventRequests,
} from '../datastore/event-requests';
import { cycleMigrations, dangerousDropAllTables } from '../datastore/migrations';
import { PgWriteStore } from '../datastore/pg-write-store';

enum EventImportMode {
/**
@@ -113,6 +114,8 @@ export async function importEventsFromTsv(
if (eventImportMode === EventImportMode.pruned) {
console.log(`Ignoring all prunable events before block height: ${prunedBlockHeight}`);
}
// Look for the TSV's genesis block information for BNS import.
const tsvGenesisBlockData = await findBnsGenesisBlockData(resolvedFilePath);

const db = await PgWriteStore.connect({
usageName: 'import-events',
@@ -128,6 +131,18 @@ export async function importEventsFromTsv(
httpLogLevel: 'debug',
});

await importV1TokenOfferingData(db);

// Import V1 BNS names first. Subdomains will be imported after TSV replay is finished in order to
// keep the size of the `subdomains` table small.
if (process.env.BNS_IMPORT_DIR) {
logger.info(`Using BNS export data from: ${process.env.BNS_IMPORT_DIR}`);
await importV1BnsNames(db, process.env.BNS_IMPORT_DIR, tsvGenesisBlockData);
} else {
logger.warn(`Notice: full BNS functionality requires 'BNS_IMPORT_DIR' to be set.`);
}

// Import TSV chain data
const readStream = fs.createReadStream(resolvedFilePath);
const rawEventsIterator = getRawEventRequests(readStream, status => {
console.log(status);
@@ -162,10 +177,17 @@ export async function importEventsFromTsv(
});
if (rawEvent.event_path === '/new_block') {
blockHeight = await getDbBlockHeight(db);
if (blockHeight % 1000 === 0) {
console.log(`Event file block height reached: ${blockHeight}`);
}
}
}
}
await db.finishEventReplay();
if (process.env.BNS_IMPORT_DIR) {
logger.level = defaultLogLevel;
await importV1BnsSubdomains(db, process.env.BNS_IMPORT_DIR, tsvGenesisBlockData);
}
console.log(`Event import and playback successful.`);
await eventServer.closeAsync();
await db.close();

@@ -1,6 +1,15 @@
import { PgWriteStore } from '../datastore/pg-write-store';
import * as fs from 'fs';
import * as readline from 'readline';
import { decodeTransaction, TxPayloadTypeID } from 'stacks-encoding-native-js';
import { DataStoreBnsBlockData } from '../datastore/common';
import { ReverseFileStream } from './reverse-file-stream';

export type BnsGenesisBlock = DataStoreBnsBlockData & {
tx_id: string;
tx_index: number;
};

/**
* Traverse a TSV file in reverse to find the last received `/new_block` node message and return
* the `block_height` reported by that event. Even though the block produced by that event might
@@ -26,6 +35,47 @@ export async function findTsvBlockHeight(filePath: string): Promise<number> {
return blockHeight;
}

/**
* Traverse a TSV file to find the genesis block and extract its data so we can use it during V1 BNS
* import.
* @param filePath - TSV path
* @returns Genesis block data
*/
export async function findBnsGenesisBlockData(filePath: string): Promise<BnsGenesisBlock> {
const rl = readline.createInterface({
input: fs.createReadStream(filePath),
crlfDelay: Infinity,
});
for await (const line of rl) {
const columns = line.split('\t');
const eventName = columns[2];
if (eventName === '/new_block') {
const payload = JSON.parse(columns[3]);
// Look for block 1
if (payload.block_height === 1) {
for (const tx of payload.transactions) {
const decodedTx = decodeTransaction(tx.raw_tx);
// Look for the only token transfer transaction in the genesis block. This is the one
// that contains all the events, including all BNS name registrations.
if (decodedTx.payload.type_id === TxPayloadTypeID.TokenTransfer) {
rl.close();
return {
index_block_hash: payload.index_block_hash,
parent_index_block_hash: payload.parent_index_block_hash,
microblock_hash: payload.parent_microblock,
microblock_sequence: payload.parent_microblock_sequence,
microblock_canonical: true,
tx_id: decodedTx.tx_id,
tx_index: tx.tx_index,
};
}
}
}
}
}
throw new Error('BNS genesis block data not found');
}
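
// Illustrative only: the TSV layout assumed by the parser above is one
// tab-separated node event per line, with the event path in the third column
// and the JSON payload in the fourth (the leading id and timestamp columns
// are an assumption; all values are made up):
//
//   1<TAB>2021-01-24T01:02:03.000Z<TAB>/new_block<TAB>{"block_height":1,...}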

/**
* Get the current block height from the DB. We won't use the `getChainTip` method since that
* adds some conversions from block hashes into strings that we're not interested in. We also can't
@@ -18,14 +18,5 @@ export const enum BnsContractIdentifier {
mainnet = 'SP000000000000000000002Q6VF78.bns',
testnet = 'ST000000000000000000002AMW42H.bns',
}
export const namespaceReadyFunction = 'namespace-ready';
export const nameFunctions = [
'name-import',
'name-revoke',
'name-update',
'name-transfer',
'name-renewal',
'name-register',
];

export const bnsBlockchain = 'stacks';
@@ -1,29 +1,30 @@
import { Address, ChainID, StacksMessageType } from '@stacks/transactions';
import { DbBnsNamespace } from './datastore/common';
import { hexToBuffer, hexToUtf8String } from './helpers';
import { CoreNodeParsedTxMessage } from './event-stream/core-node-message';
import { StacksCoreRpcClient, getCoreNodeEndpoint } from './core-rpc/client';
import { BufferCV, ChainID, ClarityType, hexToCV, StringAsciiCV } from '@stacks/transactions';
import { hexToBuffer, hexToUtf8String } from '../../helpers';
import {
CoreNodeEvent,
CoreNodeEventType,
CoreNodeParsedTxMessage,
NftTransferEvent,
} from '../../event-stream/core-node-message';
import { getCoreNodeEndpoint } from '../../core-rpc/client';
import { StacksMainnet, StacksTestnet } from '@stacks/network';
import { URIType } from 'zone-file/dist/zoneFile';
import { BnsContractIdentifier } from './bns-constants';
import { BnsContractIdentifier, printTopic } from './bns-constants';
import * as crypto from 'crypto';
import {
ClarityTypeID,
decodeClarityValue,
ClarityValue,
ClarityValueBuffer,
ClarityValueInt,
ClarityValueList,
ClarityValueOptional,
ClarityValueOptionalSome,
ClarityValueOptionalUInt,
ClarityValuePrincipalStandard,
ClarityValueStringAscii,
ClarityValueTuple,
ClarityValueUInt,
TxPayloadTypeID,
ClarityValuePrincipalContract,
} from 'stacks-encoding-native-js';
import { SmartContractEvent } from '../core-node-message';
import { DbBnsNamespace, DbBnsName } from '../../datastore/common';

interface Attachment {
attachment: {
@@ -160,8 +161,8 @@ export function parseNamespaceRawValue(
const namespaceBns: DbBnsNamespace = {
namespace_id: namespace,
address: address,
base: Number(base),
coeff: Number(coeff),
base: base,
coeff: coeff,
launched_at: launched_at,
lifetime: Number(lifetime),
no_vowel_discount: Number(no_vowel_discount),
@@ -177,39 +178,6 @@ export function parseNamespaceRawValue(
return namespaceBns;
}

export function getFunctionName(tx_id: string, transactions: CoreNodeParsedTxMessage[]): string {
const contract_function_name: string = '';
for (const tx of transactions) {
if (tx.core_tx.txid === tx_id) {
if (tx.parsed_tx.payload.type_id === TxPayloadTypeID.ContractCall) {
return tx.parsed_tx.payload.function_name;
}
}
}
return contract_function_name;
}

export function getNewOwner(
tx_id: string,
transactions: CoreNodeParsedTxMessage[]
): string | undefined {
for (const tx of transactions) {
if (tx.core_tx.txid === tx_id) {
if (tx.parsed_tx.payload.type_id === TxPayloadTypeID.ContractCall) {
if (
tx.parsed_tx.payload.function_args.length >= 3 &&
tx.parsed_tx.payload.function_args[2].type_id === ClarityTypeID.PrincipalStandard
) {
const decoded = decodeClarityValue(tx.parsed_tx.payload.function_args[2].hex);
const principal = decoded as ClarityValuePrincipalStandard;
return principal.address;
}
}
}
}
return undefined;
}

export function GetStacksNetwork(chainId: ChainID) {
const network = chainId === ChainID.Mainnet ? new StacksMainnet() : new StacksTestnet();
network.coreApiUrl = `http://${getCoreNodeEndpoint()}`;
@@ -272,3 +240,123 @@ export function getBnsContractID(chainId: ChainID) {
chainId === ChainID.Mainnet ? BnsContractIdentifier.mainnet : BnsContractIdentifier.testnet;
return contractId;
}

function isEventFromBnsContract(event: SmartContractEvent): boolean {
return (
event.committed === true &&
event.contract_event.topic === printTopic &&
(event.contract_event.contract_identifier === BnsContractIdentifier.mainnet ||
event.contract_event.contract_identifier === BnsContractIdentifier.testnet)
);
}

export function parseNameRenewalWithNoZonefileHashFromContractCall(
tx: CoreNodeParsedTxMessage,
chainId: ChainID
): DbBnsName | undefined {
const payload = tx.parsed_tx.payload;
if (
tx.core_tx.status === 'success' &&
payload.type_id === TxPayloadTypeID.ContractCall &&
payload.function_name === 'name-renewal' &&
getBnsContractID(chainId) === `${payload.address}.${payload.contract_name}` &&
payload.function_args.length === 5 &&
hexToCV(payload.function_args[4].hex).type === ClarityType.OptionalNone
) {
const namespace = (hexToCV(payload.function_args[0].hex) as BufferCV).buffer.toString('utf8');
const name = (hexToCV(payload.function_args[1].hex) as BufferCV).buffer.toString('utf8');
return {
name: `${name}.${namespace}`,
namespace_id: namespace,
// NOTE: We're not using the `new_owner` argument here because there's a bug in the BNS
// contract that doesn't actually transfer the name to the given principal:
// https://github.com/stacks-network/stacks-blockchain/issues/2680, maybe this will be fixed
// in Stacks 2.1
address: tx.sender_address,
// expire_block will be calculated upon DB insert based on the namespace's lifetime.
expire_block: 0,
registered_at: tx.block_height,
// Since we received no zonefile_hash, the previous one will be reused when writing to DB.
zonefile_hash: '',
zonefile: '',
tx_id: tx.parsed_tx.tx_id,
tx_index: tx.core_tx.tx_index,
status: 'name-renewal',
canonical: true,
};
}
}
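
// Hypothetical sketch of a contract call the parser above matches: a
// successful `name-renewal` with exactly five arguments whose fifth (the
// zonefile hash) is Clarity `none`. Argument roles are inferred from the
// checks above; all values are made up.
//
//   (contract-call? 'SP000000000000000000002Q6VF78.bns name-renewal
//     0x627463     ;; namespace: "btc"
//     0x616c696365 ;; name: "alice"
//     u2560 none none)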

export function parseNameFromContractEvent(
event: SmartContractEvent,
tx: CoreNodeParsedTxMessage,
txEvents: CoreNodeEvent[],
blockHeight: number,
chainId: ChainID
): DbBnsName | undefined {
if (tx.core_tx.status !== 'success' || !isEventFromBnsContract(event)) {
return;
}
let attachment: Attachment;
try {
attachment = parseNameRawValue(event.contract_event.raw_value);
} catch (error) {
return;
}
let name_address = attachment.attachment.metadata.tx_sender.address;
// Is this a `name-transfer`? If so, look for the new owner in an `nft_transfer` event bundled in
// the same transaction.
if (attachment.attachment.metadata.op === 'name-transfer') {
for (const txEvent of txEvents) {
if (
txEvent.type === CoreNodeEventType.NftTransferEvent &&
txEvent.nft_transfer_event.asset_identifier === `${getBnsContractID(chainId)}::names`
) {
name_address = txEvent.nft_transfer_event.recipient;
break;
}
}
}
const name: DbBnsName = {
name: `${attachment.attachment.metadata.name}.${attachment.attachment.metadata.namespace}`,
namespace_id: attachment.attachment.metadata.namespace,
address: name_address,
// expire_block will be calculated upon DB insert based on the namespace's lifetime.
expire_block: 0,
registered_at: blockHeight,
zonefile_hash: attachment.attachment.hash,
// zonefile will be updated when an `/attachments/new` message arrives.
zonefile: '',
tx_id: event.txid,
tx_index: tx.core_tx.tx_index,
status: attachment.attachment.metadata.op,
canonical: true,
};
return name;
}
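
// Note: on mainnet the asset identifier checked above resolves to
// `SP000000000000000000002Q6VF78.bns::names`, so only transfers of the BNS
// `names` NFT within the same transaction can reassign ownership here.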

export function parseNamespaceFromContractEvent(
event: SmartContractEvent,
tx: CoreNodeParsedTxMessage,
blockHeight: number
): DbBnsNamespace | undefined {
if (tx.core_tx.status !== 'success' || !isEventFromBnsContract(event)) {
return;
}
// Look for a `namespace-ready` BNS print event.
const decodedEvent = hexToCV(event.contract_event.raw_value);
if (
decodedEvent.type === ClarityType.Tuple &&
decodedEvent.data.status &&
decodedEvent.data.status.type === ClarityType.StringASCII &&
decodedEvent.data.status.data === 'ready'
) {
const namespace = parseNamespaceRawValue(
event.contract_event.raw_value,
blockHeight,
event.txid,
tx.core_tx.tx_index
);
return namespace;
}
}
@@ -25,7 +25,7 @@ interface CoreNodeEventBase {
committed: boolean;
}

interface SmartContractEvent extends CoreNodeEventBase {
export interface SmartContractEvent extends CoreNodeEventBase {
type: CoreNodeEventType.ContractEvent;
contract_event: {
/** Fully qualified contract ID, e.g. "ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH.kv-store" */
@@ -76,7 +76,7 @@ export interface StxLockEvent extends CoreNodeEventBase {
};
}

interface NftTransferEvent extends CoreNodeEventBase {
export interface NftTransferEvent extends CoreNodeEventBase {
type: CoreNodeEventType.NftTransferEvent;
nft_transfer_event: {
/** Fully qualified asset ID, e.g. "ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH.contract-name.asset-name" */

@@ -7,7 +7,7 @@ import { asyncHandler } from '../api/async-handler';
import PQueue from 'p-queue';
import * as expressWinston from 'express-winston';
import * as winston from 'winston';
import { hexToBuffer, logError, logger, I32_MAX, LogLevel } from '../helpers';
import { hexToBuffer, logError, logger, LogLevel } from '../helpers';
import {
CoreNodeBlockMessage,
CoreNodeEventType,
@@ -32,12 +32,10 @@ import {
DbMinerReward,
DbBurnchainReward,
DbRewardSlotHolder,
DbBnsName,
DbBnsNamespace,
DbBnsSubdomain,
DataStoreMicroblockUpdateData,
DataStoreTxEventData,
DbMicroblock,
DataStoreAttachmentData,
} from '../datastore/common';
import {
getTxSenderAddress,
@@ -55,21 +53,12 @@ import {
TxPayloadTypeID,
} from 'stacks-encoding-native-js';
import { ChainID } from '@stacks/transactions';
import { BnsContractIdentifier } from './bns/bns-constants';
import {
getFunctionName,
getNewOwner,
parseNameRawValue,
parseNamespaceRawValue,
parseResolver,
parseZoneFileTxt,
} from '../bns-helpers';
import {
printTopic,
namespaceReadyFunction,
nameFunctions,
BnsContractIdentifier,
} from '../bns-constants';
import * as zoneFileParser from 'zone-file';
parseNameFromContractEvent,
parseNameRenewalWithNoZonefileHashFromContractCall,
parseNamespaceFromContractEvent,
} from './bns/bns-helpers';
import { PgWriteStore } from '../datastore/pg-write-store';
import {
createDbMempoolTxFromCoreMsg,
@@ -210,10 +199,15 @@ async function handleMicroblockMessage(
});
const updateData: DataStoreMicroblockUpdateData = {
microblocks: dbMicroblocks,
txs: parseDataStoreTxEventData(parsedTxs, msg.events, {
block_height: -1, // TODO: fill during initial db insert
index_block_hash: '',
}),
txs: parseDataStoreTxEventData(
parsedTxs,
msg.events,
{
block_height: -1, // TODO: fill during initial db insert
index_block_hash: '',
},
chainId
),
};
await db.updateMicroblocks(updateData);
}
@@ -310,7 +304,7 @@ async function handleBlockMessage(
block: dbBlock,
microblocks: dbMicroblocks,
minerRewards: dbMinerRewards,
txs: parseDataStoreTxEventData(parsedTxs, msg.events, msg),
txs: parseDataStoreTxEventData(parsedTxs, msg.events, msg, chainId),
};

await db.update(dbData);
@@ -322,7 +316,8 @@ function parseDataStoreTxEventData(
blockData: {
block_height: number;
index_block_hash: string;
}
},
chainId: ChainID
): DataStoreTxEventData[] {
const dbData: DataStoreTxEventData[] = parsedTxs.map(tx => {
const dbTx: DataStoreBlockUpdateData['txs'][number] = {
@@ -336,16 +331,29 @@ function parseDataStoreTxEventData(
names: [],
namespaces: [],
};
if (tx.parsed_tx.payload.type_id === TxPayloadTypeID.SmartContract) {
const contractId = `${tx.sender_address}.${tx.parsed_tx.payload.contract_name}`;
dbTx.smartContracts.push({
tx_id: tx.core_tx.txid,
contract_id: contractId,
block_height: blockData.block_height,
source_code: tx.parsed_tx.payload.code_body,
abi: JSON.stringify(tx.core_tx.contract_abi),
canonical: true,
});
switch (tx.parsed_tx.payload.type_id) {
case TxPayloadTypeID.SmartContract:
const contractId = `${tx.sender_address}.${tx.parsed_tx.payload.contract_name}`;
dbTx.smartContracts.push({
tx_id: tx.core_tx.txid,
contract_id: contractId,
block_height: blockData.block_height,
source_code: tx.parsed_tx.payload.code_body,
abi: JSON.stringify(tx.core_tx.contract_abi),
canonical: true,
});
break;
case TxPayloadTypeID.ContractCall:
// Name renewals can happen without a zonefile_hash. In that case, the BNS contract does NOT
// emit a `name-renewal` contract log, causing us to miss this event. This function catches
// those cases.
const name = parseNameRenewalWithNoZonefileHashFromContractCall(tx, chainId);
if (name) {
dbTx.names.push(name);
}
break;
default:
break;
}
return dbTx;
});
@@ -378,51 +386,24 @@ function parseDataStoreTxEventData(
value: event.contract_event.raw_value,
};
dbTx.contractLogEvents.push(entry);
if (
event.contract_event.topic === printTopic &&
(event.contract_event.contract_identifier === BnsContractIdentifier.mainnet ||
event.contract_event.contract_identifier === BnsContractIdentifier.testnet)
) {
const functionName = getFunctionName(event.txid, parsedTxs);
if (nameFunctions.includes(functionName)) {
const attachment = parseNameRawValue(event.contract_event.raw_value);
let name_address = attachment.attachment.metadata.tx_sender.address;
if (functionName === 'name-transfer') {
const new_owner = getNewOwner(event.txid, parsedTxs);
if (new_owner) {
name_address = new_owner;
}
}
const name: DbBnsName = {
name: attachment.attachment.metadata.name.concat(
'.',
attachment.attachment.metadata.namespace
),
namespace_id: attachment.attachment.metadata.namespace,
address: name_address,
expire_block: 0,
registered_at: blockData.block_height,
zonefile_hash: attachment.attachment.hash,
zonefile: '', // zone file will be updated in /attachments/new
tx_id: event.txid,
tx_index: entry.tx_index,
status: attachment.attachment.metadata.op,
canonical: true,
};
dbTx.names.push(name);
}
if (functionName === namespaceReadyFunction) {
// event received for namespaces
const namespace: DbBnsNamespace | undefined = parseNamespaceRawValue(
event.contract_event.raw_value,
blockData.block_height,
event.txid,
entry.tx_index
);
if (namespace != undefined) {
dbTx.namespaces.push(namespace);
}
}
// Check if we have new BNS names or namespaces.
const parsedTx = parsedTxs.find(entry => entry.core_tx.txid === event.txid);
if (!parsedTx) {
throw new Error(`Unexpected missing tx during BNS parsing by tx_id ${event.txid}`);
}
const name = parseNameFromContractEvent(
event,
parsedTx,
events,
blockData.block_height,
chainId
);
if (name) {
dbTx.names.push(name);
}
const namespace = parseNamespaceFromContractEvent(event, parsedTx, blockData.block_height);
if (namespace) {
dbTx.namespaces.push(namespace);
}
break;
}
@@ -572,83 +553,33 @@ function parseDataStoreTxEventData(
}

async function handleNewAttachmentMessage(msg: CoreNodeAttachmentMessage[], db: PgWriteStore) {
for (const attachment of msg) {
if (
attachment.contract_id === BnsContractIdentifier.mainnet ||
attachment.contract_id === BnsContractIdentifier.testnet
) {
const metadataCV = decodeClarityValue<
ClarityValueTuple<{
op: ClarityValueStringAscii;
name: ClarityValueBuffer;
namespace: ClarityValueBuffer;
}>
>(attachment.metadata);
const op = metadataCV.data['op'].data;
const zonefile = Buffer.from(attachment.content.slice(2), 'hex').toString();
const zoneFileHash = attachment.content_hash;
if (op === 'name-update') {
const name = hexToBuffer(metadataCV.data['name'].buffer).toString('utf8');
const namespace = hexToBuffer(metadataCV.data['namespace'].buffer).toString('utf8');
const zoneFileContents = zoneFileParser.parseZoneFile(zonefile);
const zoneFileTxt = zoneFileContents.txt;
const blockData = {
index_block_hash: '',
parent_index_block_hash: '',
microblock_hash: '',
microblock_sequence: I32_MAX,
microblock_canonical: true,
};
// Case for subdomain
if (zoneFileTxt) {
// get unresolved subdomain
let isCanonical = true;
const dbTx = await db.getTxStrict({
txId: attachment.tx_id,
indexBlockHash: attachment.index_block_hash,
});
if (dbTx.found) {
isCanonical = dbTx.result.canonical;
blockData.index_block_hash = dbTx.result.index_block_hash;
blockData.parent_index_block_hash = dbTx.result.parent_index_block_hash;
blockData.microblock_hash = dbTx.result.microblock_hash;
blockData.microblock_sequence = dbTx.result.microblock_sequence;
blockData.microblock_canonical = dbTx.result.microblock_canonical;
} else {
logger.warn(
`Could not find transaction ${attachment.tx_id} associated with attachment`
);
}
// case for subdomain
const subdomains: DbBnsSubdomain[] = [];
for (let i = 0; i < zoneFileTxt.length; i++) {
const zoneFile = zoneFileTxt[i];
const parsedTxt = parseZoneFileTxt(zoneFile.txt);
if (parsedTxt.owner === '') continue; // if the TXT record has no owner, skip it
const subdomain: DbBnsSubdomain = {
name: name.concat('.', namespace),
namespace_id: namespace,
fully_qualified_subdomain: zoneFile.name.concat('.', name, '.', namespace),
owner: parsedTxt.owner,
zonefile_hash: parsedTxt.zoneFileHash,
zonefile: parsedTxt.zoneFile,
tx_id: attachment.tx_id,
tx_index: -1,
canonical: isCanonical,
parent_zonefile_hash: attachment.content_hash.slice(2),
parent_zonefile_index: 0, //TODO need to figure out this field
block_height: Number.parseInt(attachment.block_height, 10),
zonefile_offset: 1,
resolver: zoneFileContents.uri ? parseResolver(zoneFileContents.uri) : '',
};
subdomains.push(subdomain);
}
await db.resolveBnsSubdomains(blockData, subdomains);
}
const attachments = msg
.map(message => {
if (
message.contract_id === BnsContractIdentifier.mainnet ||
message.contract_id === BnsContractIdentifier.testnet
) {
const metadataCV = decodeClarityValue<
ClarityValueTuple<{
op: ClarityValueStringAscii;
name: ClarityValueBuffer;
namespace: ClarityValueBuffer;
}>
>(message.metadata);
return {
op: metadataCV.data['op'].data,
zonefile: message.content.slice(2),
name: hexToBuffer(metadataCV.data['name'].buffer).toString('utf8'),
namespace: hexToBuffer(metadataCV.data['namespace'].buffer).toString('utf8'),
zonefileHash: message.content_hash,
txId: message.tx_id,
indexBlockHash: message.index_block_hash,
blockHeight: Number.parseInt(message.block_height, 10),
} as DataStoreAttachmentData;
}
await db.updateZoneContent(zonefile, zoneFileHash, attachment.tx_id);
}
}
})
.filter((msg): msg is DataStoreAttachmentData => !!msg);
await db.updateAttachments(attachments);
}

interface EventMessageHandler {

@@ -157,7 +157,7 @@ type DisabledLogLevels = Exclude<
type LoggerInterface = Omit<winston.Logger, DisabledLogLevels> & { level: LogLevel };

const LOG_LEVELS: LogLevel[] = ['error', 'warn', 'info', 'http', 'verbose', 'debug', 'silly'];
const defaultLogLevel: LogLevel = (() => {
export const defaultLogLevel: LogLevel = (() => {
const STACKS_API_LOG_LEVEL_ENV_VAR = 'STACKS_API_LOG_LEVEL';
const logLevelEnvVar = process.env[
STACKS_API_LOG_LEVEL_ENV_VAR

@@ -22,15 +22,9 @@ import {
logger,
REPO_DIR,
} from '../helpers';
import { PgWriteStore } from '../datastore/pg-write-store';
import { BnsGenesisBlock } from '../event-replay/helpers';
import { PgSqlClient } from '../datastore/connection';

const IMPORT_FILES = [
'chainstate.txt',
'name_zonefiles.txt',
'subdomains.csv',
'subdomain_zonefiles.txt',
];
import { PgWriteStore } from '../datastore/pg-write-store';

const finished = util.promisify(stream.finished);
const pipeline = util.promisify(stream.pipeline);
@@ -84,21 +78,21 @@ class ChainProcessor extends stream.Writable {
zhashes: Map<string, string>;
namespace: Map<string, DbBnsNamespace>;
db: PgWriteStore;
client: PgSqlClient;
emptyBlockData = {
index_block_hash: '',
parent_index_block_hash: '',
microblock_hash: '',
microblock_sequence: I32_MAX,
microblock_canonical: true,
} as const;
sql: PgSqlClient;
genesisBlock: BnsGenesisBlock;

constructor(db: PgWriteStore, zhashes: Map<string, string>) {
constructor(
sql: PgSqlClient,
db: PgWriteStore,
zhashes: Map<string, string>,
genesisBlock: BnsGenesisBlock
) {
super();
this.zhashes = zhashes;
this.namespace = new Map();
this.client = db.sql;
this.sql = sql;
this.db = db;
this.genesisBlock = genesisBlock;
logger.info(`${this.tag}: importer starting`);
}

@@ -157,16 +151,16 @@ class ChainProcessor extends stream.Writable {
name: parts[0],
address: parts[1],
namespace_id: ns,
registered_at: 0,
registered_at: 1,
expire_block: namespace.lifetime,
zonefile: zonefile,
zonefile_hash: zonefileHash,
tx_id: '',
tx_index: 0,
tx_id: this.genesisBlock.tx_id,
tx_index: this.genesisBlock.tx_index,
canonical: true,
status: 'name-register',
};
await this.db.updateNames(this.client, this.emptyBlockData, obj);
await this.db.updateNames(this.sql, this.genesisBlock, obj);
this.rowCount += 1;
if (obj.zonefile === '') {
logger.verbose(
@@ -180,20 +174,20 @@ class ChainProcessor extends stream.Writable {
const obj: DbBnsNamespace = {
namespace_id: parts[0],
address: parts[1],
reveal_block: 0,
ready_block: 0,
reveal_block: 1,
ready_block: 1,
buckets: parts[2],
base: parseInt(parts[3], 10),
coeff: parseInt(parts[4], 10),
base: BigInt(parts[3]),
coeff: BigInt(parts[4]),
nonalpha_discount: parseInt(parts[5], 10),
no_vowel_discount: parseInt(parts[6], 10),
lifetime: parseInt(parts[7], 10),
tx_id: '',
tx_index: 0,
tx_id: this.genesisBlock.tx_id,
tx_index: this.genesisBlock.tx_index,
canonical: true,
};
this.namespace.set(obj.namespace_id, obj);
await this.db.updateNamespaces(this.client, this.emptyBlockData, obj);
await this.db.updateNamespaces(this.sql, this.genesisBlock, obj);
this.rowCount += 1;
}
}
@@ -237,9 +231,13 @@ function btcToStxAddress(btcAddress: string) {
}

class SubdomainTransform extends stream.Transform {
constructor() {
genesisBlock: BnsGenesisBlock;

constructor(genesisBlock: BnsGenesisBlock) {
super({ objectMode: true, highWaterMark: SUBDOMAIN_BATCH_SIZE });
this.genesisBlock = genesisBlock;
}

_transform(data: string, _encoding: string, callback: stream.TransformCallback) {
const parts = data.split(',');
if (parts[0] !== 'zonefile_hash') {
@@ -256,8 +254,8 @@ class SubdomainTransform extends stream.Transform {
fully_qualified_subdomain: parts[2],
owner: btcToStxAddress(parts[3]), // convert btc address to stx
block_height: 1, // burn_block_height: parseInt(parts[4], 10)
tx_index: 0,
tx_id: '',
tx_index: this.genesisBlock.tx_index,
tx_id: this.genesisBlock.tx_id,
parent_zonefile_index: parseInt(parts[5], 10),
zonefile_offset: parseInt(parts[6], 10),
resolver: parts[7],
@@ -307,12 +305,12 @@ async function valid(fileName: string): Promise<boolean> {
return true;
}

async function* readSubdomains(importDir: string) {
async function* readSubdomains(importDir: string, genesisBlock: BnsGenesisBlock) {
const metaIter = asyncIterableToGenerator<DbBnsSubdomain>(
stream.pipeline(
fs.createReadStream(path.join(importDir, 'subdomains.csv')),
new LineReaderStream({ highWaterMark: SUBDOMAIN_BATCH_SIZE }),
new SubdomainTransform(),
new SubdomainTransform(genesisBlock),
error => {
if (error) {
console.error('Error reading subdomains.csv');
@@ -394,13 +392,7 @@ class StxVestingTransform extends stream.Transform {
}
}

export async function importV1BnsData(db: PgWriteStore, importDir: string) {
const configState = await db.getConfigState();
if (configState.bns_names_onchain_imported && configState.bns_subdomains_imported) {
logger.verbose('Stacks 1.0 BNS data is already imported');
return;
}

async function validateBnsImportDir(importDir: string, importFiles: string[]) {
try {
const statResult = fs.statSync(importDir);
if (!statResult.isDirectory()) {
@@ -411,74 +403,82 @@ export async function importV1BnsData(db: PgWriteStore, importDir: string) {
throw error;
}

logger.info('Stacks 1.0 BNS data import started');
logger.info(`Using BNS export data from: ${importDir}`);

// validate contents with their .sha256 files
// check if the files we need can be read
for (const fname of IMPORT_FILES) {
for (const fname of importFiles) {
if (!(await valid(path.join(importDir, fname)))) {
const errMsg = `Cannot read import file due to sha256 mismatch: ${fname}`;
logError(errMsg);
throw new Error(errMsg);
}
}
}

export async function importV1BnsNames(
db: PgWriteStore,
importDir: string,
genesisBlock: BnsGenesisBlock
) {
const configState = await db.getConfigState();
if (configState.bns_names_onchain_imported) {
logger.verbose('Stacks 1.0 BNS names are already imported');
return;
}
await validateBnsImportDir(importDir, ['chainstate.txt', 'name_zonefiles.txt']);
logger.info('Stacks 1.0 BNS name import started');
await db.sql.begin(async sql => {
logger.info(`Disabling BNS table indices temporarily for a faster import`);
await sql`
UPDATE pg_index
SET indisready = false, indisvalid = false
WHERE indrelid = ANY (
SELECT oid FROM pg_class
WHERE relname IN ('subdomains', 'zonefiles', 'namespaces', 'names')
)
`;
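// Context for the statement above: flipping `indisready`/`indisvalid` in the
// pg_index catalog makes Postgres stop maintaining and consulting these
// indexes, so the bulk inserts below skip per-row index updates. This is a
// low-level catalog hack; the `REINDEX TABLE` calls during the subdomain
// import rebuild the indexes afterwards.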
const zhashes = await readZones(path.join(importDir, 'name_zonefiles.txt'));
await pipeline(
fs.createReadStream(path.join(importDir, 'chainstate.txt')),
new LineReaderStream({ highWaterMark: 100 }),
new ChainProcessor(db, zhashes)
new ChainProcessor(sql, db, zhashes, genesisBlock)
);

const blockData = {
index_block_hash: '',
parent_index_block_hash: '',
microblock_hash: '',
microblock_sequence: I32_MAX,
microblock_canonical: true,
const updatedConfigState: DbConfigState = {
...configState,
bns_names_onchain_imported: true,
};
await db.updateConfigState(updatedConfigState, sql);
});
logger.info('Stacks 1.0 BNS name import completed');
}

export async function importV1BnsSubdomains(
db: PgWriteStore,
importDir: string,
genesisBlock: BnsGenesisBlock
) {
const configState = await db.getConfigState();
if (configState.bns_subdomains_imported) {
logger.verbose('Stacks 1.0 BNS subdomains are already imported');
return;
}
await validateBnsImportDir(importDir, ['subdomains.csv', 'subdomain_zonefiles.txt']);
logger.info('Stacks 1.0 BNS subdomain import started');
await db.sql.begin(async sql => {
let subdomainsImported = 0;
const subdomainIter = readSubdomains(importDir);
const subdomainIter = readSubdomains(importDir, genesisBlock);
for await (const subdomainBatch of asyncBatchIterate(
subdomainIter,
SUBDOMAIN_BATCH_SIZE,
false
)) {
await db.updateBatchSubdomains(sql, blockData, subdomainBatch);
await db.updateBatchSubdomains(sql, [
{ blockData: genesisBlock, subdomains: subdomainBatch },
]);
await db.updateBatchZonefiles(sql, [{ blockData: genesisBlock, subdomains: subdomainBatch }]);
subdomainsImported += subdomainBatch.length;
if (subdomainsImported % 10_000 === 0) {
logger.info(`Subdomains imported: ${subdomainsImported}`);
}
}
logger.info(`Subdomains imported: ${subdomainsImported}`);

const updatedConfigState: DbConfigState = {
...configState,
bns_names_onchain_imported: true,
bns_subdomains_imported: true,
};
await db.updateConfigState(updatedConfigState, sql);

logger.info(`Re-indexing BNS tables. This might take a while...`);
await sql`REINDEX TABLE subdomains`;
await sql`REINDEX TABLE zonefiles`;
await sql`REINDEX TABLE namespaces`;
await sql`REINDEX TABLE names`;
});

logger.info('Stacks 1.0 BNS data import completed');
logger.info('Stacks 1.0 BNS subdomain import completed');
}

/** A passthrough stream which hashes the data as it passes through. */

12
src/index.ts
@@ -16,7 +16,6 @@ import { startEventServer } from './event-stream/event-server';
import { StacksCoreRpcClient } from './core-rpc/client';
import { createServer as createPrometheusServer } from '@promster/server';
import { registerShutdownConfig } from './shutdown-handler';
import { importV1TokenOfferingData, importV1BnsData } from './import-v1';
import { OfflineDummyStore } from './datastore/offline-dummy-store';
import { Socket } from 'net';
import * as getopts from 'getopts';
@@ -126,17 +125,6 @@ async function init(): Promise<void> {
registerMempoolPromStats(dbWriteStore.eventEmitter);

if (apiMode === StacksApiMode.default || apiMode === StacksApiMode.writeOnly) {
if (isProdEnv) {
await importV1TokenOfferingData(dbWriteStore);
} else {
logger.warn(`Notice: skipping token offering data import because of non-production NODE_ENV`);
}
if (isProdEnv && !process.env.BNS_IMPORT_DIR) {
logger.warn(`Notice: full BNS functionality requires 'BNS_IMPORT_DIR' to be set.`);
} else if (process.env.BNS_IMPORT_DIR) {
await importV1BnsData(dbWriteStore, process.env.BNS_IMPORT_DIR);
}

const configuredChainID = getApiConfiguredChainID();
const eventServer = await startEventServer({
datastore: dbWriteStore,

@@ -33,11 +33,11 @@ export async function up(pgm: MigrationBuilder): Promise<void> {
notNull: true,
},
base: {
type: 'integer',
type: 'numeric',
notNull: true,
},
coeff: {
type: 'integer',
type: 'numeric',
notNull: true,
},
nonalpha_discount: {
@@ -91,7 +91,15 @@ export async function up(pgm: MigrationBuilder): Promise<void> {
},
});

pgm.createIndex('namespaces', 'index_block_hash', { method: 'hash' });
pgm.createIndex('namespaces', 'microblock_hash', { method: 'hash' });
pgm.createIndex('namespaces', [{ name: 'ready_block', sort: 'DESC' }]);
pgm.createIndex('namespaces', 'index_block_hash');
pgm.createIndex('namespaces', [
{ name: 'ready_block', sort: 'DESC' },
{ name: 'microblock_sequence', sort: 'DESC' },
{ name: 'tx_index', sort: 'DESC' },
]);
pgm.addConstraint(
'namespaces',
'unique_namespace_id_tx_id_index_block_hash_microblock_hash',
'UNIQUE(namespace_id, tx_id, index_block_hash, microblock_hash)'
);
}

@@ -83,9 +83,16 @@ export async function up(pgm: MigrationBuilder): Promise<void> {
},
});

pgm.createIndex('names', 'tx_id', { method: 'hash' });
pgm.createIndex('names', 'name', { method: 'hash' });
pgm.createIndex('names', 'index_block_hash', { method: 'hash' });
pgm.createIndex('names', 'microblock_hash', { method: 'hash' });
pgm.createIndex('names', [{ name: 'registered_at', sort: 'DESC' }]);
pgm.createIndex('names', 'namespace_id');
pgm.createIndex('names', 'index_block_hash');
pgm.createIndex('names', [
{ name: 'registered_at', sort: 'DESC' },
{ name: 'microblock_sequence', sort: 'DESC' },
{ name: 'tx_index', sort: 'DESC' },
]);
pgm.addConstraint(
'names',
'unique_name_tx_id_index_block_hash_microblock_hash',
'UNIQUE(name, tx_id, index_block_hash, microblock_hash)'
);
}

@@ -84,10 +84,16 @@ export async function up(pgm: MigrationBuilder): Promise<void> {
},
});

pgm.createIndex('subdomains', 'owner', { method: 'hash' });
pgm.createIndex('subdomains', 'zonefile_hash', { method: 'hash' });
pgm.createIndex('subdomains', 'fully_qualified_subdomain', { method: 'hash' });
pgm.createIndex('subdomains', 'index_block_hash', { method: 'hash' });
pgm.createIndex('subdomains', 'microblock_hash', { method: 'hash' });
pgm.createIndex('subdomains', [{ name: 'block_height', sort: 'DESC' }]);
pgm.createIndex('subdomains', 'name');
pgm.createIndex('subdomains', 'index_block_hash');
pgm.createIndex('subdomains', [
{ name: 'block_height', sort: 'DESC' },
{ name: 'microblock_sequence', sort: 'DESC' },
{ name: 'tx_index', sort: 'DESC' },
]);
pgm.addConstraint(
'subdomains',
'unique_fully_qualified_subdomain_tx_id_index_block_hash_microblock_hash',
'UNIQUE(fully_qualified_subdomain, tx_id, index_block_hash, microblock_hash)'
);
}

@@ -9,6 +9,10 @@ export async function up(pgm: MigrationBuilder): Promise<void> {
type: 'serial',
primaryKey: true,
},
name: {
type: 'string',
notNull: true,
},
zonefile: {
type: 'string',
notNull: true,
@@ -16,8 +20,21 @@ export async function up(pgm: MigrationBuilder): Promise<void> {
zonefile_hash: {
type: 'string',
notNull: true,
},
tx_id: {
type: 'bytea',
notNull: false,
},
index_block_hash: {
type: 'bytea',
notNull: false,
}
});

pgm.createIndex('zonefiles', 'zonefile_hash', { method: 'hash' });
pgm.addIndex('zonefiles', 'zonefile_hash');
pgm.addConstraint(
'zonefiles',
'unique_name_zonefile_hash_tx_id_index_block_hash',
'UNIQUE(name, zonefile_hash, tx_id, index_block_hash)'
);
}

98
src/migrations/1660595195398_materialized_view_indexes.ts
Normal file
@@ -0,0 +1,98 @@
/* eslint-disable @typescript-eslint/camelcase */
import { MigrationBuilder, ColumnDefinitions } from 'node-pg-migrate';

export const shorthands: ColumnDefinitions | undefined = undefined;

export async function up(pgm: MigrationBuilder): Promise<void> {
// Add LIMIT 1 to chain_tip view so we can add the uniqueness index for `block_height`.
pgm.dropMaterializedView('chain_tip');
pgm.createMaterializedView('chain_tip', {}, `
WITH block_tip AS (
SELECT block_height, block_hash, index_block_hash
FROM blocks
WHERE block_height = (SELECT MAX(block_height) FROM blocks WHERE canonical = TRUE)
),
microblock_tip AS (
SELECT microblock_hash, microblock_sequence
FROM microblocks, block_tip
WHERE microblocks.parent_index_block_hash = block_tip.index_block_hash
AND microblock_canonical = true AND canonical = true
ORDER BY microblock_sequence DESC
LIMIT 1
),
microblock_count AS (
SELECT COUNT(*)::INTEGER AS microblock_count
FROM microblocks
WHERE canonical = TRUE AND microblock_canonical = TRUE
),
tx_count AS (
SELECT COUNT(*)::INTEGER AS tx_count
FROM txs
WHERE canonical = TRUE AND microblock_canonical = TRUE
AND block_height <= (SELECT MAX(block_height) FROM blocks WHERE canonical = TRUE)
),
tx_count_unanchored AS (
SELECT COUNT(*)::INTEGER AS tx_count_unanchored
FROM txs
WHERE canonical = TRUE AND microblock_canonical = TRUE
)
SELECT *, block_tip.block_height AS block_count
FROM block_tip
LEFT JOIN microblock_tip ON TRUE
LEFT JOIN microblock_count ON TRUE
LEFT JOIN tx_count ON TRUE
LEFT JOIN tx_count_unanchored ON TRUE
LIMIT 1
`);

pgm.addIndex('chain_tip', 'block_height', { unique: true });
pgm.addIndex('mempool_digest', 'digest', { unique: true });
pgm.addIndex('nft_custody', ['asset_identifier', 'value'], { unique: true });
pgm.addIndex('nft_custody_unanchored', ['asset_identifier', 'value'], { unique: true });
}
|
||||
|
||||
export async function down(pgm: MigrationBuilder): Promise<void> {
|
||||
pgm.dropIndex('chain_tip', 'block_height', { unique: true, ifExists: true });
|
||||
pgm.dropIndex('mempool_digest', 'digest', { unique: true, ifExists: true });
|
||||
pgm.dropIndex('nft_custody', ['asset_identifier', 'value'], { unique: true, ifExists: true });
|
||||
pgm.dropIndex('nft_custody_unanchored', ['asset_identifier', 'value'], { unique: true, ifExists: true });
|
||||
|
||||
pgm.dropMaterializedView('chain_tip');
|
||||
pgm.createMaterializedView('chain_tip', {}, `
|
||||
WITH block_tip AS (
|
||||
SELECT block_height, block_hash, index_block_hash
|
||||
FROM blocks
|
||||
WHERE block_height = (SELECT MAX(block_height) FROM blocks WHERE canonical = TRUE)
|
||||
),
|
||||
microblock_tip AS (
|
||||
SELECT microblock_hash, microblock_sequence
|
||||
FROM microblocks, block_tip
|
||||
WHERE microblocks.parent_index_block_hash = block_tip.index_block_hash
|
||||
AND microblock_canonical = true AND canonical = true
|
||||
ORDER BY microblock_sequence DESC
|
||||
LIMIT 1
|
||||
),
|
||||
microblock_count AS (
|
||||
SELECT COUNT(*)::INTEGER AS microblock_count
|
||||
FROM microblocks
|
||||
WHERE canonical = TRUE AND microblock_canonical = TRUE
|
||||
),
|
||||
tx_count AS (
|
||||
SELECT COUNT(*)::INTEGER AS tx_count
|
||||
FROM txs
|
||||
WHERE canonical = TRUE AND microblock_canonical = TRUE
|
||||
AND block_height <= (SELECT MAX(block_height) FROM blocks WHERE canonical = TRUE)
|
||||
),
|
||||
tx_count_unanchored AS (
|
||||
SELECT COUNT(*)::INTEGER AS tx_count_unanchored
|
||||
FROM txs
|
||||
WHERE canonical = TRUE AND microblock_canonical = TRUE
|
||||
)
|
||||
SELECT *, block_tip.block_height AS block_count
|
||||
FROM block_tip
|
||||
LEFT JOIN microblock_tip ON TRUE
|
||||
LEFT JOIN microblock_count ON TRUE
|
||||
LEFT JOIN tx_count ON TRUE
|
||||
LEFT JOIN tx_count_unanchored ON TRUE
|
||||
`);
|
||||
}
|
||||
@@ -13,6 +13,7 @@ import {
|
||||
DbAssetEventTypeId,
|
||||
DbBlock,
|
||||
DbBnsName,
|
||||
DbBnsNamespace,
|
||||
DbEventTypeId,
|
||||
DbFtEvent,
|
||||
DbMempoolTx,
|
||||
@@ -500,6 +501,49 @@ function testMinerReward(args?: TestMinerRewardArgs): DbMinerReward {
|
||||
};
|
||||
}
|
||||
|
||||
interface TestBnsNamespaceArgs {
|
||||
namespace_id?: string;
|
||||
address?: string;
|
||||
launched_at?: number;
|
||||
reveal_block?: number;
|
||||
ready_block?: number;
|
||||
buckets?: string;
|
||||
base?: bigint;
|
||||
coeff?: bigint;
|
||||
nonalpha_discount?: number;
|
||||
no_vowel_discount?: number;
|
||||
lifetime?: number;
|
||||
status?: string;
|
||||
tx_id?: string;
|
||||
tx_index?: number;
|
||||
canonical?: boolean;
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate a test BNS namespace
|
||||
* @param args - Optional namespace data
|
||||
* @returns `DbBnsNamespace`
|
||||
*/
|
||||
function testBnsNamespace(args?: TestBnsNamespaceArgs): DbBnsNamespace {
|
||||
return {
|
||||
namespace_id: args?.namespace_id ?? BNS_NAMESPACE_ID,
|
||||
address: args?.address ?? SENDER_ADDRESS,
|
||||
launched_at: args?.launched_at ?? BLOCK_HEIGHT,
|
||||
reveal_block: args?.reveal_block ?? BLOCK_HEIGHT,
|
||||
ready_block: args?.ready_block ?? BLOCK_HEIGHT,
|
||||
buckets: args?.buckets ?? '1,1,1',
|
||||
base: args?.base ?? 1n,
|
||||
coeff: args?.coeff ?? 1n,
|
||||
nonalpha_discount: args?.nonalpha_discount ?? 0,
|
||||
no_vowel_discount: args?.no_vowel_discount ?? 0,
|
||||
lifetime: args?.lifetime ?? 0,
|
||||
status: args?.status ?? 'ready',
|
||||
tx_id: args?.tx_id ?? TX_ID,
|
||||
tx_index: args?.tx_index ?? 0,
|
||||
canonical: args?.canonical ?? true,
|
||||
};
|
||||
}
|
||||
|
||||
interface TestBnsNameArgs {
|
||||
name?: string;
|
||||
address?: string;
|
||||
@@ -659,12 +703,24 @@ export class TestBlockBuilder {
|
||||
addTxBnsName(args?: TestBnsNameArgs): TestBlockBuilder {
|
||||
const defaultArgs: TestBnsNameArgs = {
|
||||
tx_id: this.txData.tx.tx_id,
|
||||
tx_index: this.txIndex,
|
||||
registered_at: this.block.block_height,
|
||||
};
|
||||
this.txData.names.push(testBnsName({ ...defaultArgs, ...args }));
|
||||
return this;
|
||||
}
|
||||
|
||||
addTxBnsNamespace(args?: TestBnsNamespaceArgs): TestBlockBuilder {
|
||||
const defaultArgs: TestBnsNamespaceArgs = {
|
||||
tx_id: this.txData.tx.tx_id,
|
||||
tx_index: this.txIndex,
|
||||
ready_block: this.block.block_height,
|
||||
reveal_block: this.block.block_height,
|
||||
};
|
||||
this.txData.namespaces.push(testBnsNamespace({ ...defaultArgs, ...args }));
|
||||
return this;
|
||||
}
|
||||
|
||||
build(): DataStoreBlockUpdateData {
|
||||
return this.data;
|
||||
}
|
||||
@@ -750,6 +806,15 @@ export class TestMicroblockStreamBuilder {
|
||||
return this;
|
||||
}
|
||||
|
||||
addTxBnsNamespace(args?: TestBnsNamespaceArgs): TestMicroblockStreamBuilder {
|
||||
const defaultArgs: TestBnsNamespaceArgs = {
|
||||
tx_id: this.txData.tx.tx_id,
|
||||
tx_index: this.txIndex,
|
||||
};
|
||||
this.txData.namespaces.push(testBnsNamespace({ ...defaultArgs, ...args }));
|
||||
return this;
|
||||
}
|
||||
|
||||
build(): DataStoreMicroblockUpdateData {
|
||||
return this.data;
|
||||
}
|
||||
|
||||
@@ -70,7 +70,7 @@ describe('BNS API tests', () => {
|
||||
miner_txid: '0x4321',
|
||||
canonical: true,
|
||||
})
|
||||
.addTx()
|
||||
.addTx({ tx_id: '0x1234' })
|
||||
.addTxNftEvent({
|
||||
asset_event_type_id: DbAssetEventTypeId.Mint,
|
||||
value: bnsNameCV('xyz.abc'),
|
||||
@@ -93,8 +93,8 @@ describe('BNS API tests', () => {
|
||||
const namespace: DbBnsNamespace = {
|
||||
namespace_id: 'abc',
|
||||
address: 'ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH',
|
||||
base: 1,
|
||||
coeff: 1,
|
||||
base: 1n,
|
||||
coeff: 1n,
|
||||
launched_at: 14,
|
||||
lifetime: 1,
|
||||
no_vowel_discount: 1,
|
||||
@@ -290,15 +290,15 @@ describe('BNS API tests', () => {
|
||||
zonefile_offset: 0,
|
||||
parent_zonefile_hash: 'p-test-hash',
|
||||
parent_zonefile_index: 0,
|
||||
block_height: dbBlock.block_height,
|
||||
block_height: 2,
|
||||
tx_index: 0,
|
||||
tx_id: '',
|
||||
tx_id: '0x22',
|
||||
canonical: true,
|
||||
};
|
||||
await db.resolveBnsSubdomains(
|
||||
{
|
||||
index_block_hash: dbBlock.index_block_hash,
|
||||
parent_index_block_hash: dbBlock.parent_index_block_hash,
|
||||
index_block_hash: '0x02',
|
||||
parent_index_block_hash: '0x1234',
|
||||
microblock_hash: '',
|
||||
microblock_sequence: I32_MAX,
|
||||
microblock_canonical: true,
|
||||
@@ -344,8 +344,8 @@ describe('BNS API tests', () => {
|
||||
);
|
||||
|
||||
const query1 = await supertest(api.server).get(`/v1/names/invalid/zonefile/${zonefileHash}`);
|
||||
expect(query1.status).toBe(400);
|
||||
expect(query1.body.error).toBe('Invalid name or subdomain');
|
||||
expect(query1.status).toBe(404);
|
||||
expect(query1.body.error).toBe('No such name or zonefile');
|
||||
expect(query1.type).toBe('application/json');
|
||||
});
|
||||
|
||||
@@ -381,7 +381,7 @@ describe('BNS API tests', () => {
|
||||
|
||||
const query1 = await supertest(api.server).get(`/v1/names/${name}/zonefile/invalidHash`);
|
||||
expect(query1.status).toBe(404);
|
||||
expect(query1.body.error).toBe('No such zonefile');
|
||||
expect(query1.body.error).toBe('No such name or zonefile');
|
||||
expect(query1.type).toBe('application/json');
|
||||
});
|
||||
|
||||
@@ -415,7 +415,7 @@ describe('BNS API tests', () => {
|
||||
.build();
|
||||
await db.update(block);
|
||||
|
||||
// Register another name in block 0 (imported from v1, so no nft_event produced)
|
||||
// Register another name in block 1 (imported from v1, so no nft_event produced)
|
||||
const dbName2: DbBnsName = {
|
||||
name: 'imported.btc',
|
||||
address: address,
|
||||
@@ -423,7 +423,7 @@ describe('BNS API tests', () => {
|
||||
expire_block: 10000,
|
||||
zonefile: 'test-zone-file',
|
||||
zonefile_hash: 'zonefileHash',
|
||||
registered_at: 0,
|
||||
registered_at: 1,
|
||||
canonical: true,
|
||||
tx_id: '',
|
||||
tx_index: 0,
|
||||
@@ -671,13 +671,13 @@ describe('BNS API tests', () => {
|
||||
parent_zonefile_index: 0,
|
||||
block_height: dbBlock.block_height,
|
||||
tx_index: 0,
|
||||
tx_id: '',
|
||||
tx_id: '0x22',
|
||||
canonical: true,
|
||||
};
|
||||
await db.resolveBnsSubdomains(
|
||||
{
|
||||
index_block_hash: dbBlock.index_block_hash,
|
||||
parent_index_block_hash: dbBlock.parent_index_block_hash,
|
||||
index_block_hash: '0x02',
|
||||
parent_index_block_hash: '0x1234',
|
||||
microblock_hash: '',
|
||||
microblock_sequence: I32_MAX,
|
||||
microblock_canonical: true,
|
||||
@@ -695,8 +695,8 @@ describe('BNS API tests', () => {
|
||||
|
||||
test('Fail get zonefile by name - invalid name', async () => {
|
||||
const query1 = await supertest(api.server).get(`/v1/names/invalidName/zonefile`);
|
||||
expect(query1.status).toBe(400);
|
||||
expect(query1.body.error).toBe('Invalid name or subdomain');
|
||||
expect(query1.status).toBe(404);
|
||||
expect(query1.body.error).toBe('No such name or zonefile does not exist');
|
||||
expect(query1.type).toBe('application/json');
|
||||
});
|
||||
|
||||
@@ -765,7 +765,7 @@ describe('BNS API tests', () => {
|
||||
parent_zonefile_index: 0,
|
||||
block_height: dbBlock.block_height,
|
||||
tx_index: 0,
|
||||
tx_id: '',
|
||||
tx_id: '0x1234',
|
||||
canonical: true,
|
||||
};
|
||||
await db.resolveBnsSubdomains(
|
||||
@@ -783,6 +783,15 @@ describe('BNS API tests', () => {
|
||||
`/v1/names/${subdomain.fully_qualified_subdomain}`
|
||||
);
|
||||
expect(query.status).toBe(200);
|
||||
expect(query.body).toStrictEqual({
|
||||
address: "test-address",
|
||||
blockchain: "stacks",
|
||||
last_txid: "0x1234",
|
||||
resolver: "https://registrar.blockstack.org",
|
||||
status: "registered_subdomain",
|
||||
zonefile: "test",
|
||||
zonefile_hash: "test-hash",
|
||||
});
|
||||
});
|
||||
|
||||
test('Success: fqn redirect test', async () => {
|
||||
@@ -799,7 +808,7 @@ describe('BNS API tests', () => {
|
||||
parent_zonefile_index: 0,
|
||||
block_height: dbBlock.block_height,
|
||||
tx_index: 0,
|
||||
tx_id: '',
|
||||
tx_id: '0x1234',
|
||||
canonical: true,
|
||||
};
|
||||
await db.resolveBnsSubdomains(
|
||||
|
||||
95
src/tests-bns/bns-helpers-tests.ts
Normal file
@@ -0,0 +1,95 @@
import {
parseNamespaceRawValue,
parseNameRawValue,
parseZoneFileTxt,
} from '../event-stream/bns/bns-helpers';
import * as zoneFileParser from 'zone-file';

describe('BNS helper tests', () => {
test('Success: namespace parsed', () => {
const expectedNamespace = {
namespace_id: 'xyz',
address: 'ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH',
base: 1n,
coeff: 1n,
launched_at: 14,
lifetime: 1,
no_vowel_discount: 1,
nonalpha_discount: 1,
ready_block: 4,
reveal_block: 6,
status: 'ready',
buckets: '1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1',
tx_id: '0x2114c8cda9e829f8b5d3c4163724ae9c4d9142d2bae4a35bffb006408d21c0ab',
index_block_hash: '0xdeadbeef',
};
const namespace = parseNamespaceRawValue(
// This value comes from Smart Contract Event (event.contract_event.raw_value)
'0x0c00000003096e616d657370616365020000000378797a0a70726f706572746965730c000000050b6c61756e636865642d61740a010000000000000000000000000000000e086c69666574696d650100000000000000000000000000000001106e616d6573706163652d696d706f7274051abf8e82623c380cd870931d48b525d5e12a4d67820e70726963652d66756e6374696f6e0c0000000504626173650100000000000000000000000000000001076275636b6574730b00000010010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000105636f6566660100000000000000000000000000000001116e6f2d766f77656c2d646973636f756e740100000000000000000000000000000001116e6f6e616c7068612d646973636f756e7401000000000000000000000000000000010b72657665616c65642d61740100000000000000000000000000000006067374617475730d000000057265616479',
4,
'0x2114c8cda9e829f8b5d3c4163724ae9c4d9142d2bae4a35bffb006408d21c0ab',
0
);
expect(namespace?.address).toEqual(expectedNamespace.address);
expect(namespace?.namespace_id).toEqual(expectedNamespace.namespace_id);
expect(namespace?.base).toEqual(expectedNamespace.base);
expect(namespace?.coeff).toEqual(expectedNamespace.coeff);
expect(namespace?.launched_at).toEqual(expectedNamespace.launched_at);
expect(namespace?.lifetime).toEqual(expectedNamespace.lifetime);
expect(namespace?.no_vowel_discount).toEqual(expectedNamespace.no_vowel_discount);
expect(namespace?.nonalpha_discount).toEqual(expectedNamespace.nonalpha_discount);
expect(namespace?.ready_block).toEqual(expectedNamespace.ready_block);
expect(namespace?.reveal_block).toEqual(expectedNamespace.reveal_block);
expect(namespace?.status).toEqual(expectedNamespace.status);
expect(namespace?.buckets).toEqual(expectedNamespace.buckets);
expect(namespace?.tx_id).toEqual(expectedNamespace.tx_id);
});

test('Success: parse name raw value', () => {
const expectedName = {
attachment: {
hash: 'c5217bcb3e52612ff7c835f9bb46a5f86aa73b8d',
metadata: {
name: 'abcdef',
namespace: 'xyz',
tx_sender: {
type: 0,
version: 26,
hash160: 'bf8e82623c380cd870931d48b525d5e12a4d6782',
},
op: 'name-import',
},
},
};
const expectedAttachment = expectedName.attachment;
const name = parseNameRawValue(
// This value comes from Smart Contract Event (event.contract_event.raw_value)
'0x0c000000010a6174746163686d656e740c00000003106174746163686d656e742d696e646578010000000000000000000000000000000004686173680200000014c5217bcb3e52612ff7c835f9bb46a5f86aa73b8d086d657461646174610c00000004046e616d650200000006616263646566096e616d657370616365020000000378797a026f700d0000000b6e616d652d696d706f72740974782d73656e646572051abf8e82623c380cd870931d48b525d5e12a4d6782'
);
const attachment = name.attachment;
expect(attachment.hash).toEqual(expectedAttachment.hash);
expect(attachment.metadata.name).toEqual(expectedAttachment.metadata.name);
expect(attachment.metadata.namespace).toEqual(expectedAttachment.metadata.namespace);
expect(attachment.metadata.op).toEqual(expectedAttachment.metadata.op);
expect(attachment.metadata.tx_sender.version).toEqual(
expectedAttachment.metadata.tx_sender.version
);
expect(attachment.metadata.tx_sender.hash160).toEqual(
expectedAttachment.metadata.tx_sender.hash160
);
});

test('Parse TXT', () => {
const subdomain = `$ORIGIN abcdef.xyz
$TTL 3600
asim IN TXT "owner=ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH" "seqn=0" "parts=1" "zf0=JE9SSUdJTiBhc2ltCiRUVEwgMzYwMApfaHR0cHMuX3RjcCBVUkkgMTAgMSAiaHR0cHM6Ly9nYWlhLmJsb2Nrc3RhY2sub3JnL2h1Yi9TVDJaUlgwSzI3R1cwU1AzR0pDRU1IRDk1VFFHSk1LQjdHOVkwWDFNSC9wcm9maWxlLmpzb24iCg=="
_http._tcp IN URI 10 1 "https://gaia.blockstack.org/hub/1M3325hr1utdv4HhSAfvYKhapzPP9Axhde/profile.json"
_resolver IN URI 10 1 "http://localhost:3000"
`;
const parsedZoneFile = zoneFileParser.parseZoneFile(subdomain);
const zoneFileTxt = parseZoneFileTxt(parsedZoneFile.txt?.[0].txt as string[]);
expect(zoneFileTxt.owner).toBe('ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH');
expect(zoneFileTxt.parts).toBe('1');
expect(zoneFileTxt.seqn).toBe('0');
});
});
@@ -6,8 +6,6 @@ import { createHash } from 'crypto';
import { DbTx, DbTxStatus } from '../datastore/common';
import { AnchorMode, ChainID, PostConditionMode, someCV } from '@stacks/transactions';
import { StacksMocknet } from '@stacks/network';
import { PgWriteStore } from '../datastore/pg-write-store';
import { cycleMigrations, runMigrations } from '../datastore/migrations';
import {
broadcastTransaction,
bufferCV,
@@ -22,9 +20,9 @@ import {
import BigNum = require('bn.js');
import { logger } from '../helpers';
import { testnetKeys } from '../api/routes/debug';
import { importV1BnsData } from '../import-v1';
import * as assert from 'assert';
import { TestBlockBuilder } from '../test-utils/test-builders';
import { PgWriteStore } from '../datastore/pg-write-store';
import { cycleMigrations, runMigrations } from '../datastore/migrations';


function hash160(bfr: Buffer): Buffer {
@@ -97,10 +95,10 @@ describe('BNS integration tests', () => {
body: JSON.stringify(body),
headers: { 'Content-Type': 'application/json' },
});
const submitResult = await apiResult.json();
await apiResult.json();
const expectedTxId = '0x' + transaction.txid();
const result = await standByForTx(expectedTxId);
if (result.status != 1) logger.error('name-import error');
if (result.status != 1) throw new Error('result status error');
await standbyBnsName(expectedTxId);
return transaction;
}
@@ -169,10 +167,8 @@ describe('BNS integration tests', () => {
async function initiateNamespaceNetwork(namespace: string, salt: Buffer, namespaceHash: Buffer, testnetKey: TestnetKey, expiration: number){
while (true) {
try {
const preorderTransaction = await namespacePreorder(namespaceHash, testnetKey);

const revealTransaction = await namespaceReveal(namespace, salt, testnetKey, expiration);

await namespacePreorder(namespaceHash, testnetKey);
await namespaceReveal(namespace, salt, testnetKey, expiration);
break;
} catch (e) {
console.log('error connection', e);
@@ -190,13 +186,10 @@ describe('BNS integration tests', () => {
network,
anchorMode: AnchorMode.Any
};

const transaction = await makeContractCall(txOptions);
await broadcastTransaction(transaction, network);

const readyResult = await standByForTx('0x' + transaction.txid());
if (readyResult.status != 1) logger.error('namespace-ready error');

return transaction;
}
async function nameImport(namespace: string, zonefile: string, name: string, testnetKey: TestnetKey) {
@@ -474,7 +467,7 @@ describe('BNS integration tests', () => {
const zonefile = `$ORIGIN ${name}.${namespace}\n$TTL 3600\n_http._tcp IN URI 10 1 "https://blockstack.s3.amazonaws.com/${name}.${namespace}"\n`;
const importZonefile = `$ORIGIN ${name}.${namespace}\n$TTL 3600\n_http._tcp IN URI 10 1 "https://blockstack.s3.amazonaws.com/${name}.${namespace}"\n`;
const testnetKey = { pkey: testnetKeys[2].secretKey, address: testnetKeys[2].stacksAddress};
// initializing namespace network
// initializing namespace network
await initiateNamespaceNetwork(namespace, salt, namespaceHash, testnetKey, 12);
await namespaceReady(namespace, testnetKey.pkey);

@@ -510,7 +503,7 @@ describe('BNS integration tests', () => {
const namespaceHash = hash160(Buffer.concat([Buffer.from(namespace), salt]));
const testnetKey = { pkey: testnetKeys[4].secretKey, address: testnetKeys[4].stacksAddress};
const zonefile = `$ORIGIN ${name}.${namespace}\n$TTL 3600\n_http._tcp IN URI 10 1 "https://blockstack.s3.amazonaws.com/${name}.${namespace}"\n`;


// initializing namespace network
await initiateNamespaceNetwork(namespace, salt, namespaceHash, testnetKey, 12);
await nameImport(namespace, zonefile, name, testnetKey);
@@ -524,68 +517,78 @@ describe('BNS integration tests', () => {
expect(query1.body.status).toBe('name-revoke');
});

test('name-renewal contract call', async () => {
test('name-import/name-renewal contract call', async () => {
const zonefile = `new zone file`;
const namespace = 'name-renewal';
const name = 'renewal';
const namespaceHash = hash160(Buffer.concat([Buffer.from(namespace), salt]));
const testnetKey = { pkey: testnetKeys[5].secretKey, address: testnetKeys[5].stacksAddress};


// initializing namespace network
await initiateNamespaceNetwork(namespace, salt, namespaceHash, testnetKey, 1);
await nameImport(namespace, zonefile, name, testnetKey);
await namespaceReady(namespace, testnetKey.pkey);

//name renewal
// check expiration block
const query0 = await supertest(api.server).get(`/v1/names/${name}.${namespace}`);
expect(query0.status).toBe(200);
expect(query0.type).toBe('application/json');
expect(query0.body.expire_block).toBe(0); // Imported names don't know about their namespaces

// name renewal
await nameRenewal(namespace, zonefile, testnetKey.pkey, name);
try {
const query1 = await supertest(api.server).get(`/v1/names/${name}.${namespace}`);
expect(query1.status).toBe(200);
expect(query1.type).toBe('application/json');
expect(query1.body.zonefile).toBe(zonefile);
expect(query1.body.status).toBe('name-renewal');
} catch (err: any) {
throw new Error('Error post transaction: ' + err.message);
}
});

test('bns v1-import', async () => {
await importV1BnsData(db, 'src/tests-bns/import-test-files');

// test on-chain name import
const query1 = await supertest(api.server).get(`/v1/names/zumrai.id`);
const query1 = await supertest(api.server).get(`/v1/names/${name}.${namespace}`);
expect(query1.status).toBe(200);
expect(query1.type).toBe('application/json');
expect(query1.body).toEqual({
address: 'SP29EJ0SVM2TRZ3XGVTZPVTKF4SV1VMD8C0GA5SK5',
blockchain: 'stacks',
expire_block: 52595,
last_txid: '0x',
status: 'name-register',
zonefile:
'$ORIGIN zumrai.id\n$TTL 3600\n_http._tcp IN URI 10 1 "https://gaia.blockstack.org/hub/1EPno1VcdGx89ukN2we4iVpnFtkHzw8i5d/profile.json"\n\n',
zonefile_hash: '853cd126478237bc7392e65091f7ffa5a1556a33',
});
expect(query1.body.zonefile).toBe(zonefile);
expect(query1.body.status).toBe('name-renewal');

// test subdomain import
const query2 = await supertest(api.server).get(`/v1/names/flushreset.id.blockstack`);
// Name should appear only once in namespace list
const query2 = await supertest(api.server).get(`/v1/namespaces/${namespace}/names`);
expect(query2.status).toBe(200);
expect(query2.type).toBe('application/json');
expect(query2.body).toEqual({
address: 'SP2S2F9TCAT43KEJT02YTG2NXVCPZXS1426T63D9H',
blockchain: 'stacks',
last_txid: '0x',
resolver: 'https://registrar.blockstack.org',
status: 'registered_subdomain',
zonefile:
'$ORIGIN flushreset.id.blockstack\n$TTL 3600\n_http._tcp IN URI 10 1 "https://gaia.blockstack.org/hub/1HEznKZ7mK5fmibweM7eAk8SwRgJ1bWY92/profile.json"\n\n',
zonefile_hash: '14dc091ebce8ea117e1276d802ee903cc0fdde81',
});
expect(query2.body).toStrictEqual(["renewal.name-renewal"]);

const dbquery = await db.getSubdomain({ subdomain: `flushreset.id.blockstack`, includeUnanchored: false });
assert(dbquery.found)
if (dbquery.result){
expect(dbquery.result.name).toBe('id.blockstack');}
// check new expiration block, should not be 0
const query3 = await supertest(api.server).get(`/v1/names/${name}.${namespace}`);
expect(query3.status).toBe(200);
expect(query3.type).toBe('application/json');
expect(query3.body.expire_block).not.toBe(0);
});

test('name-register/name-renewal contract call', async () => {
const saltName = '0000';
const zonefile = `new zone file`;
const namespace = 'name-renewal2';
const name = 'renewal2';
const namespaceHash = hash160(Buffer.concat([Buffer.from(namespace), salt]));
const testnetKey = { pkey: testnetKeys[5].secretKey, address: testnetKeys[5].stacksAddress};

// initializing namespace network
await initiateNamespaceNetwork(namespace, salt, namespaceHash, testnetKey, 1);
await namespaceReady(namespace, testnetKey.pkey);
await nameRegister(namespace, saltName, zonefile, testnetKey, name);

// check expiration block, should not be 0
const query0 = await supertest(api.server).get(`/v1/names/${name}.${namespace}`);
expect(query0.status).toBe(200);
expect(query0.type).toBe('application/json');
expect(query0.body.expire_block).not.toBe(0);
const prevExpiration = query0.body.expire_block;

// name renewal
await nameRenewal(namespace, zonefile, testnetKey.pkey, name);
const query1 = await supertest(api.server).get(`/v1/names/${name}.${namespace}`);
expect(query1.status).toBe(200);
expect(query1.type).toBe('application/json');
expect(query1.body.zonefile).toBe(zonefile);
expect(query1.body.status).toBe('name-renewal');

// check new expiration block, should be greater than the previous one
const query3 = await supertest(api.server).get(`/v1/names/${name}.${namespace}`);
expect(query3.status).toBe(200);
expect(query3.type).toBe('application/json');
expect(query3.body.expire_block > prevExpiration).toBe(true);
});

afterAll(async () => {

594
src/tests-bns/event-server-tests.ts
Normal file
@@ -0,0 +1,594 @@
import { ChainID } from '@stacks/transactions';
import { PgDataStore, cycleMigrations, runMigrations } from '../datastore/postgres-store';
import { PoolClient } from 'pg';
import { bnsNameCV, httpPostRequest } from '../helpers';
import { EventStreamServer, startEventServer } from '../event-stream/event-server';
import { TestBlockBuilder, TestMicroblockStreamBuilder } from '../test-utils/test-builders';
import { DbAssetEventTypeId, DbBnsZoneFile } from '../datastore/common';

describe('BNS event server tests', () => {
let db: PgDataStore;
let client: PoolClient;
let eventServer: EventStreamServer;

beforeEach(async () => {
process.env.PG_DATABASE = 'postgres';
await cycleMigrations();
db = await PgDataStore.connect({ usageName: 'tests', withNotifier: false });
client = await db.pool.connect();
eventServer = await startEventServer({
datastore: db,
chainId: ChainID.Mainnet,
serverHost: '127.0.0.1',
serverPort: 0,
httpLogLevel: 'debug',
});
});

test('namespace-ready called by a contract other than BNS', async () => {
const block = new TestBlockBuilder({
block_height: 1,
index_block_hash: '0x29fe7ba9674b9196fefa28764a35a4603065dc25c9dcf83c56648066f36a8dce',
burn_block_height: 749661,
burn_block_hash: '0x000000000000000000021e9777470811a937006cf47efceadefca2e8031c4b5f',
burn_block_time: 1660638853,
})
.addTx()
.build();
await db.update(block);
const microblock = new TestMicroblockStreamBuilder()
.addMicroblock({
microblock_hash: '0x8455c986ef89d09968b96fee0ef5b4625aa3860aa68e70123efa129f48e55c6b',
microblock_sequence: 0,
parent_index_block_hash: '0x29fe7ba9674b9196fefa28764a35a4603065dc25c9dcf83c56648066f36a8dce'
})
.build();
await db.updateMicroblocks(microblock);
const payload = {
"events": [
{
"txid": "0x605aa0554fb5ee7995f9780aa54d63b3d32550b0def95e31bdf3beb0fedefdae",
"type": "contract_event",
"committed": true,
"event_index": 50,
"contract_event": {
"topic": "print",
"raw_value": "0x0c00000003096e616d65737061636502000000046672656e0a70726f706572746965730c000000061963616e2d7570646174652d70726963652d66756e6374696f6e030b6c61756e636865642d61740a0100000000000000000000000000011886086c69666574696d65010000000000000000000000000000cd50106e616d6573706163652d696d706f727406161809f2ab9182b6ff1678f82846131c0709e51cf914636f6d6d756e6974792d68616e646c65732d76320e70726963652d66756e6374696f6e0c000000050462617365010000000c9f2c9cd04674edea3fffffff076275636b6574730b00000010010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000105636f6566660100000000000000000000000000000001116e6f2d766f77656c2d646973636f756e740100000000000000000000000000000001116e6f6e616c7068612d646973636f756e7401000000000000000000000000000000010b72657665616c65642d61740100000000000000000000000000011886067374617475730d000000057265616479",
"contract_identifier": "SP000000000000000000002Q6VF78.bns"
}
}
],
"block_hash": "0x6be6bfbf5e63ee4333c794b0489a791625ad0724722647b748379fe916bbff55",
"miner_txid": "0x1c01668438115f757cfc14210f7f7ba0bee7f9d235c44b8e35c8653ac5879205",
"block_height": 2,
"transactions": [
{
"txid": "0x605aa0554fb5ee7995f9780aa54d63b3d32550b0def95e31bdf3beb0fedefdae",
"raw_tx": "0x000000000104001809f2ab9182b6ff1678f82846131c0709e51cf900000000000000110000000000000bb80001e2ae2533ed444dcc3dc0118da5c8bbfe5da4c1943b63e3fd9b7389e3f7f384ee417a65d899182ff7791b174a426b947860df5b4006a0cb767aca275af847428d03020000000002161809f2ab9182b6ff1678f82846131c0709e51cf914636f6d6d756e6974792d68616e646c65732d7632106e616d6573706163652d72657665616c0000000402000000046672656e0200000003626f74010000000000000000000000000000cd5009",
"status": "success",
"tx_index": 46,
"raw_result": "0x0703",
"contract_abi": null,
"execution_cost": {
"runtime": 201050,
"read_count": 20,
"read_length": 92368,
"write_count": 4,
"write_length": 1386
},
"microblock_hash": "0x8455c986ef89d09968b96fee0ef5b4625aa3860aa68e70123efa129f48e55c6b",
"microblock_sequence": 0,
"microblock_parent_hash": "0xea7982ba6a5206b9efc2ab2567eedef3babae4d167619bdc74c7e148717dc208"
}
],
"anchored_cost": {
"runtime": 19669668,
"read_count": 1420,
"read_length": 8457322,
"write_count": 143,
"write_length": 9331
},
"burn_block_hash": "0x00000000000000000004afca18622e18a1f36ff19dc1aece341868c042b7f4ac",
"burn_block_time": 1660639379,
"index_block_hash": "0xd3944c1cf261982ad5d86ad14b1545a2393c0039e378706323927b3a7031a621",
"burn_block_height": 749662,
"parent_block_hash": "0xea7982ba6a5206b9efc2ab2567eedef3babae4d167619bdc74c7e148717dc208",
"parent_microblock": "0x8455c986ef89d09968b96fee0ef5b4625aa3860aa68e70123efa129f48e55c6b",
"matured_miner_rewards": [],
"parent_burn_block_hash": "0x000000000000000000021e9777470811a937006cf47efceadefca2e8031c4b5f",
"parent_index_block_hash": "0x29fe7ba9674b9196fefa28764a35a4603065dc25c9dcf83c56648066f36a8dce",
"parent_burn_block_height": 749661,
"confirmed_microblocks_cost": {
"runtime": 174668984,
"read_count": 12067,
"read_length": 54026355,
"write_count": 1701,
"write_length": 134399
},
"parent_microblock_sequence": 0,
"parent_burn_block_timestamp": 1660638853
};

await httpPostRequest({
host: '127.0.0.1',
port: eventServer.serverAddress.port,
path: '/new_block',
headers: { 'Content-Type': 'application/json' },
body: Buffer.from(JSON.stringify(payload), 'utf8'),
throwOnNotOK: true,
});

const namespaces = await db.getNamespaceList({ includeUnanchored: true });
expect(namespaces.results).toStrictEqual(['fren']);

const namespace = await db.getNamespace({ namespace: 'fren', includeUnanchored: true });
expect(namespace.found).toBe(true);
expect(namespace.result?.namespace_id).toBe('fren');
expect(namespace.result?.lifetime).toBe(52560);
expect(namespace.result?.status).toBe('ready');
expect(namespace.result?.ready_block).toBe(2);
});

test('name-transfer called by a contract other than BNS', async () => {
const block = new TestBlockBuilder({
block_height: 1,
block_hash: '0x09458029b7c0e43e015bd3202c0f9512c2b394e0481bfd2bdd096ae7b5b862f2',
index_block_hash: '0xad9403fc8d8eaef47816555cac51dca9d934384aa9b2581f9b9085509b2af915',
burn_block_height: 743853,
burn_block_hash: '0x00000000000000000008b9d65609c6b39bb89d7da35433e4b287835d7112d6d4',
burn_block_time: 1657123396,
})
.addTx({
tx_id: '0x1234',
sender_address: 'SPP117ENNNDQVQ1G3E0N1AP178GXBTC2YNQ3H7J'
})
.addTxBnsNamespace({
namespace_id: 'btc',
lifetime: 1000
})
.addTxBnsName({
name: 'dayslikewater.btc',
namespace_id: 'btc',
zonefile_hash: 'b472a266d0bd89c13706a4132ccfb16f7c3b9fcb',
address: 'SPP117ENNNDQVQ1G3E0N1AP178GXBTC2YNQ3H7J'
})
.addTxNftEvent({
asset_event_type_id: DbAssetEventTypeId.Mint,
value: bnsNameCV('dayslikewater.btc'),
asset_identifier: 'SP000000000000000000002Q6VF78.bns::names',
recipient: 'SPP117ENNNDQVQ1G3E0N1AP178GXBTC2YNQ3H7J',
})
.build();
await db.update(block);
const microblock = new TestMicroblockStreamBuilder()
.addMicroblock({
microblock_hash: '0xccdd11fef1792979bc54a9b686e9cc4fc3d64f2a9b2d8ee9d34fe27bfab783a4',
microblock_sequence: 0,
parent_index_block_hash: '0xad9403fc8d8eaef47816555cac51dca9d934384aa9b2581f9b9085509b2af915'
})
.build();
await db.updateMicroblocks(microblock);

const name1 = await db.getName({
name: 'dayslikewater.btc',
includeUnanchored: true,
chainId: ChainID.Mainnet
});
expect(name1.found).toBe(true);
expect(name1.result?.namespace_id).toBe('btc');
expect(name1.result?.tx_id).toBe('0x1234');
expect(name1.result?.status).toBe('name-register');
expect(name1.result?.expire_block).toBe(1001);
expect(name1.result?.address).toBe('SPP117ENNNDQVQ1G3E0N1AP178GXBTC2YNQ3H7J');

const payload = {
"events": [
{
"txid": "0xa75ebee2c824c4943bf8494b101ea7ee7d44191b4a8f761582ce99ef28befb19",
"type": "contract_event",
"committed": true,
"event_index": 74,
"contract_event": {
"topic": "print",
"raw_value": "0x0c000000010a6174746163686d656e740c00000003106174746163686d656e742d696e646578010000000000000000000000000000e52b04686173680200000014b472a266d0bd89c13706a4132ccfb16f7c3b9fcb086d657461646174610c00000004046e616d65020000000d646179736c696b657761746572096e616d6573706163650200000003627463026f700d0000000d6e616d652d7472616e736665720974782d73656e6465720516016084eead6adbeee180dc0a855609d10eaf4c17",
"contract_identifier": "SP000000000000000000002Q6VF78.bns"
}
},
{
"txid": "0xa75ebee2c824c4943bf8494b101ea7ee7d44191b4a8f761582ce99ef28befb19",
"type": "nft_transfer_event",
"committed": true,
"event_index": 73,
"nft_transfer_event": {
"sender": "SPP117ENNNDQVQ1G3E0N1AP178GXBTC2YNQ3H7J",
"raw_value": "0x0c00000002046e616d65020000000d646179736c696b657761746572096e616d6573706163650200000003627463",
"recipient": "SP1TY00PDWJVNVEX7H7KJGS2K2YXHTQMY8C0G1NVP",
"asset_identifier": "SP000000000000000000002Q6VF78.bns::names"
}
},
{
"txid": "0xa75ebee2c824c4943bf8494b101ea7ee7d44191b4a8f761582ce99ef28befb19",
"type": "stx_transfer_event",
"committed": true,
"event_index": 71,
"stx_transfer_event": {
"amount": "2500",
"sender": "SP2KAF9RF86PVX3NEE27DFV1CQX0T4WGR41X3S45C.bns-marketplace-v3",
"recipient": "SP2KAF9RF86PVX3NEE27DFV1CQX0T4WGR41X3S45C"
}
}
],
"block_hash": "0x7d18920cc47f731f186fb9f731d2e8d5029bbab6d73fd012ac3e10637a8e4a37",
"miner_txid": "0xbed35e9e7eb7f98583c87743d3860ab63f2506f7f1efe24740cd37f7708de0b4",
"block_height": 2,
"transactions": [
{
"txid": "0xa75ebee2c824c4943bf8494b101ea7ee7d44191b4a8f761582ce99ef28befb19",
"raw_tx": "0x00000000010400016084eead6adbeee180dc0a855609d10eaf4c1700000000000000020000000000000bb80000e452e9d87e94a2a4364e89af3ab44b3ce1117afb6505721ff5b801294e1280f0616ee4d21a6ef9bcca1ea15ac65477e79df3427f7fd6c41c80938f8cca6d2cd0030200000002000316a6a7a70f41adbe8eae708ed7ec2cbf41a272182012626e732d6d61726b6574706c6163652d76330500000000000186a0020216016084eead6adbeee180dc0a855609d10eaf4c1716000000000000000000000000000000000000000003626e73056e616d65730c00000002046e616d65020000000d646179736c696b657761746572096e616d6573706163650200000003627463100216a6a7a70f41adbe8eae708ed7ec2cbf41a272182012626e732d6d61726b6574706c6163652d76330a6163636570742d626964000000030200000003627463020000000d646179736c696b6577617465720a0200000014b472a266d0bd89c13706a4132ccfb16f7c3b9fcb",
"status": "success",
"tx_index": 25,
"raw_result": "0x0703",
"contract_abi": null,
"execution_cost": {
"runtime": 381500,
"read_count": 42,
"read_length": 96314,
"write_count": 9,
"write_length": 359
},
"microblock_hash": null,
"microblock_sequence": null,
"microblock_parent_hash": null
}
],
"anchored_cost": {
"runtime": 44194708,
"read_count": 4105,
"read_length": 11476905,
"write_count": 546,
"write_length": 47312
},
"burn_block_hash": "0x00000000000000000005e28a41cdb7461953b9424b4fd44a9211a145a1c0346d",
"burn_block_time": 1657125225,
"index_block_hash": "0xb70205d38a8666cbd071239b4ec28ae7d12a2c32341118d7c6d4d1e22f56014e",
"burn_block_height": 743854,
"parent_block_hash": "0x09458029b7c0e43e015bd3202c0f9512c2b394e0481bfd2bdd096ae7b5b862f2",
"parent_microblock": "0xccdd11fef1792979bc54a9b686e9cc4fc3d64f2a9b2d8ee9d34fe27bfab783a4",
"matured_miner_rewards": [],
"parent_burn_block_hash": "0x00000000000000000008b9d65609c6b39bb89d7da35433e4b287835d7112d6d4",
"parent_index_block_hash": "0xad9403fc8d8eaef47816555cac51dca9d934384aa9b2581f9b9085509b2af915",
"parent_burn_block_height": 743853,
"confirmed_microblocks_cost": {
"runtime": 48798,
"read_count": 10,
"read_length": 40042,
"write_count": 3,
"write_length": 19
},
"parent_microblock_sequence": 0,
"parent_burn_block_timestamp": 1657123396
};

await httpPostRequest({
host: '127.0.0.1',
port: eventServer.serverAddress.port,
path: '/new_block',
headers: { 'Content-Type': 'application/json' },
body: Buffer.from(JSON.stringify(payload), 'utf8'),
throwOnNotOK: true,
});

const name2 = await db.getName({
name: 'dayslikewater.btc',
includeUnanchored: true,
chainId: ChainID.Mainnet
});
expect(name2.found).toBe(true);
expect(name2.result?.namespace_id).toBe('btc');
expect(name2.result?.tx_id).toBe('0xa75ebee2c824c4943bf8494b101ea7ee7d44191b4a8f761582ce99ef28befb19');
expect(name2.result?.status).toBe('name-transfer');
expect(name2.result?.expire_block).toBe(1001); // Unchanged as it was not renewed
expect(name2.result?.address).toBe('SP1TY00PDWJVNVEX7H7KJGS2K2YXHTQMY8C0G1NVP');
});

test('name-renewal called with no zonefile_hash', async () => {
const block = new TestBlockBuilder({
block_height: 1,
block_hash: '0xf81ef7f114213b9034a4378345a931a97c781fab398c3d7a2053f0d0bf48d311',
index_block_hash: '0xaec282925b5096c0bd98588d25a97e134bcc4f19b6600859fa267cf0ee4eaf2d',
burn_block_height: 726955,
burn_block_hash: '0x00000000000000000001523f01cb4304d39527454d2eec79817b50c033a5c5d9',
burn_block_time: 1647068146,
})
.addTx({
tx_id: '0x1234',
sender_address: 'SP3GWTV1SMF9HDS4VY5NMM833CHH266W4YBASVYMZ'
})
.addTxBnsNamespace({
namespace_id: 'id',
lifetime: 1000
})
.addTxBnsName({
name: 'friedger.id',
namespace_id: 'id',
zonefile_hash: 'b472a266d0bd89c13706a4132ccfb16f7c3b9fcb',
address: 'SP3GWTV1SMF9HDS4VY5NMM833CHH266W4YBASVYMZ'
})
.addTxNftEvent({
asset_event_type_id: DbAssetEventTypeId.Mint,
value: bnsNameCV('friedger.id'),
asset_identifier: 'SP000000000000000000002Q6VF78.bns::names',
recipient: 'SP3GWTV1SMF9HDS4VY5NMM833CHH266W4YBASVYMZ',
})
.build();
await db.update(block);
const microblock = new TestMicroblockStreamBuilder()
.addMicroblock({
microblock_hash: '0x640362ec47c40de3337491993e42efe60d05187431633ab03c3f5d33e70d1f8e',
microblock_sequence: 0,
parent_index_block_hash: '0xaec282925b5096c0bd98588d25a97e134bcc4f19b6600859fa267cf0ee4eaf2d'
})
.build();
await db.updateMicroblocks(microblock);

const name1 = await db.getName({
name: 'friedger.id',
includeUnanchored: true,
chainId: ChainID.Mainnet
});
expect(name1.found).toBe(true);
expect(name1.result?.namespace_id).toBe('id');
expect(name1.result?.tx_id).toBe('0x1234');
expect(name1.result?.status).toBe('name-register');
expect(name1.result?.expire_block).toBe(1001);
expect(name1.result?.address).toBe('SP3GWTV1SMF9HDS4VY5NMM833CHH266W4YBASVYMZ');

const payload = {
"events": [],
"block_hash": "0xaaee893667244adcb8581abac372f1f8c385d402b71e8e8b4ac91e8066024fd5",
"miner_txid": "0x6ff493c6b98b9cff0638c7c5276af8e627b8ed779965a5f1c11bbc0810834b3e",
"block_height": 2,
"transactions": [
{
"txid": "0xf037c8da8210e2a348bbecd3bc44901de875d3774c5fce49cb75d95f2dc2ca4d",
"raw_tx": "0x00000000010500e1cd6c39a3d316e49bf16b4a20636462231b84f200000000000000000000000000000000000094f2c8529dcb8a55a5cfd4434c68cae9cd54f26f01c656369585db3ba364150a4fead679adf35cf5ba1026656b3873daf3380f48ec6dcc175ada868e531decf5001d04c185cad28a3f5299d3fcbcbcbe66b2e1e227000000000000000000000000000186a0000064cc0eb565e85c0d4110c9a760c8fdad21999409f89320e355f326c144b8ada4268244f80734170cea96f683d2431b59f07f276a10efc80793d4dceef8feb2310302000000000216000000000000000000000000000000000000000003626e730c6e616d652d72656e6577616c000000050200000002696402000000086672696564676572010000000000000000000000000001a72a0909",
"status": "success",
"tx_index": 2,
"raw_result": "0x0703",
"contract_abi": null,
"execution_cost": {
"runtime": 184253,
"read_count": 11,
"read_length": 43250,
"write_count": 1,
"write_length": 143
},
"microblock_hash": null,
"microblock_sequence": null,
"microblock_parent_hash": null
}
],
"anchored_cost": {
"runtime": 28375070,
"read_count": 8888,
"read_length": 1085153,
"write_count": 593,
"write_length": 156284
},
"burn_block_hash": "0x0000000000000000000552fb5fd8c08ad8f1ef30c239369a8a3380ec1566047a",
"burn_block_time": 1647068392,
"index_block_hash": "0x9ff46918054b1aa94571a60e14921a56977f26af2adcbf4a7f64138566feba48",
"burn_block_height": 726956,
"parent_block_hash": "0xf81ef7f114213b9034a4378345a931a97c781fab398c3d7a2053f0d0bf48d311",
"parent_microblock": "0x640362ec47c40de3337491993e42efe60d05187431633ab03c3f5d33e70d1f8e",
"matured_miner_rewards": [],
"parent_burn_block_hash": "0x00000000000000000001523f01cb4304d39527454d2eec79817b50c033a5c5d9",
"parent_index_block_hash": "0xaec282925b5096c0bd98588d25a97e134bcc4f19b6600859fa267cf0ee4eaf2d",
"parent_burn_block_height": 726955,
"confirmed_microblocks_cost": {
"runtime": 360206,
"read_count": 38,
"read_length": 95553,
"write_count": 8,
"write_length": 378
},
"parent_microblock_sequence": 0,
"parent_burn_block_timestamp": 1647068146
};

await httpPostRequest({
host: '127.0.0.1',
port: eventServer.serverAddress.port,
path: '/new_block',
headers: { 'Content-Type': 'application/json' },
body: Buffer.from(JSON.stringify(payload), 'utf8'),
throwOnNotOK: true,
});

const name2 = await db.getName({
name: 'friedger.id',
includeUnanchored: true,
chainId: ChainID.Mainnet
});
expect(name2.found).toBe(true);
expect(name2.result?.namespace_id).toBe('id');
expect(name2.result?.tx_id).toBe('0xf037c8da8210e2a348bbecd3bc44901de875d3774c5fce49cb75d95f2dc2ca4d');
expect(name2.result?.status).toBe('name-renewal');
expect(name2.result?.expire_block).toBe(1002); // Updated correctly
expect(name2.result?.address).toBe('SP3GWTV1SMF9HDS4VY5NMM833CHH266W4YBASVYMZ');
});

test('/attachments/new with re-orged zonefiles', async () => {
const block1 = new TestBlockBuilder({
block_height: 1,
index_block_hash: '0x0101',
})
.addTx()
.addTxBnsNamespace({ namespace_id: 'btc' })
.addTxBnsName({ name: 'jnj.btc', namespace_id: 'btc' })
.addTxNftEvent({
asset_event_type_id: DbAssetEventTypeId.Mint,
value: bnsNameCV('jnj.btc'),
asset_identifier: 'SP000000000000000000002Q6VF78.bns::names',
recipient: 'ST5RRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1ZA',
})
.build();
await db.update(block1);

const block2 = new TestBlockBuilder({
block_height: 2,
index_block_hash: '0x0200',
parent_index_block_hash: '0x0101'
})
.addTx({ tx_id: '0x1212' })
.addTxBnsName({
name: 'jnj.btc',
namespace_id: 'btc',
status: 'name-update', // Canonical update
tx_id: '0x1212',
zonefile_hash: '0x9198e0b61a029671e53bd59aa229e7ae05af35a3'
})
.build();
await db.update(block2);

const block2b = new TestBlockBuilder({
block_height: 2,
index_block_hash: '0x0201',
parent_index_block_hash: '0x0101'
})
.addTx({ tx_id: '0x121266' })
.addTxBnsName({
name: 'jnj.btc',
namespace_id: 'btc',
status: 'name-update', // Non-canonical update
tx_id: '0x121266',
zonefile_hash: '0xffff'
})
.build();
await db.update(block2b);

const block3 = new TestBlockBuilder({
block_height: 3,
index_block_hash: '0x0300',
parent_index_block_hash: '0x0200'
})
.addTx({ tx_id: '0x3333' })
.build();
await db.update(block3);

const payload = [
{
"tx_id": "0x1212", // Canonical
"content": "0x244f524947494e206a6e6a2e6274632e0a2454544c20333630300a5f687474702e5f74637009494e095552490931300931092268747470733a2f2f676169612e626c6f636b737461636b2e6f72672f6875622f317a38417a79684334326e3854766f4661554c326e7363614347487151515755722f70726f66696c652e6a736f6e220a0a",
"metadata": "0x0c00000004046e616d6502000000036a6e6a096e616d6573706163650200000003627463026f700d0000000d6e616d652d72656769737465720974782d73656e64657205163763c6b37100efa8261e5fc1b1e8c18cd3fed9b6",
"contract_id": "SP000000000000000000002Q6VF78.bns",
"block_height": 17307,
"content_hash": "0x9198e0b61a029671e53bd59aa229e7ae05af35a3",
"attachment_index": 823,
"index_block_hash": "0x0200"
},
{
"tx_id": "0x121266", // Non-canonical
"content": "0x",
"metadata": "0x0c00000004046e616d6502000000036a6e6a096e616d6573706163650200000003627463026f700d0000000d6e616d652d72656769737465720974782d73656e64657205163763c6b37100efa8261e5fc1b1e8c18cd3fed9b6",
"contract_id": "SP000000000000000000002Q6VF78.bns",
"block_height": 17307,
"content_hash": "0xffff",
"attachment_index": 823,
"index_block_hash": "0x0201"
},
];

await httpPostRequest({
host: '127.0.0.1',
port: eventServer.serverAddress.port,
path: '/attachments/new',
headers: { 'Content-Type': 'application/json' },
body: Buffer.from(JSON.stringify(payload), 'utf8'),
throwOnNotOK: true,
});

const name = await db.getName({ name: 'jnj.btc', chainId: ChainID.Mainnet, includeUnanchored: true });
expect(name.found).toBe(true);
expect(name.result?.zonefile_hash).toBe('9198e0b61a029671e53bd59aa229e7ae05af35a3');
expect(name.result?.index_block_hash).toBe('0x0200');
expect(name.result?.tx_id).toBe('0x1212');
expect(name.result?.status).toBe('name-update');
});

test('/attachments/new with duplicate zonefiles for the same tx', async () => {
const block1 = new TestBlockBuilder({
block_height: 1,
index_block_hash: '0x0101',
})
.addTx({ tx_id: '0x1234' })
.addTxBnsNamespace({ namespace_id: 'btc' })
.addTxBnsName({
name: 'jnj.btc',
namespace_id: 'btc',
zonefile_hash: '0x9198e0b61a029671e53bd59aa229e7ae05af35a3'
})
.addTxNftEvent({
asset_event_type_id: DbAssetEventTypeId.Mint,
value: bnsNameCV('jnj.btc'),
asset_identifier: 'SP000000000000000000002Q6VF78.bns::names',
recipient: 'ST5RRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1ZA',
})
.build();
await db.update(block1);

const payload = [
{
"tx_id": "0x1234",
"content": "0x",
"metadata": "0x0c00000004046e616d6502000000036a6e6a096e616d6573706163650200000003627463026f700d0000000d6e616d652d72656769737465720974782d73656e64657205163763c6b37100efa8261e5fc1b1e8c18cd3fed9b6",
"contract_id": "SP000000000000000000002Q6VF78.bns",
"block_height": 1,
"content_hash": "0x9198e0b61a029671e53bd59aa229e7ae05af35a3",
"attachment_index": 823,
"index_block_hash": "0x0101"
},
{
"tx_id": "0x1234",
"content": "0x244f524947494e206a6e6a2e6274632e0a2454544c20333630300a5f687474702e5f74637009494e095552490931300931092268747470733a2f2f676169612e626c6f636b737461636b2e6f72672f6875622f317a38417a79684334326e3854766f4661554c326e7363614347487151515755722f70726f66696c652e6a736f6e220a0a",
"metadata": "0x0c00000004046e616d6502000000036a6e6a096e616d6573706163650200000003627463026f700d0000000d6e616d652d72656769737465720974782d73656e64657205163763c6b37100efa8261e5fc1b1e8c18cd3fed9b6",
"contract_id": "SP000000000000000000002Q6VF78.bns",
"block_height": 1,
"content_hash": "0x9198e0b61a029671e53bd59aa229e7ae05af35a3", // Same zonefile_hash but different content, this should overwrite the entry above
"attachment_index": 823,
"index_block_hash": "0x0101"
},
{
"tx_id": "0x1234",
"content": "0x244f524947494e206a6e6a2e6274632e0a2454544c20333630300a5f687474702e5f74637009494e095552490931300931092268747470733a2f2f676169612e626c6f636b737461636b2e6f72672f6875622f317a38417a79684334326e3854766f4661554c326e7363614347487151515755722f70726f66696c652e6a736f6e220a0a",
"metadata": "0x0c00000004046e616d6502000000036a6e6a096e616d6573706163650200000003627463026f700d0000000d6e616d652d72656769737465720974782d73656e64657205163763c6b37100efa8261e5fc1b1e8c18cd3fed9b6",
"contract_id": "SP000000000000000000002Q6VF78.bns",
"block_height": 1,
"content_hash": "0x9198e0b61a029671e53bd59aa229e7ae05af35a3", // Also overwrite
"attachment_index": 823,
"index_block_hash": "0x0101"
},
];

await httpPostRequest({
host: '127.0.0.1',
port: eventServer.serverAddress.port,
path: '/attachments/new',
headers: { 'Content-Type': 'application/json' },
body: Buffer.from(JSON.stringify(payload), 'utf8'),
throwOnNotOK: true,
});

// To validate table data we'll query it directly. There should only be one zonefile.
const result = await client.query<DbBnsZoneFile>(`SELECT * FROM zonefiles`);
expect(result.rowCount).toBe(1);
expect(result.rows[0].zonefile).toBe('$ORIGIN jnj.btc.\n$TTL 3600\n_http._tcp\tIN\tURI\t10\t1\t"https://gaia.blockstack.org/hub/1z8AzyhC42n8TvoFaUL2nscaCGHqQQWUr/profile.json"\n\n');
});

afterEach(async () => {
await eventServer.closeAsync();
client.release();
await db?.close();
await runMigrations(undefined, 'down');
});
});
174
src/tests-bns/v1-import-tests.ts
Normal file
@@ -0,0 +1,174 @@
|
||||
import { PgDataStore, cycleMigrations, runMigrations } from '../datastore/postgres-store';
|
||||
import { PoolClient } from 'pg';
|
||||
import { ApiServer, startApiServer } from '../api/init';
|
||||
import * as supertest from 'supertest';
|
||||
import { startEventServer } from '../event-stream/event-server';
|
||||
import { Server } from 'net';
|
||||
import { ChainID } from '@stacks/transactions';
|
||||
import { importV1BnsNames, importV1BnsSubdomains } from '../import-v1';
|
||||
import * as assert from 'assert';
|
||||
import { TestBlockBuilder } from '../test-utils/test-builders';
|
||||
import { DataStoreBlockUpdateData } from '../datastore/common';
|
||||
import { BnsGenesisBlock } from '../event-replay/helpers';
|
||||
|
||||
describe('BNS V1 import', () => {
|
||||
let db: PgDataStore;
|
||||
let client: PoolClient;
|
||||
let eventServer: Server;
|
||||
let api: ApiServer;
|
||||
let block: DataStoreBlockUpdateData;
|
||||
|
||||
beforeEach(async () => {
|
||||
process.env.PG_DATABASE = 'postgres';
|
||||
await cycleMigrations();
|
||||
db = await PgDataStore.connect({ usageName: 'tests' });
|
||||
client = await db.pool.connect();
|
||||
eventServer = await startEventServer({ datastore: db, chainId: ChainID.Testnet, httpLogLevel: 'silly' });
|
||||
api = await startApiServer({ datastore: db, chainId: ChainID.Testnet, httpLogLevel: 'silly' });
|
||||
|
||||
block = new TestBlockBuilder().addTx().build();
|
||||
await db.update(block);
|
||||
});
|
||||
|
||||
  test('v1-import', async () => {
    const genesis: BnsGenesisBlock = {
      index_block_hash: block.block.index_block_hash,
      parent_index_block_hash: block.block.parent_index_block_hash,
      microblock_canonical: true,
      microblock_hash: block.block.parent_microblock_hash,
      microblock_sequence: block.block.parent_microblock_sequence,
      tx_id: block.txs[0].tx.tx_id,
      tx_index: block.txs[0].tx.tx_index,
    };
    await importV1BnsNames(db, 'src/tests-bns/import-test-files', genesis);
    await importV1BnsSubdomains(db, 'src/tests-bns/import-test-files', genesis);

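    // Both importers read the V1 export fixtures under `src/tests-bns/import-test-files`
    // and anchor every imported row to the genesis block built in beforeEach, so the
    // rows are canonical from the start.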
    // Names
    const query1 = await supertest(api.server).get(`/v1/names/zumrai.id`);
    expect(query1.status).toBe(200);
    expect(query1.type).toBe('application/json');
    expect(query1.body).toEqual({
      address: 'SP29EJ0SVM2TRZ3XGVTZPVTKF4SV1VMD8C0GA5SK5',
      blockchain: 'stacks',
      expire_block: 52596,
      last_txid: '0x1234',
      status: 'name-register',
      zonefile:
        '$ORIGIN zumrai.id\n$TTL 3600\n_http._tcp IN URI 10 1 "https://gaia.blockstack.org/hub/1EPno1VcdGx89ukN2we4iVpnFtkHzw8i5d/profile.json"\n\n',
      zonefile_hash: '853cd126478237bc7392e65091f7ffa5a1556a33',
    });

    const query2 = await supertest(api.server).get(`/v1/names/zumrai.id/zonefile/853cd126478237bc7392e65091f7ffa5a1556a33`);
    expect(query2.status).toBe(200);
    expect(query2.type).toBe('application/json');
    expect(query2.body).toEqual({
      zonefile: '$ORIGIN zumrai.id\n$TTL 3600\n_http._tcp IN URI 10 1 "https://gaia.blockstack.org/hub/1EPno1VcdGx89ukN2we4iVpnFtkHzw8i5d/profile.json"\n\n',
    });

    const query3 = await supertest(api.server).get(`/v1/names/zumrai.id/zonefile`);
    expect(query3.status).toBe(200);
    expect(query3.type).toBe('application/json');
    expect(query3.body).toEqual({
      zonefile: '$ORIGIN zumrai.id\n$TTL 3600\n_http._tcp IN URI 10 1 "https://gaia.blockstack.org/hub/1EPno1VcdGx89ukN2we4iVpnFtkHzw8i5d/profile.json"\n\n',
    });

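    // Note that query2 and query3 return the same body: requesting /zonefile without a
    // hash resolves to the name's current zonefile.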
    const query4 = await supertest(api.server).get(`/v1/names/id.blockstack/subdomains`);
    expect(query4.status).toBe(200);
    expect(query4.type).toBe('application/json');
    expect(query4.body.sort()).toStrictEqual([
      "12312313231.id.blockstack", "aichamez.id.blockstack", "ale082308as.id.blockstack",
      "alejandro772.id.blockstack", "alkorsandor8_2.id.blockstack", "amir4good.id.blockstack",
      "anasa680.id.blockstack", "ancafajardo.id.blockstack", "angelessebastian.id.blockstack",
      "blafus3l.id.blockstack", "caomicoje.id.blockstack", "con_adrada34516.id.blockstack",
      "cryptichorizon.id.blockstack", "drgenius.id.blockstack", "drifting_dude.id.blockstack",
      "enavarrocollin.id.blockstack", "entryist.id.blockstack", "flushreset.id.blockstack",
      "harukoscarlet.id.blockstack", "hintonh924.id.blockstack", "johnkinney.id.blockstack",
      "jokialternative.id.blockstack", "joren_instance.id.blockstack", "kerodriguez.id.blockstack",
      "krishares10.id.blockstack", "liviaelyse.id.blockstack", "luke_mwenya1.id.blockstack",
      "milkyymocha.id.blockstack", "mithical.id.blockstack", "mrbotham.id.blockstack",
      "mymansgotabeefy1.id.blockstack", "neelyblake996.id.blockstack", "nihal_t_m.id.blockstack",
      "okamii63.id.blockstack", "robertascardoso.id.blockstack", "sheridoug.id.blockstack",
      "sipapi19.id.blockstack", "slemanb44.id.blockstack", "slimttfu.id.blockstack",
      "splevine.id.blockstack", "sportsman66.id.blockstack", "starbvuks.id.blockstack",
      "subtly_fresh.id.blockstack", "svirchok.id.blockstack", "theironcook.id.blockstack",
      "thingnotok.id.blockstack", "ujku1977.id.blockstack", "yanadda9.id.blockstack",
      "yoemmx00.id.blockstack", "zachgaming.id.blockstack"
    ].sort());

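    // Sorting both sides keeps the assertion independent of the response ordering.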
    const query5 = await supertest(api.server).get(`/v1/names/`);
    expect(query5.status).toBe(200);
    expect(query5.type).toBe('application/json');
    expect(query5.body.sort()).toStrictEqual([
      "0.id", "1.id", "10.id", "10x.id", "111111111.id", "123.id", "zinai.id", "zlh.id",
      "zone117x.id", "zumminer_crux.id", "zumminer_dev_crux.id", "zumrai.id",
    ].sort());

    // Namespaces
    const query6 = await supertest(api.server).get(`/v1/namespaces/`);
    expect(query6.status).toBe(200);
    expect(query6.type).toBe('application/json');
    expect(query6.body).toEqual({
      namespaces: ["blockstack", "graphite", "helloworld", "id", "podcast"]
    });

    const query7 = await supertest(api.server).get(`/v1/namespaces/id/names`);
    expect(query7.status).toBe(200);
    expect(query7.type).toBe('application/json');
    expect(query7.body.sort()).toStrictEqual([
      "0.id", "1.id", "10.id", "10x.id", "111111111.id", "123.id", "zinai.id", "zlh.id",
      "zone117x.id", "zumminer_crux.id", "zumminer_dev_crux.id", "zumrai.id"
    ].sort());

    // Addresses
    const query8 = await supertest(api.server).get(`/v1/addresses/stacks/SP1HPCXTGV31W5659M3WTBEFP5AN55HV4B1Q9T31F`);
    expect(query8.status).toBe(200);
    expect(query8.type).toBe('application/json');
    expect(query8.body).toEqual({
      names: ["0.id"]
    });

    // Subdomains
    const query9 = await supertest(api.server).get(`/v1/names/flushreset.id.blockstack`);
    expect(query9.status).toBe(200);
    expect(query9.type).toBe('application/json');
    expect(query9.body).toEqual({
      address: 'SP2S2F9TCAT43KEJT02YTG2NXVCPZXS1426T63D9H',
      blockchain: 'stacks',
      last_txid: '0x1234',
      resolver: 'https://registrar.blockstack.org',
      status: 'registered_subdomain',
      zonefile:
        '$ORIGIN flushreset.id.blockstack\n$TTL 3600\n_http._tcp IN URI 10 1 "https://gaia.blockstack.org/hub/1HEznKZ7mK5fmibweM7eAk8SwRgJ1bWY92/profile.json"\n\n',
      zonefile_hash: '14dc091ebce8ea117e1276d802ee903cc0fdde81',
    });

    const query10 = await supertest(api.server).get(`/v1/names/flushreset.id.blockstack/zonefile/14dc091ebce8ea117e1276d802ee903cc0fdde81`);
    expect(query10.status).toBe(200);
    expect(query10.type).toBe('application/json');
    expect(query10.body).toEqual({
      zonefile:
        '$ORIGIN flushreset.id.blockstack\n$TTL 3600\n_http._tcp IN URI 10 1 "https://gaia.blockstack.org/hub/1HEznKZ7mK5fmibweM7eAk8SwRgJ1bWY92/profile.json"\n\n',
    });

    const query11 = await supertest(api.server).get(`/v1/names/flushreset.id.blockstack/zonefile`);
    expect(query11.status).toBe(200);
    expect(query11.type).toBe('application/json');
    expect(query11.body).toEqual({
      zonefile:
        '$ORIGIN flushreset.id.blockstack\n$TTL 3600\n_http._tcp IN URI 10 1 "https://gaia.blockstack.org/hub/1HEznKZ7mK5fmibweM7eAk8SwRgJ1bWY92/profile.json"\n\n',
    });

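    // Finally, inspect the subdomain row directly in the datastore. For a subdomain,
    // `name` holds the parent BNS name rather than the fully-qualified subdomain.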
    const dbquery = await db.getSubdomain({ subdomain: `flushreset.id.blockstack`, includeUnanchored: false });
    assert(dbquery.found);
    if (dbquery.result) {
      expect(dbquery.result.name).toBe('id.blockstack');
    }
  });

  afterEach(async () => {
    await new Promise(resolve => eventServer.close(() => resolve(true)));
    await api.terminate();
    client.release();
    await db?.close();
    await runMigrations(undefined, 'down');
  });
});
@@ -1,95 +0,0 @@
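(The -1,95 +0,0 hunk header above marks the following 95 lines, the unit tests for parseNamespaceRawValue, parseNameRawValue, and parseZoneFileTxt, as removed by this commit.)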
import { parseNamespaceRawValue, parseNameRawValue, parseZoneFileTxt } from '../bns-helpers';
import * as zoneFileParser from 'zone-file';

test('Success: namespace parsed', () => {
  const expectedNamespace = {
    namespace_id: 'xyz',
    address: 'ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH',
    base: 1,
    coeff: 1,
    launched_at: 14,
    lifetime: 1,
    no_vowel_discount: 1,
    nonalpha_discount: 1,
    ready_block: 4,
    reveal_block: 6,
    status: 'ready',
    buckets: '1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1',
    tx_id: '0x2114c8cda9e829f8b5d3c4163724ae9c4d9142d2bae4a35bffb006408d21c0ab',
    index_block_hash: '0xdeadbeef',
  };

  const namespace = parseNamespaceRawValue(
    // This value comes from a smart contract event (event.contract_event.raw_value)
    '0x0c00000003096e616d657370616365020000000378797a0a70726f706572746965730c000000050b6c61756e636865642d61740a010000000000000000000000000000000e086c69666574696d650100000000000000000000000000000001106e616d6573706163652d696d706f7274051abf8e82623c380cd870931d48b525d5e12a4d67820e70726963652d66756e6374696f6e0c0000000504626173650100000000000000000000000000000001076275636b6574730b00000010010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000105636f6566660100000000000000000000000000000001116e6f2d766f77656c2d646973636f756e740100000000000000000000000000000001116e6f6e616c7068612d646973636f756e7401000000000000000000000000000000010b72657665616c65642d61740100000000000000000000000000000006067374617475730d000000057265616479',
    4,
    '0x2114c8cda9e829f8b5d3c4163724ae9c4d9142d2bae4a35bffb006408d21c0ab',
    0
  );

  expect(namespace?.address).toEqual(expectedNamespace.address);
  expect(namespace?.namespace_id).toEqual(expectedNamespace.namespace_id);
  expect(namespace?.base).toEqual(expectedNamespace.base);
  expect(namespace?.coeff).toEqual(expectedNamespace.coeff);
  expect(namespace?.launched_at).toEqual(expectedNamespace.launched_at);
  expect(namespace?.lifetime).toEqual(expectedNamespace.lifetime);
  expect(namespace?.no_vowel_discount).toEqual(expectedNamespace.no_vowel_discount);
  expect(namespace?.nonalpha_discount).toEqual(expectedNamespace.nonalpha_discount);
  expect(namespace?.ready_block).toEqual(expectedNamespace.ready_block);
  expect(namespace?.reveal_block).toEqual(expectedNamespace.reveal_block);
  expect(namespace?.status).toEqual(expectedNamespace.status);
  expect(namespace?.buckets).toEqual(expectedNamespace.buckets);
  expect(namespace?.tx_id).toEqual(expectedNamespace.tx_id);
});

test('Success: parse name raw value', () => {
  const expectedName = {
    attachment: {
      hash: 'c5217bcb3e52612ff7c835f9bb46a5f86aa73b8d',
      metadata: {
        name: 'abcdef',
        namespace: 'xyz',
        tx_sender: {
          type: 0,
          version: 26,
          hash160: 'bf8e82623c380cd870931d48b525d5e12a4d6782',
        },
        op: 'name-import',
      },
    },
  };

  const expectedAttachment = expectedName.attachment;

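  // parseNameRawValue decodes the hex-serialized Clarity tuple that the BNS contract
  // emits in its print event; the fields asserted below (name, namespace, op, tx-sender)
  // are entries of that tuple.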
  const name = parseNameRawValue(
    // This value comes from a smart contract event (event.contract_event.raw_value)
    '0x0c000000010a6174746163686d656e740c00000003106174746163686d656e742d696e646578010000000000000000000000000000000004686173680200000014c5217bcb3e52612ff7c835f9bb46a5f86aa73b8d086d657461646174610c00000004046e616d650200000006616263646566096e616d657370616365020000000378797a026f700d0000000b6e616d652d696d706f72740974782d73656e646572051abf8e82623c380cd870931d48b525d5e12a4d6782'
  );

  const attachment = name.attachment;

  expect(attachment.hash).toEqual(expectedAttachment.hash);
  expect(attachment.metadata.name).toEqual(expectedAttachment.metadata.name);
  expect(attachment.metadata.namespace).toEqual(expectedAttachment.metadata.namespace);
  expect(attachment.metadata.op).toEqual(expectedAttachment.metadata.op);
  expect(attachment.metadata.tx_sender.version).toEqual(
    expectedAttachment.metadata.tx_sender.version
  );
  expect(attachment.metadata.tx_sender.hash160).toEqual(
    expectedAttachment.metadata.tx_sender.hash160
  );
});

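// A BNS subdomain is published as a TXT record inside the parent name's zonefile: the
// "owner", "seqn", and "parts" key/value pairs describe the subdomain, and "zf0" (plus
// "zf1", etc. when parts > 1) carries the subdomain's own zonefile, base64-encoded.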
test('Parse TXT', () => {
  const subdomain = `$ORIGIN abcdef.xyz
$TTL 3600
asim IN TXT "owner=ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH" "seqn=0" "parts=1" "zf0=JE9SSUdJTiBhc2ltCiRUVEwgMzYwMApfaHR0cHMuX3RjcCBVUkkgMTAgMSAiaHR0cHM6Ly9nYWlhLmJsb2Nrc3RhY2sub3JnL2h1Yi9TVDJaUlgwSzI3R1cwU1AzR0pDRU1IRDk1VFFHSk1LQjdHOVkwWDFNSC9wcm9maWxlLmpzb24iCg=="
_http._tcp IN URI 10 1 "https://gaia.blockstack.org/hub/1M3325hr1utdv4HhSAfvYKhapzPP9Axhde/profile.json"
_resolver IN URI 10 1 "http://localhost:3000"
`;

  const parsedZoneFile = zoneFileParser.parseZoneFile(subdomain);
  const zoneFileTxt = parseZoneFileTxt(parsedZoneFile.txt?.[0].txt as string[]);
  expect(zoneFileTxt.owner).toBe('ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH');
  expect(zoneFileTxt.parts).toBe('1');
  expect(zoneFileTxt.seqn).toBe('0');
});
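// A minimal sketch (not part of the original tests) of recovering the embedded zonefile
// from the TXT record above, assuming only Node's Buffer API; `zf0Value` stands in for
// the "zf0=..." payload parsed out of the record:
//
//   const zf0Value = 'JE9SSUdJTiBhc2ltCiRUVEwgMzYwMAo...';
//   const embeddedZonefile = Buffer.from(zf0Value, 'base64').toString('utf8');
//   // embeddedZonefile begins with '$ORIGIN asim\n$TTL 3600\n...'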
@@ -2833,8 +2833,8 @@ describe('postgres datastore', () => {
      tx_index: 0,
      namespace_id: 'abc',
      address: 'ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH',
      base: 1,
      coeff: 1,
      base: 1n,
      coeff: 1n,
      launched_at: 14,
      lifetime: 1,
      no_vowel_discount: 1,
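(The fixture hunks above and below replace the number literals `base: 1` and `coeff: 1` with the BigInt literals `1n`, presumably tracking a schema change that widened these DbBnsNamespace price-function fields to bigint.)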
@@ -3833,8 +3833,8 @@ describe('postgres datastore', () => {
    {
      namespace_id: 'abc',
      address: 'ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH',
      base: 1,
      coeff: 1,
      base: 1n,
      coeff: 1n,
      launched_at: 14,
      lifetime: 1,
      no_vowel_discount: 1,
@@ -4027,8 +4027,8 @@ describe('postgres datastore', () => {
    {
      namespace_id: 'abc',
      address: 'ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH',
      base: 1,
      coeff: 1,
      base: 1n,
      coeff: 1n,
      launched_at: 14,
      lifetime: 1,
      no_vowel_discount: 1,
@@ -4549,8 +4549,8 @@ describe('postgres datastore', () => {
    const namespace: DbBnsNamespace = {
      namespace_id: 'abc',
      address: 'ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH',
      base: 1,
      coeff: 1,
      base: 1n,
      coeff: 1n,
      launched_at: dbBlock.block_height,
      lifetime: 1,
      no_vowel_discount: 1,
@@ -4683,8 +4683,7 @@ describe('postgres datastore', () => {

    const subdomains: DbBnsSubdomain[] = [];
    subdomains.push(subdomain);
    await db.updateBatchSubdomains(
      client,
      await db.resolveBnsSubdomains(
        {
          index_block_hash: dbBlock.index_block_hash,
          parent_index_block_hash: dbBlock.parent_index_block_hash,