feat!: optimize tables and improve canonical treatment of BNS data (#1287)

* refactor: re-order helper files

* fix: new columns for zonefiles

* feat: batch update subdomains

* fix: optimize some queries

* fix: api tests

* fix: optimize more queries, add cache handlers

* fix: some integration tests

* fix: all integration tests

* fix: v1 import test individually

* fix: move v1 import tests to separate file

* fix: start adding on conflict to subdomains

* fix: add uniqueness to names, subdomains and namespaces

* fix: remove extra index

* refactor: remove unused tx function

* fix: unused exports

* feat: echo block height while doing replays

* refactor: separate batch updates

* fix: calculate expire_block for a name based on namespace lifetime

* fix: test strict equal

* fix: remove secondary temporary table for event import

* fix: test sorting

* fix: detect namespaces readied by third party contracts

* fix: bigint for namespaces

* refactor: move bns helper tests to bns suite

* chore: make event test less verbose

* chore: temporarily disable migrations on boot

* chore: add verbose logs to migration

* chore: make migration verbosity depend on log level

* fix: detect names from print event

* test: re-orged attachments

* test: duplicate zonefile updates
This commit is contained in:
Rafael Cárdenas
2022-08-26 10:14:37 -05:00
committed by GitHub
parent a10ac03b9d
commit 1f648187b8
25 changed files with 1401 additions and 722 deletions

View File

@@ -3,13 +3,20 @@ import { asyncHandler } from '../../async-handler';
import { DataStore } from '../../../datastore/common';
import { isUnanchoredRequest } from '../../query-helpers';
import { ChainID } from '@stacks/transactions';
import {
getETagCacheHandler,
setETagCacheHeaders,
} from '../../../api/controllers/cache-controller';
const SUPPORTED_BLOCKCHAINS = ['stacks'];
export function createBnsAddressesRouter(db: DataStore, chainId: ChainID): express.Router {
const router = express.Router();
const cacheHandler = getETagCacheHandler(db);
router.get(
'/:blockchain/:address',
cacheHandler,
asyncHandler(async (req, res, next) => {
// Retrieves a list of names owned by the address provided.
const { blockchain, address } = req.params;
@@ -23,6 +30,7 @@ export function createBnsAddressesRouter(db: DataStore, chainId: ChainID): expre
includeUnanchored,
chainId,
});
setETagCacheHeaders(res);
if (namesByAddress.found) {
res.json({ names: namesByAddress.result });
} else {

View File

@@ -3,92 +3,85 @@ import { asyncHandler } from '../../async-handler';
import { DataStore } from '../../../datastore/common';
import { parsePagingQueryInput } from '../../../api/pagination';
import { isUnanchoredRequest } from '../../query-helpers';
import { bnsBlockchain, BnsErrors } from '../../../bns-constants';
import { bnsBlockchain, BnsErrors } from '../../../event-stream/bns/bns-constants';
import { BnsGetNameInfoResponse } from '@stacks/stacks-blockchain-api-types';
import { ChainID } from '@stacks/transactions';
import {
getETagCacheHandler,
setETagCacheHeaders,
} from '../../../api/controllers/cache-controller';
export function createBnsNamesRouter(db: DataStore, chainId: ChainID): express.Router {
const router = express.Router();
const cacheHandler = getETagCacheHandler(db);
router.get(
'/:name/zonefile/:zoneFileHash',
cacheHandler,
asyncHandler(async (req, res, next) => {
// Fetches the historical zonefile specified by the username and zone hash.
const { name, zoneFileHash } = req.params;
const includeUnanchored = isUnanchoredRequest(req, res, next);
let nameFound = false;
const nameQuery = await db.getName({ name: name, includeUnanchored, chainId: chainId });
nameFound = nameQuery.found;
if (!nameFound) {
const subdomainQuery = await db.getSubdomain({ subdomain: name, includeUnanchored });
nameFound = subdomainQuery.found;
}
if (nameFound) {
const zonefile = await db.getHistoricalZoneFile({ name: name, zoneFileHash: zoneFileHash });
if (zonefile.found) {
res.json(zonefile.result);
} else {
res.status(404).json({ error: 'No such zonefile' });
}
const zonefile = await db.getHistoricalZoneFile({
name: name,
zoneFileHash: zoneFileHash,
includeUnanchored,
});
if (zonefile.found) {
setETagCacheHeaders(res);
res.json(zonefile.result);
} else {
res.status(400).json({ error: 'Invalid name or subdomain' });
res.status(404).json({ error: 'No such name or zonefile' });
}
})
);
router.get(
'/:name/subdomains',
cacheHandler,
asyncHandler(async (req, res, next) => {
const { name } = req.params;
const includeUnanchored = isUnanchoredRequest(req, res, next);
const subdomainsList = await db.getSubdomainsListInName({ name, includeUnanchored });
setETagCacheHeaders(res);
res.json(subdomainsList.results);
})
);
router.get(
'/:name/zonefile',
cacheHandler,
asyncHandler(async (req, res, next) => {
// Fetch a users raw zone file. This only works for RFC-compliant zone files. This method returns an error for names that have non-standard zone files.
const { name } = req.params;
const includeUnanchored = isUnanchoredRequest(req, res, next);
let nameFound = false;
const nameQuery = await db.getName({ name: name, includeUnanchored, chainId: chainId });
nameFound = nameQuery.found;
if (!nameFound) {
const subdomainQuery = await db.getSubdomain({ subdomain: name, includeUnanchored });
nameFound = subdomainQuery.found;
}
if (nameFound) {
const zonefile = await db.getLatestZoneFile({ name: name, includeUnanchored });
if (zonefile.found) {
res.json(zonefile.result);
} else {
res.status(404).json({ error: 'No zone file for name' });
}
const zonefile = await db.getLatestZoneFile({ name: name, includeUnanchored });
if (zonefile.found) {
setETagCacheHeaders(res);
res.json(zonefile.result);
} else {
res.status(400).json({ error: 'Invalid name or subdomain' });
res.status(404).json({ error: 'No such name or zonefile does not exist' });
}
})
);
router.get(
'/',
cacheHandler,
asyncHandler(async (req, res, next) => {
const page = parsePagingQueryInput(req.query.page ?? 0);
const includeUnanchored = isUnanchoredRequest(req, res, next);
const { results } = await db.getNamesList({ page, includeUnanchored });
if (results.length === 0 && req.query.page) {
res.status(400).json(BnsErrors.InvalidPageNumber);
} else {
setETagCacheHeaders(res);
res.json(results);
}
res.json(results);
})
);
router.get(
'/:name',
cacheHandler,
asyncHandler(async (req, res, next) => {
const { name } = req.params;
const includeUnanchored = isUnanchoredRequest(req, res, next);
@@ -149,6 +142,7 @@ export function createBnsNamesRouter(db: DataStore, chainId: ChainID): express.R
const response = Object.fromEntries(
Object.entries(nameInfoResponse).filter(([_, v]) => v != null)
);
setETagCacheHeaders(res);
res.json(response);
})
);

View File

@@ -3,20 +3,27 @@ import { asyncHandler } from '../../async-handler';
import { DataStore } from '../../../datastore/common';
import { parsePagingQueryInput } from '../../../api/pagination';
import { isUnanchoredRequest } from '../../query-helpers';
import { BnsErrors } from '../../../bns-constants';
import { BnsErrors } from '../../../event-stream/bns/bns-constants';
import { BnsGetAllNamespacesResponse } from '@stacks/stacks-blockchain-api-types';
import {
getETagCacheHandler,
setETagCacheHeaders,
} from '../../../api/controllers/cache-controller';
export function createBnsNamespacesRouter(db: DataStore): express.Router {
const router = express.Router();
const cacheHandler = getETagCacheHandler(db);
router.get(
'/',
cacheHandler,
asyncHandler(async (req, res, next) => {
const includeUnanchored = isUnanchoredRequest(req, res, next);
const { results } = await db.getNamespaceList({ includeUnanchored });
const response: BnsGetAllNamespacesResponse = {
namespaces: results,
};
setETagCacheHeaders(res);
res.json(response);
return;
})
@@ -24,6 +31,7 @@ export function createBnsNamespacesRouter(db: DataStore): express.Router {
router.get(
'/:tld/names',
cacheHandler,
asyncHandler(async (req, res, next) => {
const { tld } = req.params;
const page = parsePagingQueryInput(req.query.page ?? 0);
@@ -39,8 +47,10 @@ export function createBnsNamespacesRouter(db: DataStore): express.Router {
});
if (results.length === 0 && req.query.page) {
res.status(400).json(BnsErrors.InvalidPageNumber);
} else {
setETagCacheHeaders(res);
res.json(results);
}
res.json(results);
}
})
);

View File

@@ -14,12 +14,12 @@ import {
listCV,
ChainID,
} from '@stacks/transactions';
import { GetStacksNetwork, getBnsContractID } from './../../../bns-helpers';
import {
BnsGetNamePriceResponse,
BnsGetNamespacePriceResponse,
} from '@stacks/stacks-blockchain-api-types';
import { isValidPrincipal, logger } from './../../../helpers';
import { getBnsContractID, GetStacksNetwork } from '../../../event-stream/bns/bns-helpers';
export function createBnsPriceRouter(db: DataStore, chainId: ChainID): express.Router {
const router = express.Router();

View File

@@ -428,6 +428,31 @@ export interface DataStoreTxEventData {
namespaces: DbBnsNamespace[];
}
export interface DataStoreAttachmentData {
op: string;
name: string;
namespace: string;
zonefile: string;
zonefileHash: string;
txId: string;
indexBlockHash: string;
blockHeight: number;
}
export interface DataStoreSubdomainBlockData {
index_block_hash: string;
parent_index_block_hash: string;
microblock_hash: string;
microblock_sequence: number;
microblock_canonical: boolean;
}
export interface DataStoreAttachmentSubdomainData {
attachment?: DataStoreAttachmentData;
blockData?: DataStoreSubdomainBlockData;
subdomains?: DbBnsSubdomain[];
}
export interface DbSearchResult {
entity_type: 'standard_address' | 'contract_address' | 'block_hash' | 'tx_id' | 'mempool_tx_id';
entity_id: string;
@@ -472,6 +497,7 @@ export interface DbInboundStxTransfer {
export interface DbBnsZoneFile {
zonefile: string;
}
export interface DbBnsNamespace {
id?: number;
namespace_id: string;
@@ -480,8 +506,8 @@ export interface DbBnsNamespace {
reveal_block: number;
ready_block: number;
buckets: string;
base: number;
coeff: number;
base: bigint;
coeff: bigint;
nonalpha_discount: number;
no_vowel_discount: number;
lifetime: number;
@@ -683,7 +709,6 @@ export interface DataStore extends DataStoreEventEmitter {
limit: number;
offset: number;
}): Promise<{ results: DbMempoolTx[]; total: number }>;
getTxStrict(args: { txId: string; indexBlockHash: string }): Promise<FoundOrNot<DbTx>>;
getTx(args: { txId: string; includeUnanchored: boolean }): Promise<FoundOrNot<DbTx>>;
getTxList(args: {
limit: number;
@@ -744,7 +769,8 @@ export interface DataStore extends DataStoreEventEmitter {
updateMicroblocks(data: DataStoreMicroblockUpdateData): Promise<void>;
updateZoneContent(zonefile: string, zonefile_hash: string, tx_id: string): Promise<void>;
updateAttachments(attachments: DataStoreAttachmentData[]): Promise<void>;
resolveBnsSubdomains(
blockData: {
index_block_hash: string;
@@ -948,6 +974,7 @@ export interface DataStore extends DataStoreEventEmitter {
getHistoricalZoneFile(args: {
name: string;
zoneFileHash: string;
includeUnanchored: boolean;
}): Promise<FoundOrNot<DbBnsZoneFile>>;
getLatestZoneFile(args: {
name: string;

View File

@@ -42,6 +42,8 @@ import {
bnsNameCV,
getBnsSmartContractId,
bnsHexValueToName,
I32_MAX,
defaultLogLevel,
} from '../helpers';
import {
DataStore,
@@ -98,6 +100,9 @@ import {
NftEventWithTxMetadata,
DbAssetEventTypeId,
DbTxGlobalStatus,
DataStoreAttachmentData,
DataStoreSubdomainBlockData,
DataStoreAttachmentSubdomainData,
} from './common';
import {
AddressTokenOfferingLocked,
@@ -120,6 +125,8 @@ import {
PgTokensNotificationPayload,
PgTxNotificationPayload,
} from './postgres-notifier';
import * as zoneFileParser from 'zone-file';
import { parseResolver, parseZoneFileTxt } from '../event-stream/bns/bns-helpers';
const MIGRATIONS_TABLE = 'pgmigrations';
const MIGRATIONS_DIR = path.join(APP_DIR, 'migrations');
@@ -237,6 +244,7 @@ export async function runMigrations(
warn: msg => logger.warn(msg),
error: msg => logger.error(msg),
},
verbose: defaultLogLevel === 'verbose',
};
if (clientConfig.schema) {
runnerOpts.schema = clientConfig.schema;
@@ -1029,23 +1037,11 @@ export class PgDataStore
payload jsonb NOT NULL
) ON COMMIT DROP
`);
// Use a `temp_raw_tsv` table first to store the raw TSV data as it might come with duplicate
// rows which would trigger the `PRIMARY KEY` constraint in `temp_event_observer_requests`.
// We will "upsert" from the former to the latter before event ingestion.
await client.query(`
CREATE TEMPORARY TABLE temp_raw_tsv
(LIKE temp_event_observer_requests)
ON COMMIT DROP
`);
onStatusUpdate?.('Importing raw event requests into temporary table...');
const importStream = client.query(pgCopyStreams.from(`COPY temp_raw_tsv FROM STDIN`));
const importStream = client.query(
pgCopyStreams.from(`COPY temp_event_observer_requests FROM STDIN`)
);
await pipelineAsync(readStream, importStream);
await client.query(`
INSERT INTO temp_event_observer_requests
SELECT *
FROM temp_raw_tsv
ON CONFLICT DO NOTHING;
`);
const totalRowCountQuery = await client.query<{ count: string }>(
`SELECT COUNT(id) count FROM temp_event_observer_requests`
);
@@ -2135,22 +2131,6 @@ export class PgDataStore
});
}
async updateZoneContent(zonefile: string, zonefile_hash: string, tx_id: string): Promise<void> {
await this.queryTx(async client => {
// inserting zonefile into zonefiles table
const validZonefileHash = this.validateZonefileHash(zonefile_hash);
await client.query(
`
UPDATE zonefiles
SET zonefile = $1
WHERE zonefile_hash = $2
`,
[zonefile, validZonefileHash]
);
});
await this.notifier?.sendName({ nameInfo: tx_id });
}
private validateZonefileHash(zonefileHash: string) {
// this function removes the `0x` from the incoming zonefile hash, either for insertion or search.
const index = zonefileHash.indexOf('0x');
@@ -2160,6 +2140,91 @@ export class PgDataStore
return zonefileHash;
}
async updateAttachments(attachments: DataStoreAttachmentData[]): Promise<void> {
await this.queryTx(async client => {
// Each attachment will batch insert zonefiles for name and all subdomains that apply.
for (const attachment of attachments) {
const subdomainData: DataStoreAttachmentSubdomainData[] = [];
if (attachment.op === 'name-update') {
// If this is a zonefile update, break it down into subdomains and update all of them. We
// must find the correct transaction that registered the zonefile in the first place and
// associate it with each entry.
const zonefile = Buffer.from(attachment.zonefile, 'hex').toString();
const zoneFileContents = zoneFileParser.parseZoneFile(zonefile);
const zoneFileTxt = zoneFileContents.txt;
if (zoneFileTxt && zoneFileTxt.length > 0) {
const dbTx = await client.query<TxQueryResult>(
`SELECT ${txColumns()} FROM txs
WHERE tx_id = $1 AND index_block_hash = $2
ORDER BY canonical DESC, microblock_canonical DESC, block_height DESC
LIMIT 1`,
[hexToBuffer(attachment.txId), hexToBuffer(attachment.indexBlockHash)]
);
let isCanonical = true;
let txIndex = -1;
const blockData: DataStoreSubdomainBlockData = {
index_block_hash: '',
parent_index_block_hash: '',
microblock_hash: '',
microblock_sequence: I32_MAX,
microblock_canonical: true,
};
if (dbTx.rowCount > 0) {
const parsedDbTx = this.parseTxQueryResult(dbTx.rows[0]);
isCanonical = parsedDbTx.canonical;
txIndex = parsedDbTx.tx_index;
blockData.index_block_hash = parsedDbTx.index_block_hash;
blockData.parent_index_block_hash = parsedDbTx.parent_index_block_hash;
blockData.microblock_hash = parsedDbTx.microblock_hash;
blockData.microblock_sequence = parsedDbTx.microblock_sequence;
blockData.microblock_canonical = parsedDbTx.microblock_canonical;
} else {
logger.warn(
`Could not find transaction ${attachment.txId} associated with attachment`
);
}
const subdomains: DbBnsSubdomain[] = [];
for (let i = 0; i < zoneFileTxt.length; i++) {
const zoneFile = zoneFileTxt[i];
const parsedTxt = parseZoneFileTxt(zoneFile.txt);
if (parsedTxt.owner === '') continue; //if txt has no owner , skip it
const subdomain: DbBnsSubdomain = {
name: attachment.name.concat('.', attachment.namespace),
namespace_id: attachment.namespace,
fully_qualified_subdomain: zoneFile.name.concat(
'.',
attachment.name,
'.',
attachment.namespace
),
owner: parsedTxt.owner,
zonefile_hash: parsedTxt.zoneFileHash,
zonefile: parsedTxt.zoneFile,
tx_id: attachment.txId,
tx_index: txIndex,
canonical: isCanonical,
parent_zonefile_hash: attachment.zonefileHash.slice(2),
parent_zonefile_index: 0,
block_height: attachment.blockHeight,
zonefile_offset: 1,
resolver: zoneFileContents.uri ? parseResolver(zoneFileContents.uri) : '',
};
subdomains.push(subdomain);
}
subdomainData.push({ blockData, subdomains, attachment: attachment });
}
}
await this.updateBatchSubdomains(client, subdomainData);
await this.updateBatchZonefiles(client, subdomainData);
// Update the name's zonefile as well.
await this.updateBatchZonefiles(client, [{ attachment }]);
}
});
for (const txId of attachments.map(a => a.txId)) {
await this.notifier?.sendName({ nameInfo: txId });
}
}
async resolveBnsSubdomains(
blockData: {
index_block_hash: string;
@@ -2172,7 +2237,8 @@ export class PgDataStore
): Promise<void> {
if (data.length == 0) return;
await this.queryTx(async client => {
await this.updateBatchSubdomains(client, blockData, data);
await this.updateBatchSubdomains(client, [{ blockData, subdomains: data }]);
await this.updateBatchZonefiles(client, [{ blockData, subdomains: data }]);
});
}
@@ -4014,27 +4080,6 @@ export class PgDataStore
});
}
async getTxStrict(args: { txId: string; indexBlockHash: string }): Promise<FoundOrNot<DbTx>> {
return this.query(async client => {
const result = await client.query<ContractTxQueryResult>(
`
SELECT ${TX_COLUMNS}, ${abiColumn()}
FROM txs
WHERE tx_id = $1 AND index_block_hash = $2
ORDER BY canonical DESC, microblock_canonical DESC, block_height DESC
LIMIT 1
`,
[hexToBuffer(args.txId), hexToBuffer(args.indexBlockHash)]
);
if (result.rowCount === 0) {
return { found: false } as const;
}
const row = result.rows[0];
const tx = this.parseTxQueryResult(row);
return { found: true, result: tx };
});
}
async getTx({ txId, includeUnanchored }: { txId: string; includeUnanchored: boolean }) {
return this.queryTx(async client => {
const maxBlockHeight = await this.getMaxBlockHeight(client, { includeUnanchored });
@@ -4852,113 +4897,147 @@ export class PgDataStore
}
}
async updateBatchSubdomains(
async updateBatchZonefiles(
client: ClientBase,
blockData: {
index_block_hash: string;
parent_index_block_hash: string;
microblock_hash: string;
microblock_sequence: number;
microblock_canonical: boolean;
},
subdomains: DbBnsSubdomain[]
) {
// bns insertion variables
const columnCount = 18;
const insertParams = this.generateParameterizedInsertString({
rowCount: subdomains.length,
columnCount,
});
const values: any[] = [];
// zonefile insertion variables
const zonefilesColumnCount = 2;
const zonefileInsertParams = this.generateParameterizedInsertString({
rowCount: subdomains.length,
columnCount: zonefilesColumnCount,
});
const zonefileValues: string[] = [];
for (const subdomain of subdomains) {
let txIndex = subdomain.tx_index;
if (txIndex === -1) {
const txQuery = await client.query<{ tx_index: number }>(
`
SELECT tx_index from txs
WHERE tx_id = $1 AND index_block_hash = $2 AND block_height = $3
LIMIT 1
`,
[
data: DataStoreAttachmentSubdomainData[]
): Promise<void> {
let zonefileCount = 0;
const zonefileValues: any[] = [];
for (const dataItem of data) {
if (dataItem.subdomains && dataItem.blockData) {
for (const subdomain of dataItem.subdomains) {
zonefileValues.push(
subdomain.fully_qualified_subdomain,
subdomain.zonefile,
this.validateZonefileHash(subdomain.zonefile_hash),
hexToBuffer(subdomain.tx_id),
hexToBuffer(blockData.index_block_hash),
subdomain.block_height,
]
);
if (txQuery.rowCount === 0) {
logger.warn(`Could not find tx index for subdomain entry: ${JSON.stringify(subdomain)}`);
txIndex = 0;
} else {
txIndex = txQuery.rows[0].tx_index;
hexToBuffer(dataItem.blockData.index_block_hash)
);
zonefileCount++;
}
}
// preparing bns values for insertion
values.push(
subdomain.name,
subdomain.namespace_id,
subdomain.fully_qualified_subdomain,
subdomain.owner,
this.validateZonefileHash(subdomain.zonefile_hash),
subdomain.parent_zonefile_hash,
subdomain.parent_zonefile_index,
subdomain.block_height,
txIndex,
subdomain.zonefile_offset,
subdomain.resolver,
subdomain.canonical,
hexToBuffer(subdomain.tx_id),
hexToBuffer(blockData.index_block_hash),
hexToBuffer(blockData.parent_index_block_hash),
hexToBuffer(blockData.microblock_hash),
blockData.microblock_sequence,
blockData.microblock_canonical
);
// preparing zonefile values for insertion
zonefileValues.push(subdomain.zonefile, this.validateZonefileHash(subdomain.zonefile_hash));
}
// bns insertion query
const insertQuery = `INSERT INTO subdomains (
name, namespace_id, fully_qualified_subdomain, owner,
zonefile_hash, parent_zonefile_hash, parent_zonefile_index, block_height, tx_index,
zonefile_offset, resolver, canonical, tx_id,
index_block_hash, parent_index_block_hash, microblock_hash, microblock_sequence, microblock_canonical
) VALUES ${insertParams}`;
const insertQueryName = `insert-batch-subdomains_${columnCount}x${subdomains.length}`;
const insertBnsSubdomainsEventQuery: QueryConfig = {
name: insertQueryName,
text: insertQuery,
values,
};
// zonefile insertion query
const zonefileInsertQuery = `INSERT INTO zonefiles (zonefile, zonefile_hash) VALUES ${zonefileInsertParams}`;
const insertZonefileQueryName = `insert-batch-zonefiles_${columnCount}x${subdomains.length}`;
const insertZonefilesEventQuery: QueryConfig = {
name: insertZonefileQueryName,
text: zonefileInsertQuery,
values: zonefileValues,
};
try {
// checking for bns insertion errors
const bnsRes = await client.query(insertBnsSubdomainsEventQuery);
if (bnsRes.rowCount !== subdomains.length) {
throw new Error(`Expected ${subdomains.length} inserts, got ${bnsRes.rowCount} for BNS`);
if (dataItem.attachment) {
zonefileValues.push(
`${dataItem.attachment.name}.${dataItem.attachment.namespace}`,
Buffer.from(dataItem.attachment.zonefile, 'hex').toString(),
this.validateZonefileHash(dataItem.attachment.zonefileHash),
hexToBuffer(dataItem.attachment.txId),
hexToBuffer(dataItem.attachment.indexBlockHash)
);
zonefileCount++;
}
// checking for zonefile insertion errors
}
if (!zonefileCount) {
return;
}
try {
const zonefilesColumnCount = 5;
const zonefileInsertParams = this.generateParameterizedInsertString({
rowCount: zonefileCount,
columnCount: zonefilesColumnCount,
});
const zonefileInsertQuery = `
INSERT INTO zonefiles (name, zonefile, zonefile_hash, tx_id, index_block_hash)
VALUES ${zonefileInsertParams}
ON CONFLICT ON CONSTRAINT unique_name_zonefile_hash_tx_id_index_block_hash DO
UPDATE SET zonefile = EXCLUDED.zonefile
`;
const insertZonefileQueryName = `insert-batch-zonefiles_${zonefilesColumnCount}x${zonefileCount}`;
const insertZonefilesEventQuery: QueryConfig = {
name: insertZonefileQueryName,
text: zonefileInsertQuery,
values: zonefileValues,
};
const zonefilesRes = await client.query(insertZonefilesEventQuery);
if (zonefilesRes.rowCount !== subdomains.length) {
if (zonefilesRes.rowCount !== zonefileCount) {
throw new Error(
`Expected ${subdomains.length} inserts, got ${zonefilesRes.rowCount} for zonefiles`
`Expected ${zonefileCount} inserts, got ${zonefilesRes.rowCount} for zonefiles`
);
}
} catch (e: any) {
logError(`subdomain errors ${e.message}`, e);
logError(`zonefile batch error ${e.message}`, e);
throw e;
}
}
async updateBatchSubdomains(
client: ClientBase,
data: DataStoreAttachmentSubdomainData[]
): Promise<void> {
let subdomainCount = 0;
const subdomainValues: any[] = [];
for (const dataItem of data) {
if (dataItem.subdomains && dataItem.blockData) {
for (const subdomain of dataItem.subdomains) {
subdomainValues.push(
subdomain.name,
subdomain.namespace_id,
subdomain.fully_qualified_subdomain,
subdomain.owner,
this.validateZonefileHash(subdomain.zonefile_hash),
subdomain.parent_zonefile_hash,
subdomain.parent_zonefile_index,
subdomain.block_height,
subdomain.tx_index,
subdomain.zonefile_offset,
subdomain.resolver,
subdomain.canonical,
hexToBuffer(subdomain.tx_id),
hexToBuffer(dataItem.blockData.index_block_hash),
hexToBuffer(dataItem.blockData.parent_index_block_hash),
hexToBuffer(dataItem.blockData.microblock_hash),
dataItem.blockData.microblock_sequence,
dataItem.blockData.microblock_canonical
);
subdomainCount++;
}
}
}
if (!subdomainCount) {
return;
}
try {
const subdomainColumnCount = 18;
const subdomainInsertParams = this.generateParameterizedInsertString({
rowCount: subdomainCount,
columnCount: subdomainColumnCount,
});
const insertQuery = `
INSERT INTO subdomains (
name, namespace_id, fully_qualified_subdomain, owner,
zonefile_hash, parent_zonefile_hash, parent_zonefile_index, block_height, tx_index,
zonefile_offset, resolver, canonical, tx_id,
index_block_hash, parent_index_block_hash, microblock_hash, microblock_sequence, microblock_canonical
) VALUES ${subdomainInsertParams}
ON CONFLICT ON CONSTRAINT unique_fully_qualified_subdomain_tx_id_index_block_hash_microblock_hash DO
UPDATE SET
name = EXCLUDED.name,
namespace_id = EXCLUDED.namespace_id,
owner = EXCLUDED.owner,
zonefile_hash = EXCLUDED.zonefile_hash,
parent_zonefile_hash = EXCLUDED.parent_zonefile_hash,
parent_zonefile_index = EXCLUDED.parent_zonefile_index,
block_height = EXCLUDED.block_height,
tx_index = EXCLUDED.tx_index,
zonefile_offset = EXCLUDED.zonefile_offset,
resolver = EXCLUDED.resolver,
canonical = EXCLUDED.canonical,
parent_index_block_hash = EXCLUDED.parent_index_block_hash,
microblock_sequence = EXCLUDED.microblock_sequence,
microblock_canonical = EXCLUDED.microblock_canonical
`;
const insertQueryName = `insert-batch-subdomains_${subdomainColumnCount}x${subdomainCount}`;
const insertBnsSubdomainsEventQuery: QueryConfig = {
name: insertQueryName,
text: insertQuery,
values: subdomainValues,
};
const bnsRes = await client.query(insertBnsSubdomainsEventQuery);
if (bnsRes.rowCount !== subdomainCount) {
throw new Error(`Expected ${subdomainCount} inserts, got ${bnsRes.rowCount} for BNS`);
}
} catch (e: any) {
logError(`subdomain batch error ${e.message}`, e);
throw e;
}
}
@@ -6806,11 +6885,33 @@ export class PgDataStore
const validZonefileHash = this.validateZonefileHash(zonefile_hash);
await client.query(
`
INSERT INTO zonefiles (zonefile, zonefile_hash)
VALUES ($1, $2)
`,
[zonefile, validZonefileHash]
INSERT INTO zonefiles (name, zonefile, zonefile_hash, tx_id, index_block_hash)
VALUES ($1, $2, $3, $4, $5)
ON CONFLICT ON CONSTRAINT unique_name_zonefile_hash_tx_id_index_block_hash DO
UPDATE SET zonefile = EXCLUDED.zonefile
`,
[
name,
zonefile,
validZonefileHash,
hexToBuffer(tx_id),
hexToBuffer(blockData.index_block_hash),
]
);
// Try to figure out the name's expiration block based on its namespace's lifetime.
const namespaceLifetime = await client.query<{ lifetime: number }>(
`SELECT lifetime
FROM namespaces
WHERE namespace_id = $1
AND canonical = true AND microblock_canonical = true
ORDER BY namespace_id, ready_block DESC, microblock_sequence DESC, tx_index DESC
LIMIT 1`,
[namespace_id]
);
const expireBlock =
namespaceLifetime.rowCount > 0
? registered_at + namespaceLifetime.rows[0].lifetime
: expire_block;
await client.query(
`
INSERT INTO names(
@@ -6818,12 +6919,25 @@ export class PgDataStore
tx_index, tx_id, status, canonical,
index_block_hash, parent_index_block_hash, microblock_hash, microblock_sequence, microblock_canonical
) values($1, $2, $3, $4, $5, $6, $7, $8,$9, $10, $11, $12, $13, $14, $15)
`,
ON CONFLICT ON CONSTRAINT unique_name_tx_id_index_block_hash_microblock_hash DO
UPDATE SET
address = EXCLUDED.address,
registered_at = EXCLUDED.registered_at,
expire_block = EXCLUDED.expire_block,
zonefile_hash = EXCLUDED.zonefile_hash,
namespace_id = EXCLUDED.namespace_id,
tx_index = EXCLUDED.tx_index,
status = EXCLUDED.status,
canonical = EXCLUDED.canonical,
parent_index_block_hash = EXCLUDED.parent_index_block_hash,
microblock_sequence = EXCLUDED.microblock_sequence,
microblock_canonical = EXCLUDED.microblock_canonical
`,
[
name,
address,
registered_at,
expire_block,
expireBlock,
validZonefileHash,
namespace_id,
tx_index,
@@ -6867,15 +6981,32 @@ export class PgDataStore
tx_index,
canonical,
} = bnsNamespace;
await client.query(
`
INSERT INTO namespaces(
namespace_id, launched_at, address, reveal_block, ready_block, buckets,
base,coeff, nonalpha_discount,no_vowel_discount, lifetime, status, tx_index,
base, coeff, nonalpha_discount, no_vowel_discount, lifetime, status, tx_index,
tx_id, canonical,
index_block_hash, parent_index_block_hash, microblock_hash, microblock_sequence, microblock_canonical
) values($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20)
ON CONFLICT ON CONSTRAINT unique_namespace_id_tx_id_index_block_hash_microblock_hash DO
UPDATE SET
launched_at = EXCLUDED.launched_at,
address = EXCLUDED.address,
reveal_block = EXCLUDED.reveal_block,
ready_block = EXCLUDED.ready_block,
buckets = EXCLUDED.buckets,
base = EXCLUDED.base,
coeff = EXCLUDED.coeff,
nonalpha_discount = EXCLUDED.nonalpha_discount,
no_vowel_discount = EXCLUDED.no_vowel_discount,
lifetime = EXCLUDED.lifetime,
status = EXCLUDED.status,
tx_index = EXCLUDED.tx_index,
canonical = EXCLUDED.canonical,
parent_index_block_hash = EXCLUDED.parent_index_block_hash,
microblock_sequence = EXCLUDED.microblock_sequence,
microblock_canonical = EXCLUDED.microblock_canonical
`,
[
namespace_id,
@@ -6967,7 +7098,7 @@ export class PgDataStore
FROM namespaces
WHERE canonical = true AND microblock_canonical = true
AND ready_block <= $1
ORDER BY namespace_id, ready_block DESC, tx_index DESC
ORDER BY namespace_id, ready_block DESC, microblock_sequence DESC, tx_index DESC
`,
[maxBlockHeight]
);
@@ -6998,7 +7129,7 @@ export class PgDataStore
WHERE namespace_id = $1
AND registered_at <= $3
AND canonical = true AND microblock_canonical = true
ORDER BY name, registered_at DESC, tx_index DESC
ORDER BY name, registered_at DESC, microblock_sequence DESC, tx_index DESC
LIMIT 100
OFFSET $2
`,
@@ -7026,7 +7157,7 @@ export class PgDataStore
WHERE namespace_id = $1
AND ready_block <= $2
AND canonical = true AND microblock_canonical = true
ORDER BY namespace_id, ready_block DESC, tx_index DESC
ORDER BY namespace_id, ready_block DESC, microblock_sequence DESC, tx_index DESC
LIMIT 1
`,
[namespace, maxBlockHeight]
@@ -7060,13 +7191,15 @@ export class PgDataStore
DbBnsName & { tx_id: Buffer; index_block_hash: Buffer }
>(
`
SELECT DISTINCT ON (names.name) names.name, names.*, zonefiles.zonefile
FROM names
LEFT JOIN zonefiles ON names.zonefile_hash = zonefiles.zonefile_hash
WHERE name = $1
AND registered_at <= $2
AND canonical = true AND microblock_canonical = true
ORDER BY name, registered_at DESC, tx_index DESC
SELECT n.*, z.zonefile
FROM names AS n
LEFT JOIN zonefiles AS z USING (name, tx_id, index_block_hash)
WHERE n.name = $1
AND n.registered_at <= $2
AND n.canonical = true
AND n.microblock_canonical = true
ORDER BY n.registered_at DESC, n.microblock_sequence DESC, n.tx_index DESC
LIMIT 1
`,
[name, maxBlockHeight]
);
@@ -7117,27 +7250,54 @@ export class PgDataStore
async getHistoricalZoneFile(args: {
name: string;
zoneFileHash: string;
includeUnanchored: boolean;
}): Promise<FoundOrNot<DbBnsZoneFile>> {
const queryResult = await this.query(client => {
const queryResult = await this.queryTx(async client => {
const maxBlockHeight = await this.getMaxBlockHeight(client, {
includeUnanchored: args.includeUnanchored,
});
const validZonefileHash = this.validateZonefileHash(args.zoneFileHash);
return client.query<{ zonefile: string }>(
`
SELECT zonefile
FROM names
LEFT JOIN zonefiles ON zonefiles.zonefile_hash = names.zonefile_hash
WHERE name = $1
AND names.zonefile_hash = $2
UNION ALL
SELECT zonefile
FROM subdomains
LEFT JOIN zonefiles ON zonefiles.zonefile_hash = subdomains.zonefile_hash
WHERE fully_qualified_subdomain = $1
AND subdomains.zonefile_hash = $2
`,
[args.name, validZonefileHash]
);
// Depending on the kind of name we got, use the correct table to pivot on canonical chain
// state to get the zonefile. We can't pivot on the `txs` table because some names/subdomains
// were imported from Stacks v1 and they don't have an associated tx.
const isSubdomain = args.name.split('.').length > 2;
if (isSubdomain) {
return client.query<{ zonefile: string }>(
`
SELECT zonefile
FROM zonefiles AS z
INNER JOIN subdomains AS s ON
s.fully_qualified_subdomain = z.name
AND s.tx_id = z.tx_id
AND s.index_block_hash = z.index_block_hash
WHERE z.name = $1
AND z.zonefile_hash = $2
AND s.canonical = TRUE
AND s.microblock_canonical = TRUE
AND s.block_height <= $3
ORDER BY s.block_height DESC, s.microblock_sequence DESC, s.tx_index DESC
LIMIT 1
`,
[args.name, validZonefileHash, maxBlockHeight]
);
} else {
return client.query<{ zonefile: string }>(
`
SELECT zonefile
FROM zonefiles AS z
INNER JOIN names AS n USING (name, tx_id, index_block_hash)
WHERE z.name = $1
AND z.zonefile_hash = $2
AND n.canonical = TRUE
AND n.microblock_canonical = TRUE
AND n.registered_at <= $3
ORDER BY n.registered_at DESC, n.microblock_sequence DESC, n.tx_index DESC
LIMIT 1
`,
[args.name, validZonefileHash, maxBlockHeight]
);
}
});
if (queryResult.rowCount > 0) {
return {
found: true,
@@ -7156,51 +7316,45 @@ export class PgDataStore
}): Promise<FoundOrNot<DbBnsZoneFile>> {
const queryResult = await this.queryTx(async client => {
const maxBlockHeight = await this.getMaxBlockHeight(client, { includeUnanchored });
const zonefileHashResult = await client.query<{ name: string; zonefile: string }>(
`
SELECT name, zonefile_hash as zonefile FROM (
(
SELECT DISTINCT ON (name) name, zonefile_hash
FROM names
WHERE name = $1
AND registered_at <= $2
AND canonical = true AND microblock_canonical = true
ORDER BY name, registered_at DESC, tx_index DESC
LIMIT 1
)
UNION ALL (
SELECT DISTINCT ON (fully_qualified_subdomain) fully_qualified_subdomain as name, zonefile_hash
FROM subdomains
WHERE fully_qualified_subdomain = $1
AND block_height <= $2
AND canonical = true AND microblock_canonical = true
ORDER BY fully_qualified_subdomain, block_height DESC, tx_index DESC
LIMIT 1
)
) results
LIMIT 1
`,
[name, maxBlockHeight]
);
if (zonefileHashResult.rowCount === 0) {
return zonefileHashResult;
// Depending on the kind of name we got, use the correct table to pivot on canonical chain
// state to get the zonefile. We can't pivot on the `txs` table because some names/subdomains
// were imported from Stacks v1 and they don't have an associated tx.
const isSubdomain = name.split('.').length > 2;
if (isSubdomain) {
return client.query<{ zonefile: string }>(
`
SELECT zonefile
FROM zonefiles AS z
INNER JOIN subdomains AS s ON
s.fully_qualified_subdomain = z.name
AND s.tx_id = z.tx_id
AND s.index_block_hash = z.index_block_hash
WHERE z.name = $1
AND s.canonical = TRUE
AND s.microblock_canonical = TRUE
AND s.block_height <= $2
ORDER BY s.block_height DESC, s.microblock_sequence DESC, s.tx_index DESC
LIMIT 1
`,
[name, maxBlockHeight]
);
} else {
return client.query<{ zonefile: string }>(
`
SELECT zonefile
FROM zonefiles AS z
INNER JOIN names AS n USING (name, tx_id, index_block_hash)
WHERE z.name = $1
AND n.canonical = TRUE
AND n.microblock_canonical = TRUE
AND n.registered_at <= $2
ORDER BY n.registered_at DESC, n.microblock_sequence DESC, n.tx_index DESC
LIMIT 1
`,
[name, maxBlockHeight]
);
}
const zonefileHash = zonefileHashResult.rows[0].zonefile;
const zonefileResult = await client.query<{ zonefile: string }>(
`
SELECT zonefile
FROM zonefiles
WHERE zonefile_hash = $1
`,
[zonefileHash]
);
if (zonefileResult.rowCount === 0) {
return zonefileHashResult;
}
zonefileHashResult.rows[0].zonefile = zonefileResult.rows[0].zonefile;
return zonefileHashResult;
});
if (queryResult.rowCount > 0) {
return {
found: true,
@@ -7313,8 +7467,10 @@ export class PgDataStore
`
SELECT DISTINCT ON (fully_qualified_subdomain) fully_qualified_subdomain
FROM subdomains
WHERE name = $1 AND block_height <= $2
AND canonical = true AND microblock_canonical = true
WHERE name = $1
AND block_height <= $2
AND canonical = true
AND microblock_canonical = true
ORDER BY fully_qualified_subdomain, block_height DESC, microblock_sequence DESC, tx_index DESC
`,
[name, maxBlockHeight]
@@ -7340,7 +7496,7 @@ export class PgDataStore
FROM subdomains
WHERE block_height <= $2
AND canonical = true AND microblock_canonical = true
ORDER BY fully_qualified_subdomain, block_height DESC, tx_index DESC
ORDER BY fully_qualified_subdomain, block_height DESC, microblock_sequence DESC, tx_index DESC
LIMIT 100
OFFSET $1
`,
@@ -7361,7 +7517,7 @@ export class PgDataStore
FROM names
WHERE canonical = true AND microblock_canonical = true
AND registered_at <= $2
ORDER BY name, registered_at DESC, tx_index DESC
ORDER BY name, registered_at DESC, microblock_sequence DESC, tx_index DESC
LIMIT 100
OFFSET $1
`,
@@ -7382,36 +7538,29 @@ export class PgDataStore
}): Promise<FoundOrNot<DbBnsSubdomain & { index_block_hash: string }>> {
const queryResult = await this.queryTx(async client => {
const maxBlockHeight = await this.getMaxBlockHeight(client, { includeUnanchored });
const subdomainResult = await client.query<
const result = await client.query<
DbBnsSubdomain & { tx_id: Buffer; index_block_hash: Buffer }
>(
`
SELECT DISTINCT ON(subdomains.fully_qualified_subdomain) subdomains.fully_qualified_subdomain, *
FROM subdomains
WHERE canonical = true AND microblock_canonical = true
AND block_height <= $2
AND fully_qualified_subdomain = $1
ORDER BY fully_qualified_subdomain, block_height DESC, tx_index DESC
SELECT s.*, z.zonefile
FROM subdomains AS s
LEFT JOIN zonefiles AS z
ON z.name = s.fully_qualified_subdomain
AND z.tx_id = s.tx_id
AND z.index_block_hash = s.index_block_hash
WHERE s.canonical = true
AND s.microblock_canonical = true
AND s.block_height <= $2
AND s.fully_qualified_subdomain = $1
ORDER BY s.block_height DESC, s.microblock_sequence DESC, s.tx_index DESC
LIMIT 1
`,
[subdomain, maxBlockHeight]
);
if (subdomainResult.rowCount === 0 || !subdomainResult.rows[0].zonefile_hash) {
return subdomainResult;
if (result.rowCount === 0 || !result.rows[0].zonefile_hash) {
return result;
}
const zonefileHash = subdomainResult.rows[0].zonefile_hash;
const zonefileResult = await client.query(
`
SELECT zonefile
FROM zonefiles
WHERE zonefile_hash = $1
`,
[zonefileHash]
);
if (zonefileResult.rowCount === 0) {
return subdomainResult;
}
subdomainResult.rows[0].zonefile = zonefileResult.rows[0].zonefile;
return subdomainResult;
return result;
});
if (queryResult.rowCount > 0) {
return {
@@ -7434,7 +7583,7 @@ export class PgDataStore
FROM subdomains
WHERE canonical = true AND microblock_canonical = true
AND name = $1
ORDER BY name, block_height DESC, tx_index DESC
ORDER BY name, block_height DESC, microblock_sequence DESC, tx_index DESC
LIMIT 1
`,
[args.name]

View File

@@ -156,6 +156,9 @@ export async function importEventsFromTsv(
});
if (rawEvent.event_path === '/new_block') {
blockHeight = await getDbBlockHeight(db);
if (blockHeight % 1000 === 0) {
console.log(`Event file block height reached: ${blockHeight}`);
}
}
}
}

View File

@@ -18,14 +18,5 @@ export const enum BnsContractIdentifier {
mainnet = 'SP000000000000000000002Q6VF78.bns',
testnet = 'ST000000000000000000002AMW42H.bns',
}
export const namespaceReadyFunction = 'namespace-ready';
export const nameFunctions = [
'name-import',
'name-revoke',
'name-update',
'name-transfer',
'name-renewal',
'name-register',
];
export const bnsBlockchain = 'stacks';

View File

@@ -1,29 +1,25 @@
import { Address, ChainID, StacksMessageType } from '@stacks/transactions';
import { DbBnsNamespace } from './datastore/common';
import { hexToBuffer, hexToUtf8String } from './helpers';
import { CoreNodeParsedTxMessage } from './event-stream/core-node-message';
import { StacksCoreRpcClient, getCoreNodeEndpoint } from './core-rpc/client';
import { ChainID, ClarityType, hexToCV } from '@stacks/transactions';
import { hexToBuffer, hexToUtf8String } from '../../helpers';
import { CoreNodeParsedTxMessage } from '../../event-stream/core-node-message';
import { getCoreNodeEndpoint } from '../../core-rpc/client';
import { StacksMainnet, StacksTestnet } from '@stacks/network';
import { URIType } from 'zone-file/dist/zoneFile';
import { BnsContractIdentifier } from './bns-constants';
import { BnsContractIdentifier, printTopic } from './bns-constants';
import * as crypto from 'crypto';
import {
ClarityTypeID,
decodeClarityValue,
ClarityValue,
ClarityValueBuffer,
ClarityValueInt,
ClarityValueList,
ClarityValueOptional,
ClarityValueOptionalSome,
ClarityValueOptionalUInt,
ClarityValuePrincipalStandard,
ClarityValueStringAscii,
ClarityValueTuple,
ClarityValueUInt,
TxPayloadTypeID,
ClarityValuePrincipalContract,
} from 'stacks-encoding-native-js';
import { SmartContractEvent } from '../core-node-message';
import { DbBnsNamespace, DbBnsName } from '../../datastore/common';
interface Attachment {
attachment: {
@@ -160,8 +156,8 @@ export function parseNamespaceRawValue(
const namespaceBns: DbBnsNamespace = {
namespace_id: namespace,
address: address,
base: Number(base),
coeff: Number(coeff),
base: base,
coeff: coeff,
launched_at: launched_at,
lifetime: Number(lifetime),
no_vowel_discount: Number(no_vowel_discount),
@@ -177,39 +173,6 @@ export function parseNamespaceRawValue(
return namespaceBns;
}
/**
 * Looks up the contract-call function name invoked by a given transaction.
 * @param tx_id - Transaction id to search for
 * @param transactions - Parsed core node transactions to scan
 * @returns The called function name, or an empty string when the tx is not
 * found or is not a contract call.
 */
export function getFunctionName(tx_id: string, transactions: CoreNodeParsedTxMessage[]): string {
  const match = transactions.find(tx => tx.core_tx.txid === tx_id);
  if (match && match.parsed_tx.payload.type_id === TxPayloadTypeID.ContractCall) {
    return match.parsed_tx.payload.function_name;
  }
  return '';
}
/**
 * For a `name-transfer` contract call, extracts the principal address of the
 * new name owner (the third function argument).
 * @param tx_id - Transaction id to search for
 * @param transactions - Parsed core node transactions to scan
 * @returns The new owner's standard-principal address, or `undefined` when the
 * tx is not found, is not a contract call, or its third argument is not a
 * standard principal.
 */
export function getNewOwner(
  tx_id: string,
  transactions: CoreNodeParsedTxMessage[]
): string | undefined {
  for (const tx of transactions) {
    if (tx.core_tx.txid === tx_id) {
      if (tx.parsed_tx.payload.type_id === TxPayloadTypeID.ContractCall) {
        if (
          tx.parsed_tx.payload.function_args.length >= 3 &&
          tx.parsed_tx.payload.function_args[2].type_id === ClarityTypeID.PrincipalStandard
        ) {
          const decoded = decodeClarityValue(tx.parsed_tx.payload.function_args[2].hex);
          const principal = decoded as ClarityValuePrincipalStandard;
          // BUG FIX: `principal.address;` was previously a bare expression
          // statement with no effect, so the function always returned
          // `undefined` and transferred names kept their old owner.
          return principal.address;
        }
      }
    }
  }
  return undefined;
}
export function GetStacksNetwork(chainId: ChainID) {
const network = chainId === ChainID.Mainnet ? new StacksMainnet() : new StacksTestnet();
network.coreApiUrl = `http://${getCoreNodeEndpoint()}`;
@@ -272,3 +235,81 @@ export function getBnsContractID(chainId: ChainID) {
chainId === ChainID.Mainnet ? BnsContractIdentifier.mainnet : BnsContractIdentifier.testnet;
return contractId;
}
function isEventFromBnsContract(event: SmartContractEvent): boolean {
return (
event.contract_event.topic === printTopic &&
(event.contract_event.contract_identifier === BnsContractIdentifier.mainnet ||
event.contract_event.contract_identifier === BnsContractIdentifier.testnet)
);
}
/**
 * Parses a BNS name operation out of a BNS contract `print` event.
 * @param event - Smart contract event attached to the transaction
 * @param tx - The parsed transaction that emitted the event
 * @param blockHeight - Block height at which the event was observed
 * @returns A `DbBnsName` record, or `undefined` when the event is not a BNS
 * print event or its raw value cannot be parsed as a name attachment.
 */
export function parseNameFromContractEvent(
  event: SmartContractEvent,
  tx: CoreNodeParsedTxMessage,
  blockHeight: number
): DbBnsName | undefined {
  if (!isEventFromBnsContract(event)) {
    return;
  }
  let attachment: Attachment;
  try {
    // Not every BNS print event carries a name attachment; treat parse
    // failures as "not a name event" rather than an error.
    attachment = parseNameRawValue(event.contract_event.raw_value);
  } catch (error) {
    return;
  }
  // Default owner is the tx sender recorded in the attachment metadata.
  let name_address = attachment.attachment.metadata.tx_sender.address;
  // Is this a `name-transfer` contract call? If so, record the new owner.
  if (
    attachment.attachment.metadata.op === 'name-transfer' &&
    tx.parsed_tx.payload.type_id === TxPayloadTypeID.ContractCall &&
    tx.parsed_tx.payload.function_args.length >= 3 &&
    tx.parsed_tx.payload.function_args[2].type_id === ClarityTypeID.PrincipalStandard
  ) {
    const decoded = decodeClarityValue(tx.parsed_tx.payload.function_args[2].hex);
    const principal = decoded as ClarityValuePrincipalStandard;
    name_address = principal.address;
  }
  const name: DbBnsName = {
    // Fully qualified name, e.g. `myname.mynamespace`.
    name: attachment.attachment.metadata.name.concat('.', attachment.attachment.metadata.namespace),
    namespace_id: attachment.attachment.metadata.namespace,
    address: name_address,
    // expire_block will be calculated upon DB insert based on the namespace's lifetime.
    expire_block: 0,
    registered_at: blockHeight,
    zonefile_hash: attachment.attachment.hash,
    // zonefile will be updated when an `/attachments/new` message arrives.
    zonefile: '',
    tx_id: event.txid,
    tx_index: tx.core_tx.tx_index,
    status: attachment.attachment.metadata.op,
    canonical: true,
  };
  return name;
}
/**
 * Parses a BNS namespace launch out of a BNS contract `print` event.
 * Only `namespace-ready` events (tuple with `status == 'ready'`) produce a
 * result; any other event yields `undefined`.
 * @param event - Smart contract event attached to the transaction
 * @param tx - The parsed transaction that emitted the event
 * @param blockHeight - Block height at which the event was observed
 * @returns A `DbBnsNamespace` record, or `undefined`.
 */
export function parseNamespaceFromContractEvent(
  event: SmartContractEvent,
  tx: CoreNodeParsedTxMessage,
  blockHeight: number
): DbBnsNamespace | undefined {
  if (!isEventFromBnsContract(event)) {
    return;
  }
  // Look for a `namespace-ready` BNS print event.
  const decodedEvent = hexToCV(event.contract_event.raw_value);
  if (
    decodedEvent.type === ClarityType.Tuple &&
    decodedEvent.data.status &&
    decodedEvent.data.status.type === ClarityType.StringASCII &&
    decodedEvent.data.status.data === 'ready'
  ) {
    // Delegate the full tuple decoding (price function, buckets, etc.)
    // to the raw-value parser.
    const namespace = parseNamespaceRawValue(
      event.contract_event.raw_value,
      blockHeight,
      event.txid,
      tx.core_tx.tx_index
    );
    return namespace;
  }
}

View File

@@ -25,7 +25,7 @@ interface CoreNodeEventBase {
committed: boolean;
}
interface SmartContractEvent extends CoreNodeEventBase {
export interface SmartContractEvent extends CoreNodeEventBase {
type: CoreNodeEventType.ContractEvent;
contract_event: {
/** Fully qualified contract ID, e.g. "ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH.kv-store" */

View File

@@ -1,6 +1,6 @@
import { inspect } from 'util';
import * as net from 'net';
import { Server, createServer } from 'http';
import { createServer } from 'http';
import * as express from 'express';
import * as bodyParser from 'body-parser';
import { asyncHandler } from '../api/async-handler';
@@ -8,7 +8,7 @@ import PQueue from 'p-queue';
import * as expressWinston from 'express-winston';
import * as winston from 'winston';
import { hexToBuffer, logError, logger, digestSha512_256, I32_MAX, LogLevel } from '../helpers';
import { hexToBuffer, logError, logger, LogLevel } from '../helpers';
import {
CoreNodeBlockMessage,
CoreNodeEventType,
@@ -44,6 +44,7 @@ import {
DataStoreMicroblockUpdateData,
DataStoreTxEventData,
DbMicroblock,
DataStoreAttachmentData,
} from '../datastore/common';
import {
getTxSenderAddress,
@@ -61,23 +62,8 @@ import {
TxPayloadTypeID,
} from 'stacks-encoding-native-js';
import { ChainID } from '@stacks/transactions';
import {
getFunctionName,
getNewOwner,
parseNameRawValue,
parseNamespaceRawValue,
parseResolver,
parseZoneFileTxt,
} from '../bns-helpers';
import {
printTopic,
namespaceReadyFunction,
nameFunctions,
BnsContractIdentifier,
} from '../bns-constants';
import * as zoneFileParser from 'zone-file';
import { BnsContractIdentifier } from './bns/bns-constants';
import { parseNameFromContractEvent, parseNamespaceFromContractEvent } from './bns/bns-helpers';
async function handleRawEventRequest(
eventPath: string,
@@ -381,51 +367,18 @@ function parseDataStoreTxEventData(
value: hexToBuffer(event.contract_event.raw_value),
};
dbTx.contractLogEvents.push(entry);
if (
event.contract_event.topic === printTopic &&
(event.contract_event.contract_identifier === BnsContractIdentifier.mainnet ||
event.contract_event.contract_identifier === BnsContractIdentifier.testnet)
) {
const functionName = getFunctionName(event.txid, parsedTxs);
if (nameFunctions.includes(functionName)) {
const attachment = parseNameRawValue(event.contract_event.raw_value);
let name_address = attachment.attachment.metadata.tx_sender.address;
if (functionName === 'name-transfer') {
const new_owner = getNewOwner(event.txid, parsedTxs);
if (new_owner) {
name_address = new_owner;
}
}
const name: DbBnsName = {
name: attachment.attachment.metadata.name.concat(
'.',
attachment.attachment.metadata.namespace
),
namespace_id: attachment.attachment.metadata.namespace,
address: name_address,
expire_block: 0,
registered_at: blockData.block_height,
zonefile_hash: attachment.attachment.hash,
zonefile: '', // zone file will be updated in /attachments/new
tx_id: event.txid,
tx_index: entry.tx_index,
status: attachment.attachment.metadata.op,
canonical: true,
};
dbTx.names.push(name);
}
if (functionName === namespaceReadyFunction) {
// event received for namespaces
const namespace: DbBnsNamespace | undefined = parseNamespaceRawValue(
event.contract_event.raw_value,
blockData.block_height,
event.txid,
entry.tx_index
);
if (namespace != undefined) {
dbTx.namespaces.push(namespace);
}
}
// Check if we have new BNS names or namespaces.
const parsedTx = parsedTxs.find(entry => entry.core_tx.txid === event.txid);
if (!parsedTx) {
throw new Error(`Unexpected missing tx during BNS parsing by tx_id ${event.txid}`);
}
const name = parseNameFromContractEvent(event, parsedTx, blockData.block_height);
if (name) {
dbTx.names.push(name);
}
const namespace = parseNamespaceFromContractEvent(event, parsedTx, blockData.block_height);
if (namespace) {
dbTx.namespaces.push(namespace);
}
break;
}
@@ -575,83 +528,33 @@ function parseDataStoreTxEventData(
}
async function handleNewAttachmentMessage(msg: CoreNodeAttachmentMessage[], db: DataStore) {
for (const attachment of msg) {
if (
attachment.contract_id === BnsContractIdentifier.mainnet ||
attachment.contract_id === BnsContractIdentifier.testnet
) {
const metadataCV = decodeClarityValue<
ClarityValueTuple<{
op: ClarityValueStringAscii;
name: ClarityValueBuffer;
namespace: ClarityValueBuffer;
}>
>(attachment.metadata);
const op = metadataCV.data['op'].data;
const zonefile = Buffer.from(attachment.content.slice(2), 'hex').toString();
const zoneFileHash = attachment.content_hash;
if (op === 'name-update') {
const name = hexToBuffer(metadataCV.data['name'].buffer).toString('utf8');
const namespace = hexToBuffer(metadataCV.data['namespace'].buffer).toString('utf8');
const zoneFileContents = zoneFileParser.parseZoneFile(zonefile);
const zoneFileTxt = zoneFileContents.txt;
const blockData = {
index_block_hash: '',
parent_index_block_hash: '',
microblock_hash: '',
microblock_sequence: I32_MAX,
microblock_canonical: true,
};
// Case for subdomain
if (zoneFileTxt) {
// get unresolved subdomain
let isCanonical = true;
const dbTx = await db.getTxStrict({
txId: attachment.tx_id,
indexBlockHash: attachment.index_block_hash,
});
if (dbTx.found) {
isCanonical = dbTx.result.canonical;
blockData.index_block_hash = dbTx.result.index_block_hash;
blockData.parent_index_block_hash = dbTx.result.parent_index_block_hash;
blockData.microblock_hash = dbTx.result.microblock_hash;
blockData.microblock_sequence = dbTx.result.microblock_sequence;
blockData.microblock_canonical = dbTx.result.microblock_canonical;
} else {
logger.warn(
`Could not find transaction ${attachment.tx_id} associated with attachment`
);
}
// case for subdomain
const subdomains: DbBnsSubdomain[] = [];
for (let i = 0; i < zoneFileTxt.length; i++) {
const zoneFile = zoneFileTxt[i];
const parsedTxt = parseZoneFileTxt(zoneFile.txt);
if (parsedTxt.owner === '') continue; //if txt has no owner , skip it
const subdomain: DbBnsSubdomain = {
name: name.concat('.', namespace),
namespace_id: namespace,
fully_qualified_subdomain: zoneFile.name.concat('.', name, '.', namespace),
owner: parsedTxt.owner,
zonefile_hash: parsedTxt.zoneFileHash,
zonefile: parsedTxt.zoneFile,
tx_id: attachment.tx_id,
tx_index: -1,
canonical: isCanonical,
parent_zonefile_hash: attachment.content_hash.slice(2),
parent_zonefile_index: 0, //TODO need to figure out this field
block_height: Number.parseInt(attachment.block_height, 10),
zonefile_offset: 1,
resolver: zoneFileContents.uri ? parseResolver(zoneFileContents.uri) : '',
};
subdomains.push(subdomain);
}
await db.resolveBnsSubdomains(blockData, subdomains);
}
const attachments = msg
.map(message => {
if (
message.contract_id === BnsContractIdentifier.mainnet ||
message.contract_id === BnsContractIdentifier.testnet
) {
const metadataCV = decodeClarityValue<
ClarityValueTuple<{
op: ClarityValueStringAscii;
name: ClarityValueBuffer;
namespace: ClarityValueBuffer;
}>
>(message.metadata);
return {
op: metadataCV.data['op'].data,
zonefile: message.content.slice(2),
name: hexToBuffer(metadataCV.data['name'].buffer).toString('utf8'),
namespace: hexToBuffer(metadataCV.data['namespace'].buffer).toString('utf8'),
zonefileHash: message.content_hash,
txId: message.tx_id,
indexBlockHash: message.index_block_hash,
blockHeight: Number.parseInt(message.block_height, 10),
} as DataStoreAttachmentData;
}
await db.updateZoneContent(zonefile, zoneFileHash, attachment.tx_id);
}
}
})
.filter((msg): msg is DataStoreAttachmentData => !!msg);
await db.updateAttachments(attachments);
}
interface EventMessageHandler {

View File

@@ -157,7 +157,7 @@ type DisabledLogLevels = Exclude<
type LoggerInterface = Omit<winston.Logger, DisabledLogLevels> & { level: LogLevel };
const LOG_LEVELS: LogLevel[] = ['error', 'warn', 'info', 'http', 'verbose', 'debug', 'silly'];
const defaultLogLevel: LogLevel = (() => {
export const defaultLogLevel: LogLevel = (() => {
const STACKS_API_LOG_LEVEL_ENV_VAR = 'STACKS_API_LOG_LEVEL';
const logLevelEnvVar = process.env[
STACKS_API_LOG_LEVEL_ENV_VAR
@@ -236,12 +236,6 @@ export function microStxToStx(microStx: bigint | BigNumber): string {
return bigNumResult.toFixed(STACKS_DECIMAL_PLACES, MAX_BIGNUMBER_ROUND_MODE);
}
export function digestSha512_256(input: Buffer): Buffer {
const hash = crypto.createHash('sha512-256');
const digest = hash.update(input).digest();
return digest;
}
/**
* Checks if a string is a valid Bitcoin address.
* Supports mainnet and testnet address.

View File

@@ -185,8 +185,8 @@ class ChainProcessor extends stream.Writable {
reveal_block: 0,
ready_block: 0,
buckets: parts[2],
base: parseInt(parts[3], 10),
coeff: parseInt(parts[4], 10),
base: BigInt(parts[3]),
coeff: BigInt(parts[4]),
nonalpha_discount: parseInt(parts[5], 10),
no_vowel_discount: parseInt(parts[6], 10),
lifetime: parseInt(parts[7], 10),
@@ -429,15 +429,6 @@ export async function importV1BnsData(db: PgDataStore, importDir: string) {
const client = await db.pool.connect();
try {
await client.query('BEGIN');
logger.info(`Disabling BNS table indices temporarily for a faster import`);
await client.query(`
UPDATE pg_index
SET indisready = false, indisvalid = false
WHERE indrelid = ANY (
SELECT oid FROM pg_class
WHERE relname IN ('subdomains', 'zonefiles', 'namespaces', 'names')
)
`);
const zhashes = await readZones(path.join(importDir, 'name_zonefiles.txt'));
await pipeline(
fs.createReadStream(path.join(importDir, 'chainstate.txt')),
@@ -460,7 +451,8 @@ export async function importV1BnsData(db: PgDataStore, importDir: string) {
SUBDOMAIN_BATCH_SIZE,
false
)) {
await db.updateBatchSubdomains(client, blockData, subdomainBatch);
await db.updateBatchSubdomains(client, [{ blockData, subdomains: subdomainBatch }]);
await db.updateBatchZonefiles(client, [{ blockData, subdomains: subdomainBatch }]);
subdomainsImported += subdomainBatch.length;
if (subdomainsImported % 10_000 === 0) {
logger.info(`Subdomains imported: ${subdomainsImported}`);
@@ -474,12 +466,6 @@ export async function importV1BnsData(db: PgDataStore, importDir: string) {
bns_subdomains_imported: true,
};
await db.updateConfigState(updatedConfigState, client);
logger.info(`Re-indexing BNS tables. This might take a while...`);
await client.query(`REINDEX TABLE subdomains`);
await client.query(`REINDEX TABLE zonefiles`);
await client.query(`REINDEX TABLE namespaces`);
await client.query(`REINDEX TABLE names`);
await client.query('COMMIT');
} catch (error) {
await client.query('ROLLBACK');

View File

@@ -33,11 +33,11 @@ export async function up(pgm: MigrationBuilder): Promise<void> {
notNull: true,
},
base: {
type: 'integer',
type: 'numeric',
notNull: true,
},
coeff: {
type: 'integer',
type: 'numeric',
notNull: true,
},
nonalpha_discount: {
@@ -91,7 +91,14 @@ export async function up(pgm: MigrationBuilder): Promise<void> {
},
});
pgm.createIndex('namespaces', 'index_block_hash', { method: 'hash' });
pgm.createIndex('namespaces', 'microblock_hash', { method: 'hash' });
pgm.createIndex('namespaces', [{ name: 'ready_block', sort: 'DESC' }]);
pgm.createIndex('namespaces', [
{ name: 'ready_block', sort: 'DESC' },
{ name: 'microblock_sequence', sort: 'DESC' },
{ name: 'tx_index', sort: 'DESC' },
]);
pgm.addConstraint(
'namespaces',
'unique_namespace_id_tx_id_index_block_hash_microblock_hash',
'UNIQUE(namespace_id, tx_id, index_block_hash, microblock_hash)'
);
}

View File

@@ -83,9 +83,15 @@ export async function up(pgm: MigrationBuilder): Promise<void> {
},
});
pgm.createIndex('names', 'tx_id', { method: 'hash' });
pgm.createIndex('names', 'name', { method: 'hash' });
pgm.createIndex('names', 'index_block_hash', { method: 'hash' });
pgm.createIndex('names', 'microblock_hash', { method: 'hash' });
pgm.createIndex('names', [{ name: 'registered_at', sort: 'DESC' }]);
pgm.createIndex('names', 'namespace_id');
pgm.createIndex('names', [
{ name: 'registered_at', sort: 'DESC' },
{ name: 'microblock_sequence', sort: 'DESC' },
{ name: 'tx_index', sort: 'DESC' },
]);
pgm.addConstraint(
'names',
'unique_name_tx_id_index_block_hash_microblock_hash',
'UNIQUE(name, tx_id, index_block_hash, microblock_hash)'
);
}

View File

@@ -84,10 +84,15 @@ export async function up(pgm: MigrationBuilder): Promise<void> {
},
});
pgm.createIndex('subdomains', 'owner', { method: 'hash' });
pgm.createIndex('subdomains', 'zonefile_hash', { method: 'hash' });
pgm.createIndex('subdomains', 'fully_qualified_subdomain', { method: 'hash' });
pgm.createIndex('subdomains', 'index_block_hash', { method: 'hash' });
pgm.createIndex('subdomains', 'microblock_hash', { method: 'hash' });
pgm.createIndex('subdomains', [{ name: 'block_height', sort: 'DESC' }]);
pgm.createIndex('subdomains', 'name');
pgm.createIndex('subdomains', [
{ name: 'block_height', sort: 'DESC' },
{ name: 'microblock_sequence', sort: 'DESC' },
{ name: 'tx_index', sort: 'DESC' },
]);
pgm.addConstraint(
'subdomains',
'unique_fully_qualified_subdomain_tx_id_index_block_hash_microblock_hash',
'UNIQUE(fully_qualified_subdomain, tx_id, index_block_hash, microblock_hash)'
);
}

View File

@@ -9,6 +9,10 @@ export async function up(pgm: MigrationBuilder): Promise<void> {
type: 'serial',
primaryKey: true,
},
name: {
type: 'string',
notNull: true,
},
zonefile: {
type: 'string',
notNull: true,
@@ -16,8 +20,21 @@ export async function up(pgm: MigrationBuilder): Promise<void> {
zonefile_hash: {
type: 'string',
notNull: true,
},
tx_id: {
type: 'bytea',
notNull: false,
},
index_block_hash: {
type: 'bytea',
notNull: false,
}
});
pgm.createIndex('zonefiles', 'zonefile_hash', { method: 'hash' });
pgm.addIndex('zonefiles', 'zonefile_hash');
pgm.addConstraint(
'zonefiles',
'unique_name_zonefile_hash_tx_id_index_block_hash',
'UNIQUE(name, zonefile_hash, tx_id, index_block_hash)'
);
}

View File

@@ -13,6 +13,7 @@ import {
DbAssetEventTypeId,
DbBlock,
DbBnsName,
DbBnsNamespace,
DbEventTypeId,
DbFtEvent,
DbMempoolTx,
@@ -496,6 +497,49 @@ function testMinerReward(args?: TestMinerRewardArgs): DbMinerReward {
};
}
/**
 * Optional field overrides for building a test BNS namespace.
 * Any field left unset falls back to a default test value.
 */
interface TestBnsNamespaceArgs {
  namespace_id?: string;
  address?: string;
  launched_at?: number;
  reveal_block?: number;
  ready_block?: number;
  buckets?: string;
  // Price function base/coeff are bigints to match the `numeric` DB columns.
  base?: bigint;
  coeff?: bigint;
  nonalpha_discount?: number;
  no_vowel_discount?: number;
  lifetime?: number;
  status?: string;
  tx_id?: string;
  tx_index?: number;
  canonical?: boolean;
}
/**
 * Generate a test BNS namespace, filling any field not provided in `args`
 * with a sensible default test value.
 * @param args - Optional namespace data
 * @returns `DbBnsNamespace`
 */
function testBnsNamespace(args?: TestBnsNamespaceArgs): DbBnsNamespace {
  const overrides = args ?? {};
  return {
    namespace_id: overrides.namespace_id ?? BNS_NAMESPACE_ID,
    address: overrides.address ?? SENDER_ADDRESS,
    launched_at: overrides.launched_at ?? BLOCK_HEIGHT,
    reveal_block: overrides.reveal_block ?? BLOCK_HEIGHT,
    ready_block: overrides.ready_block ?? BLOCK_HEIGHT,
    buckets: overrides.buckets ?? '1,1,1',
    base: overrides.base ?? 1n,
    coeff: overrides.coeff ?? 1n,
    nonalpha_discount: overrides.nonalpha_discount ?? 0,
    no_vowel_discount: overrides.no_vowel_discount ?? 0,
    lifetime: overrides.lifetime ?? 0,
    status: overrides.status ?? 'ready',
    tx_id: overrides.tx_id ?? TX_ID,
    tx_index: overrides.tx_index ?? 0,
    canonical: overrides.canonical ?? true,
  };
}
interface TestBnsNameArgs {
name?: string;
address?: string;
@@ -655,12 +699,24 @@ export class TestBlockBuilder {
/**
 * Adds a BNS name to the current transaction, defaulting tx id, tx index and
 * registration height from the builder's current state.
 */
addTxBnsName(args?: TestBnsNameArgs): TestBlockBuilder {
  this.txData.names.push(
    testBnsName({
      tx_id: this.txData.tx.tx_id,
      tx_index: this.txIndex,
      registered_at: this.block.block_height,
      ...args,
    })
  );
  return this;
}
/**
 * Adds a BNS namespace to the current transaction, defaulting tx id, tx index
 * and ready/reveal heights from the builder's current state.
 */
addTxBnsNamespace(args?: TestBnsNamespaceArgs): TestBlockBuilder {
  this.txData.namespaces.push(
    testBnsNamespace({
      tx_id: this.txData.tx.tx_id,
      tx_index: this.txIndex,
      ready_block: this.block.block_height,
      reveal_block: this.block.block_height,
      ...args,
    })
  );
  return this;
}
build(): DataStoreBlockUpdateData {
return this.data;
}
@@ -746,6 +802,15 @@ export class TestMicroblockStreamBuilder {
return this;
}
/**
 * Adds a BNS namespace to the current microblock transaction, defaulting tx
 * id and tx index from the builder's current state.
 */
addTxBnsNamespace(args?: TestBnsNamespaceArgs): TestMicroblockStreamBuilder {
  this.txData.namespaces.push(
    testBnsNamespace({
      tx_id: this.txData.tx.tx_id,
      tx_index: this.txIndex,
      ...args,
    })
  );
  return this;
}
build(): DataStoreMicroblockUpdateData {
return this.data;
}

View File

@@ -69,7 +69,7 @@ describe('BNS API tests', () => {
miner_txid: '0x4321',
canonical: true,
})
.addTx()
.addTx({ tx_id: '0x1234' })
.addTxNftEvent({
asset_event_type_id: DbAssetEventTypeId.Mint,
value: bnsNameCV('xyz.abc'),
@@ -92,8 +92,8 @@ describe('BNS API tests', () => {
const namespace: DbBnsNamespace = {
namespace_id: 'abc',
address: 'ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH',
base: 1,
coeff: 1,
base: 1n,
coeff: 1n,
launched_at: 14,
lifetime: 1,
no_vowel_discount: 1,
@@ -289,15 +289,15 @@ describe('BNS API tests', () => {
zonefile_offset: 0,
parent_zonefile_hash: 'p-test-hash',
parent_zonefile_index: 0,
block_height: dbBlock.block_height,
block_height: 2,
tx_index: 0,
tx_id: '',
tx_id: '0x22',
canonical: true,
};
await db.resolveBnsSubdomains(
{
index_block_hash: dbBlock.index_block_hash,
parent_index_block_hash: dbBlock.parent_index_block_hash,
index_block_hash: '0x02',
parent_index_block_hash: '0x1234',
microblock_hash: '',
microblock_sequence: I32_MAX,
microblock_canonical: true,
@@ -343,8 +343,8 @@ describe('BNS API tests', () => {
);
const query1 = await supertest(api.server).get(`/v1/names/invalid/zonefile/${zonefileHash}`);
expect(query1.status).toBe(400);
expect(query1.body.error).toBe('Invalid name or subdomain');
expect(query1.status).toBe(404);
expect(query1.body.error).toBe('No such name or zonefile');
expect(query1.type).toBe('application/json');
});
@@ -380,7 +380,7 @@ describe('BNS API tests', () => {
const query1 = await supertest(api.server).get(`/v1/names/${name}/zonefile/invalidHash`);
expect(query1.status).toBe(404);
expect(query1.body.error).toBe('No such zonefile');
expect(query1.body.error).toBe('No such name or zonefile');
expect(query1.type).toBe('application/json');
});
@@ -670,13 +670,13 @@ describe('BNS API tests', () => {
parent_zonefile_index: 0,
block_height: dbBlock.block_height,
tx_index: 0,
tx_id: '',
tx_id: '0x22',
canonical: true,
};
await db.resolveBnsSubdomains(
{
index_block_hash: dbBlock.index_block_hash,
parent_index_block_hash: dbBlock.parent_index_block_hash,
index_block_hash: '0x02',
parent_index_block_hash: '0x1234',
microblock_hash: '',
microblock_sequence: I32_MAX,
microblock_canonical: true,
@@ -694,8 +694,8 @@ describe('BNS API tests', () => {
test('Fail get zonefile by name - invalid name', async () => {
const query1 = await supertest(api.server).get(`/v1/names/invalidName/zonefile`);
expect(query1.status).toBe(400);
expect(query1.body.error).toBe('Invalid name or subdomain');
expect(query1.status).toBe(404);
expect(query1.body.error).toBe('No such name or zonefile does not exist');
expect(query1.type).toBe('application/json');
});
@@ -764,7 +764,7 @@ describe('BNS API tests', () => {
parent_zonefile_index: 0,
block_height: dbBlock.block_height,
tx_index: 0,
tx_id: '',
tx_id: '0x1234',
canonical: true,
};
await db.resolveBnsSubdomains(
@@ -782,6 +782,15 @@ describe('BNS API tests', () => {
`/v1/names/${subdomain.fully_qualified_subdomain}`
);
expect(query.status).toBe(200);
expect(query.body).toStrictEqual({
address: "test-address",
blockchain: "stacks",
last_txid: "0x1234",
resolver: "https://registrar.blockstack.org",
status: "registered_subdomain",
zonefile: "test",
zonefile_hash: "test-hash",
});
});
test('Success: fqn redirect test', async () => {
@@ -798,7 +807,7 @@ describe('BNS API tests', () => {
parent_zonefile_index: 0,
block_height: dbBlock.block_height,
tx_index: 0,
tx_id: '',
tx_id: '0x1234',
canonical: true,
};
await db.resolveBnsSubdomains(

View File

@@ -0,0 +1,95 @@
import {
parseNamespaceRawValue,
parseNameRawValue,
parseZoneFileTxt,
} from '../event-stream/bns/bns-helpers';
import * as zoneFileParser from 'zone-file';
// Unit tests for the BNS event-stream parsing helpers. These exercise the
// Clarity-value decoding of namespace/name print events and the zonefile TXT
// record parser, using hex fixtures captured from real contract events.
describe('BNS helper tests', () => {
  test('Success: namespace parsed', () => {
    // Expected fields decoded from the hex-encoded Clarity tuple passed to
    // parseNamespaceRawValue below.
    const expectedNamespace = {
      namespace_id: 'xyz',
      address: 'ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH',
      base: 1n,
      coeff: 1n,
      launched_at: 14,
      lifetime: 1,
      no_vowel_discount: 1,
      nonalpha_discount: 1,
      ready_block: 4,
      reveal_block: 6,
      status: 'ready',
      buckets: '1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1',
      tx_id: '0x2114c8cda9e829f8b5d3c4163724ae9c4d9142d2bae4a35bffb006408d21c0ab',
      index_block_hash: '0xdeadbeef',
    };
    const namespace = parseNamespaceRawValue(
      // This value comes from Smart Contract Event (event.contract_event.raw_value)
      '0x0c00000003096e616d657370616365020000000378797a0a70726f706572746965730c000000050b6c61756e636865642d61740a010000000000000000000000000000000e086c69666574696d650100000000000000000000000000000001106e616d6573706163652d696d706f7274051abf8e82623c380cd870931d48b525d5e12a4d67820e70726963652d66756e6374696f6e0c0000000504626173650100000000000000000000000000000001076275636b6574730b00000010010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000105636f6566660100000000000000000000000000000001116e6f2d766f77656c2d646973636f756e740100000000000000000000000000000001116e6f6e616c7068612d646973636f756e7401000000000000000000000000000000010b72657665616c65642d61740100000000000000000000000000000006067374617475730d000000057265616479',
      4,
      '0x2114c8cda9e829f8b5d3c4163724ae9c4d9142d2bae4a35bffb006408d21c0ab',
      0
    );
    // Field-by-field comparison (namespace may be undefined on parse failure,
    // hence the optional chaining).
    expect(namespace?.address).toEqual(expectedNamespace.address);
    expect(namespace?.namespace_id).toEqual(expectedNamespace.namespace_id);
    expect(namespace?.base).toEqual(expectedNamespace.base);
    expect(namespace?.coeff).toEqual(expectedNamespace.coeff);
    expect(namespace?.launched_at).toEqual(expectedNamespace.launched_at);
    expect(namespace?.lifetime).toEqual(expectedNamespace.lifetime);
    expect(namespace?.no_vowel_discount).toEqual(expectedNamespace.no_vowel_discount);
    expect(namespace?.nonalpha_discount).toEqual(expectedNamespace.nonalpha_discount);
    expect(namespace?.ready_block).toEqual(expectedNamespace.ready_block);
    expect(namespace?.reveal_block).toEqual(expectedNamespace.reveal_block);
    expect(namespace?.status).toEqual(expectedNamespace.status);
    expect(namespace?.buckets).toEqual(expectedNamespace.buckets);
    expect(namespace?.tx_id).toEqual(expectedNamespace.tx_id);
  });

  test('Success: parse name raw value', () => {
    // Expected attachment metadata decoded from the name-import print event
    // fixture below.
    const expectedName = {
      attachment: {
        hash: 'c5217bcb3e52612ff7c835f9bb46a5f86aa73b8d',
        metadata: {
          name: 'abcdef',
          namespace: 'xyz',
          tx_sender: {
            type: 0,
            version: 26,
            hash160: 'bf8e82623c380cd870931d48b525d5e12a4d6782',
          },
          op: 'name-import',
        },
      },
    };
    const expectedAttachment = expectedName.attachment;
    const name = parseNameRawValue(
      // This value comes from Smart Contract Event (event.contract_event.raw_value)
      '0x0c000000010a6174746163686d656e740c00000003106174746163686d656e742d696e646578010000000000000000000000000000000004686173680200000014c5217bcb3e52612ff7c835f9bb46a5f86aa73b8d086d657461646174610c00000004046e616d650200000006616263646566096e616d657370616365020000000378797a026f700d0000000b6e616d652d696d706f72740974782d73656e646572051abf8e82623c380cd870931d48b525d5e12a4d6782'
    );
    const attachment = name.attachment;
    expect(attachment.hash).toEqual(expectedAttachment.hash);
    expect(attachment.metadata.name).toEqual(expectedAttachment.metadata.name);
    expect(attachment.metadata.namespace).toEqual(expectedAttachment.metadata.namespace);
    expect(attachment.metadata.op).toEqual(expectedAttachment.metadata.op);
    expect(attachment.metadata.tx_sender.version).toEqual(
      expectedAttachment.metadata.tx_sender.version
    );
    expect(attachment.metadata.tx_sender.hash160).toEqual(
      expectedAttachment.metadata.tx_sender.hash160
    );
  });

  test('Parse TXT', () => {
    // Sample subdomain zonefile; the TXT record carries the subdomain's
    // owner/seqn/parts/zf0 key=value pairs that parseZoneFileTxt extracts.
    // NOTE: template-literal content is significant whitespace — do not re-indent.
    const subdomain = `$ORIGIN abcdef.xyz
$TTL 3600
asim IN TXT "owner=ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH" "seqn=0" "parts=1" "zf0=JE9SSUdJTiBhc2ltCiRUVEwgMzYwMApfaHR0cHMuX3RjcCBVUkkgMTAgMSAiaHR0cHM6Ly9nYWlhLmJsb2Nrc3RhY2sub3JnL2h1Yi9TVDJaUlgwSzI3R1cwU1AzR0pDRU1IRDk1VFFHSk1LQjdHOVkwWDFNSC9wcm9maWxlLmpzb24iCg=="
_http._tcp IN URI 10 1 "https://gaia.blockstack.org/hub/1M3325hr1utdv4HhSAfvYKhapzPP9Axhde/profile.json"
_resolver IN URI 10 1 "http://localhost:3000"
`;
    const parsedZoneFile = zoneFileParser.parseZoneFile(subdomain);
    // Only the first TXT record is inspected here.
    const zoneFileTxt = parseZoneFileTxt(parsedZoneFile.txt?.[0].txt as string[]);
    expect(zoneFileTxt.owner).toBe('ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH');
    expect(zoneFileTxt.parts).toBe('1');
    expect(zoneFileTxt.seqn).toBe('0');
  });
});

View File

@@ -1,6 +1,3 @@
import { PgDataStore, cycleMigrations, runMigrations } from '../datastore/postgres-store';
import { PoolClient } from 'pg';
import { ApiServer, startApiServer } from '../api/init';
@@ -11,7 +8,6 @@ import { createHash } from 'crypto';
import { DbTx, DbTxStatus } from '../datastore/common';
import { AnchorMode, ChainID, PostConditionMode, someCV } from '@stacks/transactions';
import { StacksMocknet } from '@stacks/network';
import {
broadcastTransaction,
bufferCV,
@@ -26,8 +22,6 @@ import {
import BigNum = require('bn.js');
import { logger } from '../helpers';
import { testnetKeys } from '../api/routes/debug';
import { importV1BnsData } from '../import-v1';
import * as assert from 'assert';
import { TestBlockBuilder } from '../test-utils/test-builders';
function hash160(bfr: Buffer): Buffer {
@@ -101,10 +95,10 @@ describe('BNS integration tests', () => {
body: JSON.stringify(body),
headers: { 'Content-Type': 'application/json' },
});
const submitResult = await apiResult.json();
await apiResult.json();
const expectedTxId = '0x' + transaction.txid();
const result = await standByForTx(expectedTxId);
if (result.status != 1) logger.error('name-import error');
if (result.status != 1) throw new Error('result status error');
await standbyBnsName(expectedTxId);
return transaction;
}
@@ -173,10 +167,8 @@ describe('BNS integration tests', () => {
async function initiateNamespaceNetwork(namespace: string, salt: Buffer, namespaceHash: Buffer, testnetKey: TestnetKey, expiration: number){
while (true) {
try {
const preorderTransaction = await namespacePreorder(namespaceHash, testnetKey);
const revealTransaction = await namespaceReveal(namespace, salt, testnetKey, expiration);
await namespacePreorder(namespaceHash, testnetKey);
await namespaceReveal(namespace, salt, testnetKey, expiration);
break;
} catch (e) {
console.log('error connection', e);
@@ -194,13 +186,10 @@ describe('BNS integration tests', () => {
network,
anchorMode: AnchorMode.Any
};
const transaction = await makeContractCall(txOptions);
await broadcastTransaction(transaction, network);
const readyResult = await standByForTx('0x' + transaction.txid());
if (readyResult.status != 1) logger.error('namespace-ready error');
return transaction;
}
async function nameImport(namespace: string, zonefile: string, name: string, testnetKey: TestnetKey) {
@@ -479,7 +468,7 @@ describe('BNS integration tests', () => {
const zonefile = `$ORIGIN ${name}.${namespace}\n$TTL 3600\n_http._tcp IN URI 10 1 "https://blockstack.s3.amazonaws.com/${name}.${namespace}"\n`;
const importZonefile = `$ORIGIN ${name}.${namespace}\n$TTL 3600\n_http._tcp IN URI 10 1 "https://blockstack.s3.amazonaws.com/${name}.${namespace}"\n`;
const testnetKey = { pkey: testnetKeys[2].secretKey, address: testnetKeys[2].stacksAddress};
// initializing namespace network
// initializing namespace network
await initiateNamespaceNetwork(namespace, salt, namespaceHash, testnetKey, 12);
await namespaceReady(namespace, testnetKey.pkey);
@@ -515,7 +504,7 @@ describe('BNS integration tests', () => {
const namespaceHash = hash160(Buffer.concat([Buffer.from(namespace), salt]));
const testnetKey = { pkey: testnetKeys[4].secretKey, address: testnetKeys[4].stacksAddress};
const zonefile = `$ORIGIN ${name}.${namespace}\n$TTL 3600\n_http._tcp IN URI 10 1 "https://blockstack.s3.amazonaws.com/${name}.${namespace}"\n`;
// initializing namespace network
await initiateNamespaceNetwork(namespace, salt, namespaceHash, testnetKey, 12);
await nameImport(namespace, zonefile, name, testnetKey);
@@ -529,68 +518,78 @@ describe('BNS integration tests', () => {
expect(query1.body.status).toBe('name-revoke');
});
test('name-renewal contract call', async () => {
test('name-import/name-renewal contract call', async () => {
const zonefile = `new zone file`;
const namespace = 'name-renewal';
const name = 'renewal';
const namespaceHash = hash160(Buffer.concat([Buffer.from(namespace), salt]));
const testnetKey = { pkey: testnetKeys[5].secretKey, address: testnetKeys[5].stacksAddress};
// initializing namespace network
await initiateNamespaceNetwork(namespace, salt, namespaceHash, testnetKey, 1);
await nameImport(namespace, zonefile, name, testnetKey);
await namespaceReady(namespace, testnetKey.pkey);
//name renewal
// check expiration block
const query0 = await supertest(api.server).get(`/v1/names/${name}.${namespace}`);
expect(query0.status).toBe(200);
expect(query0.type).toBe('application/json');
expect(query0.body.expire_block).toBe(0); // Imported names don't know about their namespaces
// name renewal
await nameRenewal(namespace, zonefile, testnetKey.pkey, name);
try {
const query1 = await supertest(api.server).get(`/v1/names/${name}.${namespace}`);
expect(query1.status).toBe(200);
expect(query1.type).toBe('application/json');
expect(query1.body.zonefile).toBe(zonefile);
expect(query1.body.status).toBe('name-renewal');
} catch (err: any) {
throw new Error('Error post transaction: ' + err.message);
}
});
test('bns v1-import', async () => {
await importV1BnsData(db, 'src/tests-bns/import-test-files');
// test on-chain name import
const query1 = await supertest(api.server).get(`/v1/names/zumrai.id`);
const query1 = await supertest(api.server).get(`/v1/names/${name}.${namespace}`);
expect(query1.status).toBe(200);
expect(query1.type).toBe('application/json');
expect(query1.body).toEqual({
address: 'SP29EJ0SVM2TRZ3XGVTZPVTKF4SV1VMD8C0GA5SK5',
blockchain: 'stacks',
expire_block: 52595,
last_txid: '',
status: 'name-register',
zonefile:
'$ORIGIN zumrai.id\n$TTL 3600\n_http._tcp IN URI 10 1 "https://gaia.blockstack.org/hub/1EPno1VcdGx89ukN2we4iVpnFtkHzw8i5d/profile.json"\n\n',
zonefile_hash: '853cd126478237bc7392e65091f7ffa5a1556a33',
});
expect(query1.body.zonefile).toBe(zonefile);
expect(query1.body.status).toBe('name-renewal');
// test subdomain import
const query2 = await supertest(api.server).get(`/v1/names/flushreset.id.blockstack`);
// Name should appear only once in namespace list
const query2 = await supertest(api.server).get(`/v1/namespaces/${namespace}/names`);
expect(query2.status).toBe(200);
expect(query2.type).toBe('application/json');
expect(query2.body).toEqual({
address: 'SP2S2F9TCAT43KEJT02YTG2NXVCPZXS1426T63D9H',
blockchain: 'stacks',
last_txid: '',
resolver: 'https://registrar.blockstack.org',
status: 'registered_subdomain',
zonefile:
'$ORIGIN flushreset.id.blockstack\n$TTL 3600\n_http._tcp IN URI 10 1 "https://gaia.blockstack.org/hub/1HEznKZ7mK5fmibweM7eAk8SwRgJ1bWY92/profile.json"\n\n',
zonefile_hash: '14dc091ebce8ea117e1276d802ee903cc0fdde81',
});
expect(query2.body).toStrictEqual(["renewal.name-renewal"]);
const dbquery = await db.getSubdomain({ subdomain: `flushreset.id.blockstack`, includeUnanchored: false });
assert(dbquery.found)
if (dbquery.result){
expect(dbquery.result.name).toBe('id.blockstack');}
// check new expiration block, should not be 0
const query3 = await supertest(api.server).get(`/v1/names/${name}.${namespace}`);
expect(query3.status).toBe(200);
expect(query3.type).toBe('application/json');
expect(query3.body.expire_block).not.toBe(0);
});
test('name-register/name-renewal contract call', async () => {
const saltName = '0000';
const zonefile = `new zone file`;
const namespace = 'name-renewal2';
const name = 'renewal2';
const namespaceHash = hash160(Buffer.concat([Buffer.from(namespace), salt]));
const testnetKey = { pkey: testnetKeys[5].secretKey, address: testnetKeys[5].stacksAddress};
// initializing namespace network
await initiateNamespaceNetwork(namespace, salt, namespaceHash, testnetKey, 1);
await namespaceReady(namespace, testnetKey.pkey);
await nameRegister(namespace, saltName, zonefile, testnetKey, name);
// check expiration block, should not be 0
const query0 = await supertest(api.server).get(`/v1/names/${name}.${namespace}`);
expect(query0.status).toBe(200);
expect(query0.type).toBe('application/json');
expect(query0.body.expire_block).not.toBe(0);
const prevExpiration = query0.body.expire_block;
// name renewal
await nameRenewal(namespace, zonefile, testnetKey.pkey, name);
const query1 = await supertest(api.server).get(`/v1/names/${name}.${namespace}`);
expect(query1.status).toBe(200);
expect(query1.type).toBe('application/json');
expect(query1.body.zonefile).toBe(zonefile);
expect(query1.body.status).toBe('name-renewal');
// check new expiration block, should be greater than the previous one
const query3 = await supertest(api.server).get(`/v1/names/${name}.${namespace}`);
expect(query3.status).toBe(200);
expect(query3.type).toBe('application/json');
expect(query3.body.expire_block > prevExpiration).toBe(true);
});
afterAll(async () => {

View File

@@ -0,0 +1,305 @@
import { ChainID } from '@stacks/transactions';
import { PgDataStore, cycleMigrations, runMigrations } from '../datastore/postgres-store';
import { PoolClient } from 'pg';
import { bnsNameCV, httpPostRequest } from '../helpers';
import { EventStreamServer, startEventServer } from '../event-stream/event-server';
import { TestBlockBuilder, TestMicroblockStreamBuilder } from '../test-utils/test-builders';
import { DbAssetEventTypeId, DbBnsZoneFile } from '../datastore/common';
// Integration-style tests that drive the event-stream HTTP server with raw
// node-event payloads and assert the resulting BNS state in the datastore.
// Each test gets a fresh database via cycleMigrations/runMigrations.
describe('BNS event server tests', () => {
  let db: PgDataStore;
  let client: PoolClient;
  let eventServer: EventStreamServer;

  beforeEach(async () => {
    process.env.PG_DATABASE = 'postgres';
    await cycleMigrations();
    db = await PgDataStore.connect({ usageName: 'tests', withNotifier: false });
    client = await db.pool.connect();
    // serverPort: 0 lets the OS pick a free port; the chosen port is read back
    // from eventServer.serverAddress in each test.
    eventServer = await startEventServer({
      datastore: db,
      chainId: ChainID.Mainnet,
      serverHost: '127.0.0.1',
      serverPort: 0,
      httpLogLevel: 'debug',
    });
  });

  test('namespace-ready called by a contract other than BNS', async () => {
    // Seed block 1 plus a microblock so the /new_block payload below (block 2)
    // can attach to a known parent.
    const block = new TestBlockBuilder({
      block_height: 1,
      index_block_hash: '0x29fe7ba9674b9196fefa28764a35a4603065dc25c9dcf83c56648066f36a8dce',
      burn_block_height: 749661,
      burn_block_hash: '0x000000000000000000021e9777470811a937006cf47efceadefca2e8031c4b5f',
      burn_block_time: 1660638853,
    })
      .addTx()
      .build();
    await db.update(block);
    const microblock = new TestMicroblockStreamBuilder()
      .addMicroblock({
        microblock_hash: '0x8455c986ef89d09968b96fee0ef5b4625aa3860aa68e70123efa129f48e55c6b',
        microblock_sequence: 0,
        parent_index_block_hash: '0x29fe7ba9674b9196fefa28764a35a4603065dc25c9dcf83c56648066f36a8dce'
      })
      .build();
    await db.updateMicroblocks(microblock);
    // Raw /new_block payload captured from a real node: the tx calls a
    // third-party contract, but the print event's contract_identifier is the
    // BNS contract — the namespace must still be detected as readyed.
    const payload = {
      "events": [
        {
          "txid": "0x605aa0554fb5ee7995f9780aa54d63b3d32550b0def95e31bdf3beb0fedefdae",
          "type": "contract_event",
          "committed": true,
          "event_index": 50,
          "contract_event": {
            "topic": "print",
            "raw_value": "0x0c00000003096e616d65737061636502000000046672656e0a70726f706572746965730c000000061963616e2d7570646174652d70726963652d66756e6374696f6e030b6c61756e636865642d61740a0100000000000000000000000000011886086c69666574696d65010000000000000000000000000000cd50106e616d6573706163652d696d706f727406161809f2ab9182b6ff1678f82846131c0709e51cf914636f6d6d756e6974792d68616e646c65732d76320e70726963652d66756e6374696f6e0c000000050462617365010000000c9f2c9cd04674edea3fffffff076275636b6574730b00000010010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000105636f6566660100000000000000000000000000000001116e6f2d766f77656c2d646973636f756e740100000000000000000000000000000001116e6f6e616c7068612d646973636f756e7401000000000000000000000000000000010b72657665616c65642d61740100000000000000000000000000011886067374617475730d000000057265616479",
            "contract_identifier": "SP000000000000000000002Q6VF78.bns"
          }
        }
      ],
      "block_hash": "0x6be6bfbf5e63ee4333c794b0489a791625ad0724722647b748379fe916bbff55",
      "miner_txid": "0x1c01668438115f757cfc14210f7f7ba0bee7f9d235c44b8e35c8653ac5879205",
      "block_height": 2,
      "transactions": [
        {
          "txid": "0x605aa0554fb5ee7995f9780aa54d63b3d32550b0def95e31bdf3beb0fedefdae",
          "raw_tx": "0x000000000104001809f2ab9182b6ff1678f82846131c0709e51cf900000000000000110000000000000bb80001e2ae2533ed444dcc3dc0118da5c8bbfe5da4c1943b63e3fd9b7389e3f7f384ee417a65d899182ff7791b174a426b947860df5b4006a0cb767aca275af847428d03020000000002161809f2ab9182b6ff1678f82846131c0709e51cf914636f6d6d756e6974792d68616e646c65732d7632106e616d6573706163652d72657665616c0000000402000000046672656e0200000003626f74010000000000000000000000000000cd5009",
          "status": "success",
          "tx_index": 46,
          "raw_result": "0x0703",
          "contract_abi": null,
          "execution_cost": {
            "runtime": 201050,
            "read_count": 20,
            "read_length": 92368,
            "write_count": 4,
            "write_length": 1386
          },
          "microblock_hash": "0x8455c986ef89d09968b96fee0ef5b4625aa3860aa68e70123efa129f48e55c6b",
          "microblock_sequence": 0,
          "microblock_parent_hash": "0xea7982ba6a5206b9efc2ab2567eedef3babae4d167619bdc74c7e148717dc208"
        }
      ],
      "anchored_cost": {
        "runtime": 19669668,
        "read_count": 1420,
        "read_length": 8457322,
        "write_count": 143,
        "write_length": 9331
      },
      "burn_block_hash": "0x00000000000000000004afca18622e18a1f36ff19dc1aece341868c042b7f4ac",
      "burn_block_time": 1660639379,
      "index_block_hash": "0xd3944c1cf261982ad5d86ad14b1545a2393c0039e378706323927b3a7031a621",
      "burn_block_height": 749662,
      "parent_block_hash": "0xea7982ba6a5206b9efc2ab2567eedef3babae4d167619bdc74c7e148717dc208",
      "parent_microblock": "0x8455c986ef89d09968b96fee0ef5b4625aa3860aa68e70123efa129f48e55c6b",
      "matured_miner_rewards": [],
      "parent_burn_block_hash": "0x000000000000000000021e9777470811a937006cf47efceadefca2e8031c4b5f",
      "parent_index_block_hash": "0x29fe7ba9674b9196fefa28764a35a4603065dc25c9dcf83c56648066f36a8dce",
      "parent_burn_block_height": 749661,
      "confirmed_microblocks_cost": {
        "runtime": 174668984,
        "read_count": 12067,
        "read_length": 54026355,
        "write_count": 1701,
        "write_length": 134399
      },
      "parent_microblock_sequence": 0,
      "parent_burn_block_timestamp": 1660638853
    };
    await httpPostRequest({
      host: '127.0.0.1',
      port: eventServer.serverAddress.port,
      path: '/new_block',
      headers: { 'Content-Type': 'application/json' },
      body: Buffer.from(JSON.stringify(payload), 'utf8'),
      throwOnNotOK: true,
    });
    // The 'fren' namespace from the print event should now be stored and ready.
    const namespaces = await db.getNamespaceList({ includeUnanchored: true });
    expect(namespaces.results).toStrictEqual(['fren']);
    const namespace = await db.getNamespace({ namespace: 'fren', includeUnanchored: true });
    expect(namespace.found).toBe(true);
    expect(namespace.result?.namespace_id).toBe('fren');
    expect(namespace.result?.lifetime).toBe(52560);
    expect(namespace.result?.status).toBe('ready');
    expect(namespace.result?.ready_block).toBe(2);
  });

  test('/attachments/new with re-orged zonefiles', async () => {
    // Block 1: register jnj.btc (namespace, name and NFT mint event).
    const block1 = new TestBlockBuilder({
      block_height: 1,
      index_block_hash: '0x0101',
    })
      .addTx()
      .addTxBnsNamespace({ namespace_id: 'btc' })
      .addTxBnsName({ name: 'jnj.btc', namespace_id: 'btc' })
      .addTxNftEvent({
        asset_event_type_id: DbAssetEventTypeId.Mint,
        value: bnsNameCV('jnj.btc'),
        asset_identifier: 'SP000000000000000000002Q6VF78.bns::names',
        recipient: 'ST5RRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1ZA',
      })
      .build();
    await db.update(block1);
    // Block 2 (0x0200): canonical name-update.
    const block2 = new TestBlockBuilder({
      block_height: 2,
      index_block_hash: '0x0200',
      parent_index_block_hash: '0x0101'
    })
      .addTx({ tx_id: '0x1212' })
      .addTxBnsName({
        name: 'jnj.btc',
        namespace_id: 'btc',
        status: 'name-update', // Canonical update
        tx_id: '0x1212',
        zonefile_hash: '0x9198e0b61a029671e53bd59aa229e7ae05af35a3'
      })
      .build();
    await db.update(block2);
    // Block 2b (0x0201): sibling at the same height — a competing update that
    // becomes non-canonical once block 3 builds on 0x0200.
    const block2b = new TestBlockBuilder({
      block_height: 2,
      index_block_hash: '0x0201',
      parent_index_block_hash: '0x0101'
    })
      .addTx({ tx_id: '0x121266' })
      .addTxBnsName({
        name: 'jnj.btc',
        namespace_id: 'btc',
        status: 'name-update', // Non-canonical update
        tx_id: '0x121266',
        zonefile_hash: '0xffff'
      })
      .build();
    await db.update(block2b);
    // Block 3 extends 0x0200, settling which fork is canonical.
    const block3 = new TestBlockBuilder({
      block_height: 3,
      index_block_hash: '0x0300',
      parent_index_block_hash: '0x0200'
    })
      .addTx({ tx_id: '0x3333' })
      .build();
    await db.update(block3);
    // Attachments arrive for both forks; only the canonical one (0x0200)
    // should win when resolving the name below.
    const payload = [
      {
        "tx_id": "0x1212", // Canonical
        "content": "0x244f524947494e206a6e6a2e6274632e0a2454544c20333630300a5f687474702e5f74637009494e095552490931300931092268747470733a2f2f676169612e626c6f636b737461636b2e6f72672f6875622f317a38417a79684334326e3854766f4661554c326e7363614347487151515755722f70726f66696c652e6a736f6e220a0a",
        "metadata": "0x0c00000004046e616d6502000000036a6e6a096e616d6573706163650200000003627463026f700d0000000d6e616d652d72656769737465720974782d73656e64657205163763c6b37100efa8261e5fc1b1e8c18cd3fed9b6",
        "contract_id": "SP000000000000000000002Q6VF78.bns",
        "block_height": 17307,
        "content_hash": "0x9198e0b61a029671e53bd59aa229e7ae05af35a3",
        "attachment_index": 823,
        "index_block_hash": "0x0200"
      },
      {
        "tx_id": "0x121266", // Non-canonical
        "content": "0x",
        "metadata": "0x0c00000004046e616d6502000000036a6e6a096e616d6573706163650200000003627463026f700d0000000d6e616d652d72656769737465720974782d73656e64657205163763c6b37100efa8261e5fc1b1e8c18cd3fed9b6",
        "contract_id": "SP000000000000000000002Q6VF78.bns",
        "block_height": 17307,
        "content_hash": "0xffff",
        "attachment_index": 823,
        "index_block_hash": "0x0201"
      },
    ];
    await httpPostRequest({
      host: '127.0.0.1',
      port: eventServer.serverAddress.port,
      path: '/attachments/new',
      headers: { 'Content-Type': 'application/json' },
      body: Buffer.from(JSON.stringify(payload), 'utf8'),
      throwOnNotOK: true,
    });
    // Name resolution must reflect the canonical fork's zonefile/tx.
    const name = await db.getName({ name: 'jnj.btc', chainId: ChainID.Mainnet, includeUnanchored: true });
    expect(name.found).toBe(true);
    expect(name.result?.zonefile_hash).toBe('9198e0b61a029671e53bd59aa229e7ae05af35a3');
    expect(name.result?.index_block_hash).toBe('0x0200');
    expect(name.result?.tx_id).toBe('0x1212');
    expect(name.result?.status).toBe('name-update');
  });

  test('/attachments/new with duplicate zonefiles for the same tx', async () => {
    const block1 = new TestBlockBuilder({
      block_height: 1,
      index_block_hash: '0x0101',
    })
      .addTx({ tx_id: '0x1234' })
      .addTxBnsNamespace({ namespace_id: 'btc' })
      .addTxBnsName({
        name: 'jnj.btc',
        namespace_id: 'btc',
        zonefile_hash: '0x9198e0b61a029671e53bd59aa229e7ae05af35a3'
      })
      .addTxNftEvent({
        asset_event_type_id: DbAssetEventTypeId.Mint,
        value: bnsNameCV('jnj.btc'),
        asset_identifier: 'SP000000000000000000002Q6VF78.bns::names',
        recipient: 'ST5RRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1ZA',
      })
      .build();
    await db.update(block1);
    // Three attachments for the same tx and zonefile hash: later entries must
    // overwrite earlier ones rather than creating duplicate rows.
    const payload = [
      {
        "tx_id": "0x1234",
        "content": "0x",
        "metadata": "0x0c00000004046e616d6502000000036a6e6a096e616d6573706163650200000003627463026f700d0000000d6e616d652d72656769737465720974782d73656e64657205163763c6b37100efa8261e5fc1b1e8c18cd3fed9b6",
        "contract_id": "SP000000000000000000002Q6VF78.bns",
        "block_height": 1,
        "content_hash": "0x9198e0b61a029671e53bd59aa229e7ae05af35a3",
        "attachment_index": 823,
        "index_block_hash": "0x0101"
      },
      {
        "tx_id": "0x1234",
        "content": "0x244f524947494e206a6e6a2e6274632e0a2454544c20333630300a5f687474702e5f74637009494e095552490931300931092268747470733a2f2f676169612e626c6f636b737461636b2e6f72672f6875622f317a38417a79684334326e3854766f4661554c326e7363614347487151515755722f70726f66696c652e6a736f6e220a0a",
        "metadata": "0x0c00000004046e616d6502000000036a6e6a096e616d6573706163650200000003627463026f700d0000000d6e616d652d72656769737465720974782d73656e64657205163763c6b37100efa8261e5fc1b1e8c18cd3fed9b6",
        "contract_id": "SP000000000000000000002Q6VF78.bns",
        "block_height": 1,
        "content_hash": "0x9198e0b61a029671e53bd59aa229e7ae05af35a3", // Same zonefile_hash but different content, this should overwrite the entry above
        "attachment_index": 823,
        "index_block_hash": "0x0101"
      },
      {
        "tx_id": "0x1234",
        "content": "0x244f524947494e206a6e6a2e6274632e0a2454544c20333630300a5f687474702e5f74637009494e095552490931300931092268747470733a2f2f676169612e626c6f636b737461636b2e6f72672f6875622f317a38417a79684334326e3854766f4661554c326e7363614347487151515755722f70726f66696c652e6a736f6e220a0a",
        "metadata": "0x0c00000004046e616d6502000000036a6e6a096e616d6573706163650200000003627463026f700d0000000d6e616d652d72656769737465720974782d73656e64657205163763c6b37100efa8261e5fc1b1e8c18cd3fed9b6",
        "contract_id": "SP000000000000000000002Q6VF78.bns",
        "block_height": 1,
        "content_hash": "0x9198e0b61a029671e53bd59aa229e7ae05af35a3", // Also overwrite
        "attachment_index": 823,
        "index_block_hash": "0x0101"
      },
    ];
    await httpPostRequest({
      host: '127.0.0.1',
      port: eventServer.serverAddress.port,
      path: '/attachments/new',
      headers: { 'Content-Type': 'application/json' },
      body: Buffer.from(JSON.stringify(payload), 'utf8'),
      throwOnNotOK: true,
    });
    // To validate table data we'll query it directly. There should only be one zonefile.
    const result = await client.query<DbBnsZoneFile>(`SELECT * FROM zonefiles`);
    expect(result.rowCount).toBe(1);
    expect(result.rows[0].zonefile).toBe('$ORIGIN jnj.btc.\n$TTL 3600\n_http._tcp\tIN\tURI\t10\t1\t"https://gaia.blockstack.org/hub/1z8AzyhC42n8TvoFaUL2nscaCGHqQQWUr/profile.json"\n\n');
  });

  afterEach(async () => {
    // Tear down server/db and roll migrations back so each test starts clean.
    await eventServer.closeAsync();
    client.release();
    await db?.close();
    await runMigrations(undefined, 'down');
  });
});

View File

@@ -0,0 +1,161 @@
import { PgDataStore, cycleMigrations, runMigrations } from '../datastore/postgres-store';
import { PoolClient } from 'pg';
import { ApiServer, startApiServer } from '../api/init';
import * as supertest from 'supertest';
import { startEventServer } from '../event-stream/event-server';
import { Server } from 'net';
import { ChainID } from '@stacks/transactions';
import { importV1BnsData } from '../import-v1';
import * as assert from 'assert';
import { TestBlockBuilder } from '../test-utils/test-builders';
describe('BNS V1 import', () => {
let db: PgDataStore;
let client: PoolClient;
let eventServer: Server;
let api: ApiServer;
beforeEach(async () => {
process.env.PG_DATABASE = 'postgres';
await cycleMigrations();
db = await PgDataStore.connect({ usageName: 'tests' });
client = await db.pool.connect();
eventServer = await startEventServer({ datastore: db, chainId: ChainID.Testnet, httpLogLevel: 'silly' });
api = await startApiServer({ datastore: db, chainId: ChainID.Testnet, httpLogLevel: 'silly' });
const block = new TestBlockBuilder().build();
await db.update(block);
});
test('v1-import', async () => {
await importV1BnsData(db, 'src/tests-bns/import-test-files');
// Names
const query1 = await supertest(api.server).get(`/v1/names/zumrai.id`);
expect(query1.status).toBe(200);
expect(query1.type).toBe('application/json');
expect(query1.body).toEqual({
address: 'SP29EJ0SVM2TRZ3XGVTZPVTKF4SV1VMD8C0GA5SK5',
blockchain: 'stacks',
expire_block: 52595,
last_txid: '',
status: 'name-register',
zonefile:
'$ORIGIN zumrai.id\n$TTL 3600\n_http._tcp IN URI 10 1 "https://gaia.blockstack.org/hub/1EPno1VcdGx89ukN2we4iVpnFtkHzw8i5d/profile.json"\n\n',
zonefile_hash: '853cd126478237bc7392e65091f7ffa5a1556a33',
});
const query2 = await supertest(api.server).get(`/v1/names/zumrai.id/zonefile/853cd126478237bc7392e65091f7ffa5a1556a33`);
expect(query2.status).toBe(200);
expect(query2.type).toBe('application/json');
expect(query2.body).toEqual({
zonefile: '$ORIGIN zumrai.id\n$TTL 3600\n_http._tcp IN URI 10 1 "https://gaia.blockstack.org/hub/1EPno1VcdGx89ukN2we4iVpnFtkHzw8i5d/profile.json"\n\n',
});
const query3 = await supertest(api.server).get(`/v1/names/zumrai.id/zonefile`);
expect(query3.status).toBe(200);
expect(query3.type).toBe('application/json');
expect(query3.body).toEqual({
zonefile: '$ORIGIN zumrai.id\n$TTL 3600\n_http._tcp IN URI 10 1 "https://gaia.blockstack.org/hub/1EPno1VcdGx89ukN2we4iVpnFtkHzw8i5d/profile.json"\n\n',
});
const query4 = await supertest(api.server).get(`/v1/names/id.blockstack/subdomains`);
expect(query4.status).toBe(200);
expect(query4.type).toBe('application/json');
expect(query4.body.sort()).toStrictEqual([
"12312313231.id.blockstack", "aichamez.id.blockstack", "ale082308as.id.blockstack",
"alejandro772.id.blockstack", "alkorsandor8_2.id.blockstack", "amir4good.id.blockstack",
"anasa680.id.blockstack", "ancafajardo.id.blockstack", "angelessebastian.id.blockstack",
"blafus3l.id.blockstack", "caomicoje.id.blockstack", "con_adrada34516.id.blockstack",
"cryptichorizon.id.blockstack", "drgenius.id.blockstack", "drifting_dude.id.blockstack",
"enavarrocollin.id.blockstack", "entryist.id.blockstack", "flushreset.id.blockstack",
"harukoscarlet.id.blockstack", "hintonh924.id.blockstack", "johnkinney.id.blockstack",
"jokialternative.id.blockstack", "joren_instance.id.blockstack", "kerodriguez.id.blockstack",
"krishares10.id.blockstack", "liviaelyse.id.blockstack", "luke_mwenya1.id.blockstack",
"milkyymocha.id.blockstack", "mithical.id.blockstack", "mrbotham.id.blockstack",
"mymansgotabeefy1.id.blockstack", "neelyblake996.id.blockstack", "nihal_t_m.id.blockstack",
"okamii63.id.blockstack", "robertascardoso.id.blockstack", "sheridoug.id.blockstack",
"sipapi19.id.blockstack", "slemanb44.id.blockstack", "slimttfu.id.blockstack",
"splevine.id.blockstack", "sportsman66.id.blockstack", "starbvuks.id.blockstack",
"subtly_fresh.id.blockstack", "svirchok.id.blockstack", "theironcook.id.blockstack",
"thingnotok.id.blockstack", "ujku1977.id.blockstack", "yanadda9.id.blockstack",
"yoemmx00.id.blockstack", "zachgaming.id.blockstack"
].sort());
const query5 = await supertest(api.server).get(`/v1/names/`);
expect(query5.status).toBe(200);
expect(query5.type).toBe('application/json');
expect(query5.body.sort()).toStrictEqual([
"0.id", "1.id", "10.id", "10x.id", "111111111.id", "123.id", "zinai.id", "zlh.id",
"zone117x.id", "zumminer_crux.id", "zumminer_dev_crux.id", "zumrai.id",
].sort());
// Namespaces
const query6 = await supertest(api.server).get(`/v1/namespaces/`);
expect(query6.status).toBe(200);
expect(query6.type).toBe('application/json');
expect(query6.body).toEqual({
namespaces: ["blockstack", "graphite", "helloworld", "id", "podcast"]
});
const query7 = await supertest(api.server).get(`/v1/namespaces/id/names`);
expect(query7.status).toBe(200);
expect(query7.type).toBe('application/json');
expect(query7.body.sort()).toStrictEqual([
"0.id", "1.id", "10.id", "10x.id", "111111111.id", "123.id", "zinai.id", "zlh.id",
"zone117x.id", "zumminer_crux.id", "zumminer_dev_crux.id", "zumrai.id"
].sort());
// Addresses
const query8 = await supertest(api.server).get(`/v1/addresses/stacks/SP1HPCXTGV31W5659M3WTBEFP5AN55HV4B1Q9T31F`);
expect(query8.status).toBe(200);
expect(query8.type).toBe('application/json');
expect(query8.body).toEqual({
names: ["0.id"]
});
// Subdomains
const query9 = await supertest(api.server).get(`/v1/names/flushreset.id.blockstack`);
expect(query9.status).toBe(200);
expect(query9.type).toBe('application/json');
expect(query9.body).toEqual({
address: 'SP2S2F9TCAT43KEJT02YTG2NXVCPZXS1426T63D9H',
blockchain: 'stacks',
last_txid: '',
resolver: 'https://registrar.blockstack.org',
status: 'registered_subdomain',
zonefile:
'$ORIGIN flushreset.id.blockstack\n$TTL 3600\n_http._tcp IN URI 10 1 "https://gaia.blockstack.org/hub/1HEznKZ7mK5fmibweM7eAk8SwRgJ1bWY92/profile.json"\n\n',
zonefile_hash: '14dc091ebce8ea117e1276d802ee903cc0fdde81',
});
const query10 = await supertest(api.server).get(`/v1/names/flushreset.id.blockstack/zonefile/14dc091ebce8ea117e1276d802ee903cc0fdde81`);
expect(query10.status).toBe(200);
expect(query10.type).toBe('application/json');
expect(query10.body).toEqual({
zonefile:
'$ORIGIN flushreset.id.blockstack\n$TTL 3600\n_http._tcp IN URI 10 1 "https://gaia.blockstack.org/hub/1HEznKZ7mK5fmibweM7eAk8SwRgJ1bWY92/profile.json"\n\n',
});
const query11 = await supertest(api.server).get(`/v1/names/flushreset.id.blockstack/zonefile`);
expect(query11.status).toBe(200);
expect(query11.type).toBe('application/json');
expect(query11.body).toEqual({
zonefile:
'$ORIGIN flushreset.id.blockstack\n$TTL 3600\n_http._tcp IN URI 10 1 "https://gaia.blockstack.org/hub/1HEznKZ7mK5fmibweM7eAk8SwRgJ1bWY92/profile.json"\n\n',
});
const dbquery = await db.getSubdomain({ subdomain: `flushreset.id.blockstack`, includeUnanchored: false });
assert(dbquery.found)
if (dbquery.result){
expect(dbquery.result.name).toBe('id.blockstack');}
});
afterEach(async () => {
  // Teardown order matters here:
  // 1. Stop the event server first so no new events arrive mid-teardown.
  //    `close` is callback-based, so it is wrapped in a Promise to await it.
  await new Promise(resolve => eventServer.close(() => resolve(true)));
  // 2. Shut down the API server before releasing its database resources.
  await api.terminate();
  // 3. Release the dedicated pg client back to the pool.
  client.release();
  // 4. Close the datastore (optional-chained in case setup failed early).
  await db?.close();
  // 5. Roll back migrations so the next test starts from a clean schema.
  await runMigrations(undefined, 'down');
});
});

View File

@@ -1,95 +0,0 @@
import { parseNamespaceRawValue, parseNameRawValue, parseZoneFileTxt } from '../bns-helpers';
import * as zoneFileParser from 'zone-file';
test('Success: namespace parsed', () => {
  // Raw Clarity tuple as emitted by the BNS contract event
  // (event.contract_event.raw_value), hex-encoded.
  const rawValue =
    '0x0c00000003096e616d657370616365020000000378797a0a70726f706572746965730c000000050b6c61756e636865642d61740a010000000000000000000000000000000e086c69666574696d650100000000000000000000000000000001106e616d6573706163652d696d706f7274051abf8e82623c380cd870931d48b525d5e12a4d67820e70726963652d66756e6374696f6e0c0000000504626173650100000000000000000000000000000001076275636b6574730b00000010010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000101000000000000000000000000000000010100000000000000000000000000000001010000000000000000000000000000000105636f6566660100000000000000000000000000000001116e6f2d766f77656c2d646973636f756e740100000000000000000000000000000001116e6f6e616c7068612d646973636f756e7401000000000000000000000000000000010b72657665616c65642d61740100000000000000000000000000000006067374617475730d000000057265616479';
  const txId = '0x2114c8cda9e829f8b5d3c4163724ae9c4d9142d2bae4a35bffb006408d21c0ab';
  const readyBlock = 4;

  const result = parseNamespaceRawValue(rawValue, readyBlock, txId, 0);

  // Field-by-field check against the values encoded in the raw tuple above.
  expect(result?.address).toEqual('ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH');
  expect(result?.namespace_id).toEqual('xyz');
  expect(result?.base).toEqual(1);
  expect(result?.coeff).toEqual(1);
  expect(result?.launched_at).toEqual(14);
  expect(result?.lifetime).toEqual(1);
  expect(result?.no_vowel_discount).toEqual(1);
  expect(result?.nonalpha_discount).toEqual(1);
  expect(result?.ready_block).toEqual(4);
  expect(result?.reveal_block).toEqual(6);
  expect(result?.status).toEqual('ready');
  expect(result?.buckets).toEqual('1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1');
  expect(result?.tx_id).toEqual(txId);
});
test('Success: parse name raw value', () => {
  // Raw Clarity tuple as emitted by the BNS contract event
  // (event.contract_event.raw_value), hex-encoded.
  const rawValue =
    '0x0c000000010a6174746163686d656e740c00000003106174746163686d656e742d696e646578010000000000000000000000000000000004686173680200000014c5217bcb3e52612ff7c835f9bb46a5f86aa73b8d086d657461646174610c00000004046e616d650200000006616263646566096e616d657370616365020000000378797a026f700d0000000b6e616d652d696d706f72740974782d73656e646572051abf8e82623c380cd870931d48b525d5e12a4d6782';

  const { attachment } = parseNameRawValue(rawValue);

  // Attachment hash and metadata decoded from the raw tuple above.
  expect(attachment.hash).toEqual('c5217bcb3e52612ff7c835f9bb46a5f86aa73b8d');
  const meta = attachment.metadata;
  expect(meta.name).toEqual('abcdef');
  expect(meta.namespace).toEqual('xyz');
  expect(meta.op).toEqual('name-import');
  expect(meta.tx_sender.version).toEqual(26);
  expect(meta.tx_sender.hash160).toEqual('bf8e82623c380cd870931d48b525d5e12a4d6782');
});
test('Parse TXT', () => {
const subdomain = `$ORIGIN abcdef.xyz
$TTL 3600
asim IN TXT "owner=ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH" "seqn=0" "parts=1" "zf0=JE9SSUdJTiBhc2ltCiRUVEwgMzYwMApfaHR0cHMuX3RjcCBVUkkgMTAgMSAiaHR0cHM6Ly9nYWlhLmJsb2Nrc3RhY2sub3JnL2h1Yi9TVDJaUlgwSzI3R1cwU1AzR0pDRU1IRDk1VFFHSk1LQjdHOVkwWDFNSC9wcm9maWxlLmpzb24iCg=="
_http._tcp IN URI 10 1 "https://gaia.blockstack.org/hub/1M3325hr1utdv4HhSAfvYKhapzPP9Axhde/profile.json"
_resolver IN URI 10 1 "http://localhost:3000"
`;
const parsedZoneFile = zoneFileParser.parseZoneFile(subdomain);
const zoneFileTxt = parseZoneFileTxt(parsedZoneFile.txt?.[0].txt as string[]);
expect(zoneFileTxt.owner).toBe('ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH');
expect(zoneFileTxt.parts).toBe('1');
expect(zoneFileTxt.seqn).toBe('0');
});

View File

@@ -2761,8 +2761,8 @@ describe('postgres datastore', () => {
tx_index: 0,
namespace_id: 'abc',
address: 'ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH',
base: 1,
coeff: 1,
base: 1n,
coeff: 1n,
launched_at: 14,
lifetime: 1,
no_vowel_discount: 1,
@@ -3765,8 +3765,8 @@ describe('postgres datastore', () => {
{
namespace_id: 'abc',
address: 'ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH',
base: 1,
coeff: 1,
base: 1n,
coeff: 1n,
launched_at: 14,
lifetime: 1,
no_vowel_discount: 1,
@@ -3959,8 +3959,8 @@ describe('postgres datastore', () => {
{
namespace_id: 'abc',
address: 'ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH',
base: 1,
coeff: 1,
base: 1n,
coeff: 1n,
launched_at: 14,
lifetime: 1,
no_vowel_discount: 1,
@@ -4481,8 +4481,8 @@ describe('postgres datastore', () => {
const namespace: DbBnsNamespace = {
namespace_id: 'abc',
address: 'ST2ZRX0K27GW0SP3GJCEMHD95TQGJMKB7G9Y0X1MH',
base: 1,
coeff: 1,
base: 1n,
coeff: 1n,
launched_at: dbBlock.block_height,
lifetime: 1,
no_vowel_discount: 1,
@@ -4615,8 +4615,7 @@ describe('postgres datastore', () => {
const subdomains: DbBnsSubdomain[] = [];
subdomains.push(subdomain);
await db.updateBatchSubdomains(
client,
await db.resolveBnsSubdomains(
{
index_block_hash: dbBlock.index_block_hash,
parent_index_block_hash: dbBlock.parent_index_block_hash,