fix: sql optimizations to speed up various tx queries

This commit is contained in:
Matthew Little
2021-09-03 14:15:08 -06:00
committed by GitHub
parent d9e1131f83
commit 10b1c67d20
6 changed files with 74 additions and 27 deletions

4
.env
View File

@@ -6,6 +6,10 @@ PG_DATABASE=stacks_blockchain_api
PG_SCHEMA=public
PG_SSL=false
# Limit on how many concurrent connections can be created, defaults to 10
# See https://node-postgres.com/api/pool
# PG_CONNECTION_POOL_MAX=10
# The connection URI below can be used in place of the PG variables above;
# if it is set, the individual PG_* variables must be left unset or removed.
# PG_CONNECTION_URI=

View File

@@ -564,6 +564,10 @@ export interface GetTxArgs {
includeUnanchored: boolean;
}
/**
 * Arguments for `getTxFromDataStore` when the caller already holds the `DbTx`
 * row (e.g. from a prior list/enumeration query). Supplying `dbTx` lets the
 * lookup skip an extra per-transaction database query.
 */
export interface GetTxFromDbTxArgs extends GetTxArgs {
// The pre-fetched transaction row; used directly instead of re-querying by txId.
dbTx: DbTx;
}
export interface GetTxWithEventsArgs extends GetTxArgs {
eventLimit: number;
eventOffset: number;
@@ -787,14 +791,19 @@ export async function getMempoolTxFromDataStore(
export async function getTxFromDataStore(
db: DataStore,
args: GetTxArgs | GetTxWithEventsArgs
args: GetTxArgs | GetTxWithEventsArgs | GetTxFromDbTxArgs
): Promise<FoundOrNot<Transaction>> {
const txQuery = await db.getTx({ txId: args.txId, includeUnanchored: args.includeUnanchored });
if (!txQuery.found) {
return { found: false };
let dbTx: DbTx;
if ('dbTx' in args) {
dbTx = args.dbTx;
} else {
const txQuery = await db.getTx({ txId: args.txId, includeUnanchored: args.includeUnanchored });
if (!txQuery.found) {
return { found: false };
}
dbTx = txQuery.result;
}
const dbTx = txQuery.result;
const parsedTx = parseDbTx(dbTx);
// If tx type is contract-call then fetch additional contract ABI details for a richer response

View File

@@ -176,7 +176,11 @@ export function createAddressRouter(db: DataStore, chainId: ChainID): RouterWith
});
// TODO: use getBlockWithMetadata or similar to avoid transaction integrity issues from lazy resolving block tx data (primarily the contract-call ABI data)
const results = await Bluebird.mapSeries(txResults, async tx => {
const txQuery = await getTxFromDataStore(db, { txId: tx.tx_id, includeUnanchored: true });
const txQuery = await getTxFromDataStore(db, {
txId: tx.tx_id,
dbTx: tx,
includeUnanchored: true,
});
if (!txQuery.found) {
throw new Error('unexpected tx not found -- fix tx enumeration query');
}
@@ -199,6 +203,7 @@ export function createAddressRouter(db: DataStore, chainId: ChainID): RouterWith
if (results && results.tx) {
const txQuery = await getTxFromDataStore(db, {
txId: results.tx.tx_id,
dbTx: results.tx,
includeUnanchored: false,
});
if (!txQuery.found) {
@@ -238,6 +243,7 @@ export function createAddressRouter(db: DataStore, chainId: ChainID): RouterWith
const results = await Bluebird.mapSeries(txResults, async entry => {
const txQuery = await getTxFromDataStore(db, {
txId: entry.tx.tx_id,
dbTx: entry.tx,
includeUnanchored: blockParams.includeUnanchored ?? false,
});
if (!txQuery.found) {

View File

@@ -75,7 +75,7 @@ export function createTxRouter(db: DataStore): RouterWithAsync {
// TODO: use getBlockWithMetadata or similar to avoid transaction integrity issues from lazy resolving block tx data (primarily the contract-call ABI data)
const results = await Bluebird.mapSeries(txResults, async tx => {
const txQuery = await getTxFromDataStore(db, { txId: tx.tx_id, includeUnanchored });
const txQuery = await getTxFromDataStore(db, { txId: tx.tx_id, dbTx: tx, includeUnanchored });
if (!txQuery.found) {
throw new Error('unexpected tx not found -- fix tx enumeration query');
}
@@ -258,7 +258,11 @@ export function createTxRouter(db: DataStore): RouterWithAsync {
const dbTxs = await db.getTxsFromBlock(block_hash, limit, offset);
const results = await Bluebird.mapSeries(dbTxs.results, async tx => {
const txQuery = await getTxFromDataStore(db, { txId: tx.tx_id, includeUnanchored: true });
const txQuery = await getTxFromDataStore(db, {
txId: tx.tx_id,
dbTx: tx,
includeUnanchored: true,
});
if (!txQuery.found) {
throw new Error('unexpected tx not found -- fix tx enumeration query');
}
@@ -291,7 +295,11 @@ export function createTxRouter(db: DataStore): RouterWithAsync {
const dbTxs = await db.getTxsFromBlock(blockHash.result.block_hash, limit, offset);
const results = await Bluebird.mapSeries(dbTxs.results, async tx => {
const txQuery = await getTxFromDataStore(db, { txId: tx.tx_id, includeUnanchored: true });
const txQuery = await getTxFromDataStore(db, {
txId: tx.tx_id,
dbTx: tx,
includeUnanchored: true,
});
if (!txQuery.found) {
throw new Error('unexpected tx not found -- fix tx enumeration query');
}

View File

@@ -3,7 +3,16 @@ import * as fs from 'fs';
import { EventEmitter } from 'events';
import { Readable, Writable } from 'stream';
import PgMigrate, { RunnerOption } from 'node-pg-migrate';
import { Pool, PoolClient, ClientConfig, Client, ClientBase, QueryResult, QueryConfig } from 'pg';
import {
Pool,
PoolClient,
ClientConfig,
Client,
ClientBase,
QueryResult,
QueryConfig,
PoolConfig,
} from 'pg';
import * as pgCopyStreams from 'pg-copy-streams';
import * as PgCursor from 'pg-cursor';
@@ -420,11 +429,11 @@ interface TxQueryResult {
// events count
event_count: number;
execution_cost_read_count: number;
execution_cost_read_length: number;
execution_cost_runtime: number;
execution_cost_write_count: number;
execution_cost_write_length: number;
execution_cost_read_count: string;
execution_cost_read_length: string;
execution_cost_runtime: string;
execution_cost_write_count: string;
execution_cost_write_length: string;
}
interface MempoolTxIdQueryResult {
@@ -2213,9 +2222,14 @@ export class PgDataStore
if (!skipMigrations) {
await runMigrations(clientConfig);
}
const pool = new Pool({
const poolConfig: PoolConfig = {
...clientConfig,
});
};
const pgConnectionPoolMaxEnv = process.env['PG_CONNECTION_POOL_MAX'];
if (pgConnectionPoolMaxEnv) {
poolConfig.max = Number.parseInt(pgConnectionPoolMaxEnv);
}
const pool = new Pool(poolConfig);
pool.on('error', error => {
logger.error(`Postgres connection pool error: ${error.message}`, error);
});
@@ -3029,11 +3043,11 @@ export class PgDataStore
sender_address: result.sender_address,
origin_hash_mode: result.origin_hash_mode,
event_count: result.event_count,
execution_cost_read_count: result.execution_cost_read_count,
execution_cost_read_length: result.execution_cost_read_length,
execution_cost_runtime: result.execution_cost_runtime,
execution_cost_write_count: result.execution_cost_write_count,
execution_cost_write_length: result.execution_cost_write_length,
execution_cost_read_count: Number.parseInt(result.execution_cost_read_count),
execution_cost_read_length: Number.parseInt(result.execution_cost_read_length),
execution_cost_runtime: Number.parseInt(result.execution_cost_runtime),
execution_cost_write_count: Number.parseInt(result.execution_cost_write_count),
execution_cost_write_length: Number.parseInt(result.execution_cost_write_length),
};
this.parseTxTypeSpecificQueryResult(result, tx);
return tx;

View File

@@ -90,23 +90,23 @@ export async function up(pgm: MigrationBuilder): Promise<void> {
notNull: true,
},
execution_cost_read_count: {
type: 'integer',
type: 'bigint',
notNull: true,
},
execution_cost_read_length: {
type: 'integer',
type: 'bigint',
notNull: true,
},
execution_cost_runtime: {
type: 'integer',
type: 'bigint',
notNull: true,
},
execution_cost_write_count: {
type: 'integer',
type: 'bigint',
notNull: true,
},
execution_cost_write_length: {
type: 'integer',
type: 'bigint',
notNull: true,
},
@@ -173,6 +173,12 @@ export async function up(pgm: MigrationBuilder): Promise<void> {
pgm.createIndex('txs', 'canonical');
pgm.createIndex('txs', ['canonical', 'microblock_canonical']);
pgm.createIndex('txs', [
{ name: 'block_height', sort: 'DESC' },
{ name: 'microblock_sequence', sort: 'DESC' },
{ name: 'tx_index', sort: 'DESC' },
]);
pgm.addConstraint('txs', 'unique_tx_id_index_block_hash_microblock_hash', `UNIQUE(tx_id, index_block_hash, microblock_hash)`);
// TODO(mb): a unique constraint that enforced something like UNIQUE(tx_id, canonical = true, microblock_canonical = true)