Merge pull request #1724 from hirosystems/master-merge

merge master into develop
Rafael Cárdenas
2023-10-04 10:07:07 -06:00
committed by GitHub
14 changed files with 245 additions and 61 deletions

View File

@@ -972,10 +972,10 @@ jobs:
with:
semantic_version: 19
extra_plugins: |
-@semantic-release/changelog
-@semantic-release/git
-@semantic-release/exec
-conventional-changelog-conventionalcommits
+@semantic-release/changelog@6.0.3
+@semantic-release/git@10.0.1
+@semantic-release/exec@6.0.3
+conventional-changelog-conventionalcommits@6.1.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1

View File

@@ -1,3 +1,43 @@
## [7.3.2](https://github.com/hirosystems/stacks-blockchain-api/compare/v7.3.1...v7.3.2) (2023-09-14)
### Bug Fixes
* log block ingestion time ([#1713](https://github.com/hirosystems/stacks-blockchain-api/issues/1713)) ([e7c01a8](https://github.com/hirosystems/stacks-blockchain-api/commit/e7c01a8b5c1fb8c3fbd3eeb4795be8b35c1bcbcd))
## [7.3.1](https://github.com/hirosystems/stacks-blockchain-api/compare/v7.3.0...v7.3.1) (2023-09-11)
### Bug Fixes
* allow more than one Rosetta `stx_unlock` operation per block ([#1712](https://github.com/hirosystems/stacks-blockchain-api/issues/1712)) ([81221c8](https://github.com/hirosystems/stacks-blockchain-api/commit/81221c8c1388d4e2d92cebce85311b7941e15be1))
## [7.3.0](https://github.com/hirosystems/stacks-blockchain-api/compare/v7.2.2...v7.3.0) (2023-07-12)
### Features
* stacking pool members endpoint ([#1592](https://github.com/hirosystems/stacks-blockchain-api/issues/1592)) ([3cd6023](https://github.com/hirosystems/stacks-blockchain-api/commit/3cd6023e895c964ed3d744652b169d51254ea6ed)), closes [#465](https://github.com/hirosystems/stacks-blockchain-api/issues/465)
* support custom chain_id (e.g. for subnets) ([#1669](https://github.com/hirosystems/stacks-blockchain-api/issues/1669)) ([1c6e35a](https://github.com/hirosystems/stacks-blockchain-api/commit/1c6e35a2dc0b5c161d35f291220a0bef6c6f5d28))
* support for subnets ([#1549](https://github.com/hirosystems/stacks-blockchain-api/issues/1549)) ([5d7056c](https://github.com/hirosystems/stacks-blockchain-api/commit/5d7056c1ba0aa0b202f341a83adf0f6bd2d13c71))
* support for subnets ([#1625](https://github.com/hirosystems/stacks-blockchain-api/issues/1625)) ([bfac932](https://github.com/hirosystems/stacks-blockchain-api/commit/bfac932f098f0311c9cf180b87724f871d1df82b)), closes [#1549](https://github.com/hirosystems/stacks-blockchain-api/issues/1549) [#1528](https://github.com/hirosystems/stacks-blockchain-api/issues/1528) [#1583](https://github.com/hirosystems/stacks-blockchain-api/issues/1583) [#1583](https://github.com/hirosystems/stacks-blockchain-api/issues/1583)
### Bug Fixes
* add indexes to pox3_events table used for stacker lookup endpoints ([86304be](https://github.com/hirosystems/stacks-blockchain-api/commit/86304beb34a560d0452af5161e304046d97f8beb))
* disabled BTC faucet endpoint ([#1530](https://github.com/hirosystems/stacks-blockchain-api/issues/1530)) ([ce55212](https://github.com/hirosystems/stacks-blockchain-api/commit/ce55212f95fc52a3e890e78681e89682079c8f0f))
* domain migration ([#1596](https://github.com/hirosystems/stacks-blockchain-api/issues/1596)) ([2769e68](https://github.com/hirosystems/stacks-blockchain-api/commit/2769e684688f6d6c049baabc1d7777a330bc3f40))
* enable requests auto logging ([#1656](https://github.com/hirosystems/stacks-blockchain-api/issues/1656)) ([2015b9c](https://github.com/hirosystems/stacks-blockchain-api/commit/2015b9c8805c189ebd80dfe16b775f805810a63f))
* fixed the order of microblocks_streamed returned in reverse order in block endpoint ([#1528](https://github.com/hirosystems/stacks-blockchain-api/issues/1528)) ([764f64a](https://github.com/hirosystems/stacks-blockchain-api/commit/764f64a538c88a17c381eccb867ed3032e73bea1))
* log cleanup ([#1613](https://github.com/hirosystems/stacks-blockchain-api/issues/1613)) ([a067e39](https://github.com/hirosystems/stacks-blockchain-api/commit/a067e3906b89f9e1b40adb98072927d977f870d2))
* log level issues ([#1605](https://github.com/hirosystems/stacks-blockchain-api/issues/1605)) ([c3a2377](https://github.com/hirosystems/stacks-blockchain-api/commit/c3a237709a241eef4867258c8aac79dfdf4569e3)), closes [#1603](https://github.com/hirosystems/stacks-blockchain-api/issues/1603) [#1603](https://github.com/hirosystems/stacks-blockchain-api/issues/1603) [#1604](https://github.com/hirosystems/stacks-blockchain-api/issues/1604) [#1604](https://github.com/hirosystems/stacks-blockchain-api/issues/1604) [#1452](https://github.com/hirosystems/stacks-blockchain-api/issues/1452)
* npm publish step ([#1617](https://github.com/hirosystems/stacks-blockchain-api/issues/1617)) ([c9cdbb6](https://github.com/hirosystems/stacks-blockchain-api/commit/c9cdbb693eb95cc0048041339ef3f0a7c2f5219f))
* optimize queries to retrieve BNS names ([#1581](https://github.com/hirosystems/stacks-blockchain-api/issues/1581)) ([1a6fde1](https://github.com/hirosystems/stacks-blockchain-api/commit/1a6fde145bd979614c614af95cd38d08a022ea3d))
* use chaintip-cache-control in `/stx_supply` endpoints [#1590](https://github.com/hirosystems/stacks-blockchain-api/issues/1590) ([#1594](https://github.com/hirosystems/stacks-blockchain-api/issues/1594)) ([a47f153](https://github.com/hirosystems/stacks-blockchain-api/commit/a47f1530a24da18bdcd9e6da64076a722e76af20))
* use pox3 for `/extended/beta/stacking/...` endpoint ([872f7e6](https://github.com/hirosystems/stacks-blockchain-api/commit/872f7e614443c2f26d4ca749782b8b96ef77fa93))
* warning logger level for RPC proxy errors ([#1612](https://github.com/hirosystems/stacks-blockchain-api/issues/1612)) ([2454932](https://github.com/hirosystems/stacks-blockchain-api/commit/24549328d2e5ae974b7beb18baaccaa5e9d6685e))
## [7.2.2](https://github.com/hirosystems/stacks-blockchain-api/compare/v7.2.1...v7.2.2) (2023-06-07)

MAINTAINERS.md Normal file
View File

@@ -0,0 +1,74 @@
# Maintainer's guide
This guide is intended for maintainers — anybody with commit access to this repository.
*Note:* This guide is a living standard;
that is, it is meant to *describe* the project's maintenance practices,
rather than *prescribe* them.
As a maintainer, you're expected to refer to it for clarifications
about the collaborative workflows of the project,
but also to propose changes to it
that you feel would make it more useful
as a guideline for current and future maintainers.
We use the [git flow methodology](http://nvie.com/posts/a-successful-git-branching-model/) for
managing this repository. At a glance, this means:
- a **master** branch. This branch MUST be releasable at all times. Commits and merges against
this branch MUST contain only bugfixes and/or security fixes. Maintenance releases are tagged
against master.
- a **develop** branch. This branch contains *new features*, and will either become the next minor
(feature) release or next major release. Typically, major releases are reserved for backwards
*incompatible* changes, but can also be used to signal major new features.
## I. Branch Naming Conventions
- In addition to the master and develop branches, these are the standards for features, fixes, chores, and releases:
  1. **features**: All feature branches must be created under **feat/**.
  2. **bug-fixes**: All fixes must be created under **fix/**.
  3. **chores**: Ad-hoc tasks that are not features (minor housekeeping and maintenance tasks) should be created under **chores/**.
  4. Avoid grouping branches under your username.
## II. Handling PRs
- When creating a PR, you should:
1. Clearly describe the intent of the PR
2. Describe the solution in detail with links to the original issue and any related issues that it might fix or close.
3. GitHub Draft PRs are a great way to get CI or human feedback on work that isn't yet ready to merge. PRs can be created as drafts and converted to normal PRs once the CI passes. More information about GitHub Draft PRs: https://github.blog/2019-02-14-introducing-draft-pull-requests/
- PRs should be merged once they
1. **pass the automated tests** (GitHub Actions, CLA signing, etc.),
2. have the **review comments addressed**,
3. get **approving reviews from two maintainers** (the second maintainer can merge immediately after approving), and
4. have been open for at least **24 hours**, unless the changes are trivial
- To merge a pull request, it must have at least:
- one approval for simple documentation fixes
- two approvals for everything else
- When merging a PR, you should:
1. Use the **merge strategy that produces a clean Git history**: ["Squash and merge"](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/about-pull-request-merges#squash-and-merge-your-pull-request-commits) the commits, and ensure the resulting commit message is:
- descriptive
- sentence case
- If instead the PR author took the time to craft individual, informative messages for each commit, then use the `Rebase and merge` method to honor that work and preserve the history of the changes.
- For less clear-cut cases, a simple heuristic you can follow is: if there are more "dirty" commits than "clean" commits, prefer squash; otherwise, rebase.
2. Ensure [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/) are used in the PR. When properly annotated, the commit messages will automatically update the changelog.
- If a PR fails to get a review from a second maintainer after a few days, the first maintainer should ping others for review. If it still lingers around for **over a week without a second maintainer's approval**, the first maintainer can go ahead and merge it.
- If the only issues holding up a merge are **trivial fixes**
(typos, syntax errors, etc.), and the author doesn't respond in a day or two,
**maintainers can make the necessary changes themselves**,
and proceed with the merge process.
- If a PR **stops getting feedback from the submitter** and is marked as stale
by [probot-stale](../.github/workflows/stale.yml),
any maintainer can choose to take over the PR
and make the necessary changes to get the content ready for merging.
- **Avoid merging your own PRs** unless approved by other maintainers.

View File

@@ -5,11 +5,11 @@ title: Storing data with Gaia
# Gaia Storage
The Gaia storage system allows you to store private app data off the blockchain and still access it securely
-with Stacks applications. Where possible, apps should only store critical transactional metadata directly to
+with Stacks applications. Where possible, apps should only store critical transactional metadata directly on
the Stacks blockchain, while keeping app and user data in the Gaia storage system. For more information about
the Gaia storage system, see the [Gaia protocol reference](https://docs.stacks.co/build-apps/references/gaia).
A [Gaia hub](https://docs.stacks.co/build-apps/references/gaia#user-control-or-how-is-gaia-decentralized) consists of a service and a storage
resource, generally hosted on the same cloud compute provider. The hub service requires an authentication token from a
-storage requester, and writes key-value pairs to the associated storage resource. Storage user can choose a Gaia
-hub provider. This documentation provides an overview of how to set up and operate a Gaia hub.
+storage requester, and writes key-value pairs to the associated storage resource. Individual storage users can choose their Gaia
+hub provider. The linked documentation provides an overview of how to set up and operate a Gaia hub.
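
As a concrete sketch, here is a hub write using the `@stacks/storage` client, assuming an app that has already completed Stacks authentication (the file name and contents are placeholders):

```ts
import { AppConfig, UserSession } from '@stacks/auth';
import { Storage } from '@stacks/storage';

// The hub validates an auth token derived from the authenticated session.
const appConfig = new AppConfig(['store_write']);
const userSession = new UserSession({ appConfig });
const storage = new Storage({ userSession });

async function saveNote(): Promise<void> {
  // Writes the key-value pair ("note.txt" -> contents) to the user's chosen
  // Gaia hub and returns the public URL of the stored file.
  const url = await storage.putFile('note.txt', 'hello gaia', { encrypt: true });
  console.log(`stored at ${url}`);
}
```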

View File

@@ -55,5 +55,5 @@ rosetta-cli \
`rosetta-cli` will then sync with the blockchain until it reaches the tip, and then exit, displaying the test results.
Currently, account reconciliation is disabled; proper testing of that feature requires token transfer transactions while `rosetta-cli` is running.
-Documentation for the Rosetta APIs can be found [here](https://hirosystems.github.io/stacks-blockchain-api/)
+Documentation for the Rosetta APIs can be found [here](https://hirosystems.github.io/stacks-blockchain-api/).
You may also review Data and Construction Rosetta endpoints [here](https://docs.hiro.so/api#tag/Rosetta).
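
As a quick smoke test of the Data endpoints, you can list the networks a running instance serves; this sketch assumes an API instance listening on the default port 3999:

```ts
// POST an empty MetadataRequest to the Rosetta Data API.
async function listRosettaNetworks(): Promise<void> {
  const res = await fetch('http://localhost:3999/rosetta/v1/network/list', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ metadata: {} }),
  });
  const { network_identifiers } = await res.json();
  // Expect entries like { blockchain: 'stacks', network: 'mainnet' }.
  console.log(network_identifiers);
}
```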

View File

@@ -4,18 +4,18 @@ title: Transactions
# Transactions
-Transactions are the fundamental unit of execution in the Stacks blockchain. Each transaction is originated from a [Stacks 2.0 account](https://docs.stacks.co/understand-stacks/accounts), and is retained in the Stacks blockchain history. This guide helps you understand Stacks 2.0 transactions.
+Transactions are the fundamental unit of execution in the Stacks blockchain. Each transaction is originated from a [Stacks account](https://docs.stacks.co/understand-stacks/accounts), and is retained in the Stacks blockchain history. This guide helps you understand Stacks transactions.
## Lifecycle
-Transactions go through phases before being finally confirmed, and available for all, on the Stacks 2.0 network.
+Transactions go through phases before being finally confirmed and propagated on the Stacks network.
- **Generate**: Transactions are assembled according to the encoding specification.
- **Validate and sign**: Transactions are validated to confirm they are well-formed. Required signatures are filled in.
- **Broadcast**: Transactions are sent to a node.
- **Register**: A miner receives transactions, verifies, and adds them to the ["mempool"](https://academy.binance.com/en/glossary/mempool), a holding area for all the pending transactions.
- **Process**: Miners review the mempool and select transactions for the next block to be mined. Depending on the transaction type, different actions can happen during this step. For example, post-conditions could be verified for a token transfer, smart-contract defined tokens could be minted, or an attempt to call an existing smart contract method could be made.
-- **Confirm**: Miners successfully mine blocks with a set of transactions. The transactions inside are successfully propagated to the network.
+- **Confirm**: Miners successfully mine blocks, with each block containing a set of transactions. The transactions inside are successfully propagated to the network.
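
These phases map directly onto the `@stacks/transactions` client library. A sketch of the generate, sign, and broadcast steps for a simple token transfer (the key and recipient are placeholders):

```ts
import { AnchorMode, broadcastTransaction, makeSTXTokenTransfer } from '@stacks/transactions';
import { StacksTestnet } from '@stacks/network';

async function transferStx(): Promise<void> {
  const network = new StacksTestnet();
  // Generate + validate and sign: assembles a well-formed transaction and
  // fills in the required origin signature.
  const tx = await makeSTXTokenTransfer({
    recipient: 'ST3NBRSFKX28FQ2ZJ1MAKX58HKHSDGNV5N7R21XCP', // placeholder
    amount: 1_000_000n, // denominated in micro-STX
    senderKey: 'b244296d5907de9864c0b0d51f98a13c52890be0404e83f273144cd5b9960eed01', // placeholder
    network,
    anchorMode: AnchorMode.Any,
  });
  // Broadcast: sends the transaction to a node, which verifies it and
  // admits it to the mempool.
  const result = await broadcastTransaction(tx, network);
  console.log(result.txid);
}
```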
:::info
@@ -25,7 +25,7 @@ A transaction can have one of three states once it is registered: `pending`, `su
## Types
-The Stacks 2.0 supports a set of different transaction types:
+Stacks supports a set of different transaction types:
| **Type** | **Value** | **Description** |
| ----------------- | ------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
@@ -82,8 +82,8 @@ Each transaction includes a field that describes zero or more post-conditions th
| **Attribute** | **Sample** | **Description** |
| -------------------------------------------------------------------- | ------------------------------------------- | ------------------------------------------------------------------------------------------------ |
-| [Principal](https://docs.stacks.co/write-smart-contracts/principals) | `SP2ZD731ANQZT6J4K3F5N8A40ZXWXC1XFXHVVQFKE` | original owner of the asset, can be a Stacks address or a contract |
-| Asset id | `STX` | Asset to apply conditions to (could be Stacks, fungible, or non-fungible tokens) |
+| [Principal](https://docs.stacks.co/write-smart-contracts/principals) | `SP2ZD731ANQZT6J4K3F5N8A40ZXWXC1XFXHVVQFKE` | Original owner of the asset, can be a Stacks address or a contract |
+| Asset id | `STX` | Asset to apply conditions to (could be STX, fungible, or non-fungible tokens) |
| Comparator | `>=` | Compare operation to be applied (could define "how much" or "whether or not the asset is owned") |
| Literal | `1000000` | Integer or boolean value used to compare instances of the asset against via the condition |
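
These attributes map onto the post-condition helpers in `@stacks/transactions`. A sketch using the sample values from the table above:

```ts
import { makeStandardSTXPostCondition, FungibleConditionCode } from '@stacks/transactions';

// "This principal may transfer at least 1,000,000 micro-STX in this transaction."
const postCondition = makeStandardSTXPostCondition(
  'SP2ZD731ANQZT6J4K3F5N8A40ZXWXC1XFXHVVQFKE', // principal: original owner of the asset
  FungibleConditionCode.GreaterEqual, // comparator: >=
  1_000_000n // literal to compare against
);
```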
@@ -126,7 +126,7 @@ When constructing transactions, it is required to set the network the transactio
:::info
-Transactions can be constructed and serialized offline. However, it is required to know the nonce and estimated fees ahead of time. Once internet access is available, the transaction can be broadcasted to the network. Keep in mind that the nonce and fee might change during offline activity, making the transaction invalid.
+Transactions can be constructed and serialized offline. However, it is required to know the nonce and estimated fees ahead of time. Once internet access is available, the transaction can be broadcast to the network. Keep in mind that the nonce and fee might change during offline activity, making the transaction invalid.
:::
@@ -201,7 +201,7 @@ const transaction = await makeContractCall(txOptions);
### Clarity value types
-Building transactions that call functions in deployed clarity contracts requires you to construct valid Clarity Values to pass to the function as arguments. The [Clarity type system](https://github.com/stacksgov/sips/blob/main/sips/sip-002/sip-002-smart-contract-language.md#clarity-type-system) contains the following types:
+Building transactions that call functions in deployed clarity contracts requires you to construct valid Clarity values to pass to the function as arguments. The [Clarity type system](https://github.com/stacksgov/sips/blob/main/sips/sip-002/sip-002-smart-contract-language.md#clarity-type-system) contains the following types:
| Type | Declaration | Description |
| ---------------- | ------------------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
@@ -259,7 +259,7 @@ const tupCV = tupleCV({
const l = listCV([trueCV(), falseCV()]);
```
-If you develop in Typescript, the type checker can help prevent you from creating wrongly typed Clarity values. For example, the following code won't compile since in Clarity lists are homogeneous, meaning they can only contain values of a single type. It is important to include the type variable `BooleanCV` in this example, otherwise the typescript type checker won't know which type the list is of and won't enforce homogeneity.
+If you develop in Typescript, the type checker can help prevent you from creating wrongly-typed Clarity values. For example, the following code won't compile since lists are homogeneous in Clarity, meaning they can only contain values of a single type. It is important to include the type variable `BooleanCV` in this example; otherwise the typescript type checker won't know which type the list is of and won't enforce homogeneity.
```js
const l = listCV<BooleanCV>([trueCV(), intCV(1)]);
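// For contrast, a homogeneous list type-checks (illustrative sketch):
const ok = listCV<BooleanCV>([trueCV(), falseCV()]);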
@@ -387,15 +387,11 @@ Below are the steps taken to generate the signature internal to the transaction
### Signing steps
-Step 1: Generate a transaction hash for signing. This is the SHA512/256 digest of the serialized transaction before a signature is added.
-Step 2: Append the authorization type, fee amount and nonce to the transaction hash to create the signature hash.
-Step 3: Generate the SHA512/256 hash of the resulting string from the previous step.
-Step 4: Sign the hash using ECDSA and the origin private key.
-Step 5: Add the resulting recoverable ECDSA signature to the transaction spending condition.
+1: Generate a transaction hash for signing. This is the SHA512/256 digest of the serialized transaction before a signature is added.
+2: Append the authorization type, fee amount and nonce to the transaction hash to create the signature hash.
+3: Generate the SHA512/256 hash of the resulting string from the previous step.
+4: Sign the hash using ECDSA and the origin private key.
+5: Add the resulting recoverable ECDSA signature to the transaction spending condition.
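
Client libraries drive these steps for you; in `@stacks/transactions` they happen inside `TransactionSigner`. A sketch of invoking it manually (the keys are placeholders):

```ts
import {
  AnchorMode,
  createStacksPrivateKey,
  makeUnsignedSTXTokenTransfer,
  TransactionSigner,
} from '@stacks/transactions';

async function signTransfer(): Promise<void> {
  const tx = await makeUnsignedSTXTokenTransfer({
    recipient: 'ST3NBRSFKX28FQ2ZJ1MAKX58HKHSDGNV5N7R21XCP', // placeholder
    amount: 1000n,
    publicKey: '03ef2340518b5867b23598a9cf74611f8dd47d4340dcb9bda5040642ceac35e55e', // placeholder
    anchorMode: AnchorMode.Any,
  });
  // signOrigin computes the sighash (steps 1-3), signs it with ECDSA using
  // the origin key (step 4), and stores the recoverable signature in the
  // spending condition (step 5).
  const signer = new TransactionSigner(tx);
  signer.signOrigin(
    createStacksPrivateKey('b244296d5907de9864c0b0d51f98a13c52890be0404e83f273144cd5b9960eed01') // placeholder
  );
}
```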
### Single signature transaction
@@ -431,7 +427,9 @@ There is no explicit time constraint between the construction of a valid signed
Once a transaction has been successfully broadcast to the network, the transaction is added to the mempool of the node
that received the broadcast. From the [Bitcoin wiki][]: "a node's memory pool contains all 0-confirmation transactions
-across the entire network that that particular node knows about." So, the set of transactions in the mempool might be
+across the entire network that that particular node knows about."
+So, the set of transactions in the mempool might be
different for each node in the network. For example, when you query the mempool endpoints on
`api.mainnet.hiro.so`, the response reflects the set of unconfirmed transactions known to the nodes that
service that API.
@@ -439,7 +437,9 @@ service that API.
Miners can employ different heuristics and strategies for deciding which transactions to admit into the mempool and
which transactions to include from the mempool when mining a block. Some transactions may be rejected outright (for
example, if there are insufficient funds at an address) while others might be accepted into the mempool, but not mined
-into a block indefinitely (for example if fees are too low). Transactions that are admitted in the mempool but not yet
+into a block indefinitely (for example if fees are too low).
+Transactions that are admitted in the mempool but not yet
mined are said to be "pending." The current implementation of [stacks-blockchain][] discards pending mempool
transactions after [256 blocks][].
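
You can observe this per-node view directly. A sketch querying the hosted mempool endpoint (swap in your own instance's base URL to compare its view):

```ts
// List a few pending transactions known to the nodes behind this API.
async function peekMempool(): Promise<void> {
  const res = await fetch('https://api.mainnet.hiro.so/extended/v1/tx/mempool?limit=5');
  const { total, results } = await res.json();
  console.log(`${total} pending transactions known to this node`);
  for (const tx of results) console.log(tx.tx_id, tx.tx_type);
}
```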
@@ -596,7 +596,7 @@ Sample response:
Broadcast transactions stay in the mempool for 256 blocks (~42 hours). If a transaction is not confirmed within that time, it is removed from the mempool.
-!> Most transactions stay in the mempool due to nonce issues. If you see a transaction pending for an unusual time, review the nonce of the account and the transaction.
+Most transactions stay in the mempool due to nonce issues. If you see a transaction pending for an unusual time, review the nonce of the account and the transaction.
If a transaction is removed from the mempool, the transaction was not processed and no changes were made to the blockchain state.
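
A sketch of that nonce review using the API's address nonce endpoint (the principal is a placeholder):

```ts
// Compare the account's expected next nonce against a stuck transaction's nonce.
async function checkNonces(principal: string): Promise<void> {
  const res = await fetch(`https://api.mainnet.hiro.so/extended/v1/address/${principal}/nonces`);
  const nonces = await res.json();
  // If the pending transaction's nonce is above possible_next_nonce, earlier
  // nonces are missing and the transaction cannot be mined yet.
  console.log(nonces.last_executed_tx_nonce, nonces.possible_next_nonce);
}

// e.g. checkNonces('SP2ZD731ANQZT6J4K3F5N8A40ZXWXC1XFXHVVQFKE');
```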

View File

@@ -8,7 +8,7 @@ This page describes how you can start the API server and service dependencies.
## Prerequisites
-Before you can start the API server and its dependencies, you must first ensure that Docker is already installed on your machine. If you do not aready have Docker installed, please install Docker.
+Before you can start the API server and its dependencies, you must first ensure that Docker is already installed on your machine. If you do not already have Docker installed, please install Docker [here](https://www.docker.com/).
## Starting the API Server

View File

@@ -4,6 +4,10 @@ Title: Run a Stacks Blockchain API instance with Docker
# Run a Stacks Blockchain API instance with Docker
> **_NOTE:_**
>
> For a faster way to deploy the Stacks Blockchain and Stacks Blockchain API with Docker, see the [Stacks Blockchain Docker](https://github.com/stacks-network/stacks-blockchain-docker) repository.
On this page, you will learn how to run a [stacks-blockchain-api](https://github.com/hirosystems/stacks-blockchain-api) instance. A working setup involves several components, each of which is described below.
This page will also focus on the **easy** path to get the services running, which is currently Docker.

View File

@@ -1,21 +0,0 @@
---
Title: How-to use Docker with Stacks Blockchain API
---
# How-to use Docker with Stacks Blockchain API
A self-contained Docker image is provided, which will start a Stacks 2.05 blockchain and API instance.
# Installing Docker
To install Docker so you can use it with a Stacks Blockchain API:
1. Ensure Docker is installed, then run the command:
`docker run -p 3999:3999 hirosystems/stacks-blockchain-api-standalone`
2. Similarly, you can start a "mocknet" instance, which will run a local node, isolated from the testnet/mainnet, by running the following command:
`docker run -p 3999:3999 -e STACKS_NETWORK=mocknet hirosystems/stacks-blockchain-api-standalone`
3. Once the blockchain has synced with the network, the API will be available at the following location: http://localhost:3999

View File

@@ -8,6 +8,12 @@ The Stacks blockchain API allows you to query the Stacks blockchain and interact
The Stacks Blockchain API is hosted by Hiro. Using it requires you to trust the hosted server, but this API also provides a faster development experience. You may wish to consider running your own API instance to create a fully trustless architecture for your app.
> **_NOTE:_**
>
> To explore detailed documentation of the API endpoints and their request and response formats, refer to the [OpenAPI specification](https://docs.hiro.so/api).
>
> The source code for this project is available in our [GitHub repository](https://github.com/hirosystems/stacks-blockchain-api). You can explore the codebase, [contribute](https://docs.hiro.so/contributors-guide), and raise [issues](https://github.com/hirosystems/stacks-blockchain-api/issues) or [pull requests](https://github.com/hirosystems/stacks-blockchain-api/pulls).
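
For a first request, the status endpoint is a quick check that an instance (hosted or your own) is reachable:

```ts
// Query the hosted API; substitute your own instance's URL (for example
// http://localhost:3999) for a trustless setup.
async function apiStatus(): Promise<void> {
  const res = await fetch('https://api.mainnet.hiro.so/extended/v1/status');
  const status = await res.json();
  console.log(status.server_version, status.chain_tip?.block_height);
}
```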
## Architecture
![API architecture!](images/api-architecture.png)

View File

@@ -4228,7 +4228,6 @@ export class PgStore {
)
)
ORDER BY stacker, block_height DESC, microblock_sequence DESC, tx_index DESC, event_index DESC
-LIMIT 1
`;
poxV2Unlocks = pox2EventQuery.map(row => {
const pox2Event = parseDbPox2Event(row);
@@ -4306,7 +4305,6 @@ export class PgStore {
)
)
ORDER BY stacker, block_height DESC, microblock_sequence DESC, tx_index DESC, event_index DESC
-LIMIT 1
`;
poxV3Unlocks = pox3EventQuery.map(row => {
const pox3Event = parseDbPox2Event(row) as DbPox3Event;

View File

@@ -5,7 +5,15 @@ import * as express from 'express';
import * as bodyParser from 'body-parser';
import { asyncHandler } from '../api/async-handler';
import PQueue from 'p-queue';
-import { ChainID, getChainIDNetwork, getIbdBlockHeight, hexToBuffer } from '../helpers';
+import * as prom from 'prom-client';
+import {
+  ChainID,
+  getChainIDNetwork,
+  getIbdBlockHeight,
+  hexToBuffer,
+  isProdEnv,
+  stopwatch,
+} from '../helpers';
import {
CoreNodeBlockMessage,
CoreNodeEventType,
@@ -228,6 +236,7 @@ async function handleBlockMessage(
msg: CoreNodeBlockMessage,
db: PgWriteStore
): Promise<void> {
const ingestionTimer = stopwatch();
const parsedTxs: CoreNodeParsedTxMessage[] = [];
const blockData: CoreNodeMsgBlockData = {
...msg,
@@ -323,6 +332,8 @@ async function handleBlockMessage(
};
await db.update(dbData);
const ingestionTime = ingestionTimer.getElapsed();
logger.info(`Ingested block ${msg.block_height} (${msg.block_hash}) in ${ingestionTime}ms`);
}
function parseDataStoreTxEventData(
@@ -697,10 +708,30 @@ function createMessageProcessorQueue(): EventMessageHandler {
// Create a promise queue so that only one message is handled at a time.
const processorQueue = new PQueue({ concurrency: 1 });
let eventTimer: prom.Histogram<'event'> | undefined;
if (isProdEnv) {
eventTimer = new prom.Histogram({
name: 'stacks_event_ingestion_timers',
help: 'Event ingestion timers',
labelNames: ['event'],
buckets: prom.exponentialBuckets(50, 3, 10), // 10 buckets, from 50 ms to 15 minutes
});
}
const observeEvent = async (event: string, fn: () => Promise<void>) => {
const timer = stopwatch();
try {
await fn();
} finally {
const elapsedMs = timer.getElapsed();
eventTimer?.observe({ event }, elapsedMs);
}
};
const handler: EventMessageHandler = {
handleRawEventRequest: (eventPath: string, payload: any, db: PgWriteStore) => {
return processorQueue
-.add(() => handleRawEventRequest(eventPath, payload, db))
+.add(() => observeEvent('raw_event', () => handleRawEventRequest(eventPath, payload, db)))
.catch(e => {
logger.error(e, 'Error storing raw core node request data');
throw e;
@@ -708,7 +739,7 @@ function createMessageProcessorQueue(): EventMessageHandler {
},
handleBlockMessage: (chainId: ChainID, msg: CoreNodeBlockMessage, db: PgWriteStore) => {
return processorQueue
-.add(() => handleBlockMessage(chainId, msg, db))
+.add(() => observeEvent('block', () => handleBlockMessage(chainId, msg, db)))
.catch(e => {
logger.error(e, 'Error processing core node block message');
throw e;
@@ -720,7 +751,7 @@ function createMessageProcessorQueue(): EventMessageHandler {
db: PgWriteStore
) => {
return processorQueue
-.add(() => handleMicroblockMessage(chainId, msg, db))
+.add(() => observeEvent('microblock', () => handleMicroblockMessage(chainId, msg, db)))
.catch(e => {
logger.error(e, 'Error processing core node microblock message');
throw e;
@@ -728,7 +759,7 @@ function createMessageProcessorQueue(): EventMessageHandler {
},
handleBurnBlock: (msg: CoreNodeBurnBlockMessage, db: PgWriteStore) => {
return processorQueue
-.add(() => handleBurnBlockMessage(msg, db))
+.add(() => observeEvent('burn_block', () => handleBurnBlockMessage(msg, db)))
.catch(e => {
logger.error(e, 'Error processing core node burn block message');
throw e;
@@ -736,7 +767,7 @@ function createMessageProcessorQueue(): EventMessageHandler {
},
handleMempoolTxs: (rawTxs: string[], db: PgWriteStore) => {
return processorQueue
-.add(() => handleMempoolTxsMessage(rawTxs, db))
+.add(() => observeEvent('mempool_txs', () => handleMempoolTxsMessage(rawTxs, db)))
.catch(e => {
logger.error(e, 'Error processing core node mempool message');
throw e;
@@ -744,7 +775,9 @@ function createMessageProcessorQueue(): EventMessageHandler {
},
handleDroppedMempoolTxs: (msg: CoreNodeDropMempoolTxMessage, db: PgWriteStore) => {
return processorQueue
-.add(() => handleDroppedMempoolTxsMessage(msg, db))
+.add(() =>
+  observeEvent('dropped_mempool_txs', () => handleDroppedMempoolTxsMessage(msg, db))
+)
.catch(e => {
logger.error(e, 'Error processing core node dropped mempool txs message');
throw e;
@@ -752,7 +785,7 @@ function createMessageProcessorQueue(): EventMessageHandler {
},
handleNewAttachment: (msg: CoreNodeAttachmentMessage[], db: PgWriteStore) => {
return processorQueue
-.add(() => handleNewAttachmentMessage(msg, db))
+.add(() => observeEvent('new_attachment', () => handleNewAttachmentMessage(msg, db)))
.catch(e => {
logger.error(e, 'Error processing new attachment message');
throw e;
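
The `stacks_event_ingestion_timers` histogram added above is only useful once something scrapes it. A minimal sketch of exposing registered `prom-client` metrics over HTTP; the route, port, and standalone server here are illustrative assumptions, not the API's actual wiring:

```ts
import * as express from 'express';
import * as prom from 'prom-client';

// Hypothetical standalone scrape endpoint serving the default registry,
// which is where the histogram above is registered.
const app = express();
app.get('/metrics', async (_req: express.Request, res: express.Response) => {
  res.set('Content-Type', prom.register.contentType);
  res.send(await prom.register.metrics());
});
app.listen(9153);
```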

View File

@@ -0,0 +1,48 @@
import { DbTxTypeId } from '../../src/datastore/common';
import { PgWriteStore } from '../datastore/pg-write-store';
import { importEventsFromTsv } from '../event-replay/event-replay';
describe('poison microblock for height 80743', () => {
let db: PgWriteStore;
beforeEach(async () => {
process.env.PG_DATABASE = 'postgres';
db = await PgWriteStore.connect({
usageName: 'tests',
withNotifier: false,
skipMigrations: true,
});
});
afterEach(async () => {
await db?.close();
});
test('test that it does not give 500 error', async () => {
await importEventsFromTsv(
'src/tests-event-replay/tsv/poisonmicroblock.tsv',
'archival',
true,
true
);
const poisonTxId = '0x58ffe62029f94f7101b959536ea4953b9bce0ec3f6e2a06254c511bdd5cfa9e7';
const chainTip = await db.getUnanchoredChainTip();
// query the txs table and check the transaction type
const searchResult = await db.searchHash({ hash: poisonTxId });
let entityData: any;
if (searchResult.result?.entity_data) {
entityData = searchResult.result?.entity_data;
}
expect(chainTip.found).toBe(true);
// check the transaction type to be contract call for this poison block
expect(entityData.type_id).toBe(DbTxTypeId.ContractCall);
expect(searchResult.found).toBe(true);
expect(chainTip.result?.blockHeight).toBe(1);
expect(chainTip.result?.indexBlockHash).toBe(
'0x05ca75b9949195da435e6e36d731dbaa10bb75fda576a52263e25164990bfdaa'
);
expect(chainTip.result?.blockHash).toBe(
'0x6b83b44571365e6e530d679536578c71d6c376b07666f3671786b6fd8fac049c'
);
});
});

File diff suppressed because one or more lines are too long