fix: release 1.0.2 (#179)

* chore: stop auto-adding issues to DevTools Project (#170)

* fix: enable streaming for in-memory observers (#171)

* Squashed commit of the following:

commit 9862b71c34
Author: semantic-release-bot <semantic-release-bot@martynus.net>
Date:   Thu Sep 7 00:06:39 2023 +0000

    chore(release): 1.0.0 [skip ci]

    ## 1.0.0 (2023-09-07)

    ### Features

    * ability to control inclusion of inputs/outputs/proofs/witness ([daf5547](daf55476c9))
    * ability to download hord.sqlite ([3dafa53](3dafa53ac0))
    * ability to generate config ([9fda9d0](9fda9d0d34))
    * ability to replay inscriptions ([f1adca9](f1adca9b0f))
    * ability to resume ([6c7eaa3](6c7eaa3bee))
    * ability to target blocks ([f6be49e](f6be49e24d))
    * ability to tolerate corrupted data ([adb1b98](adb1b988a6))
    * ability to track updates when scanning bitcoin (+refactor) ([9e54bff](9e54bfff35))
    * ability to update stacks db from cli + fix caching logic ([3ea9f59](3ea9f597af))
    * add command to check stacks db integrity ([322f473](322f47343c))
    * add get block command to cli ([97de0b0](97de0b071b))
    * add log, fix ordinal transfers scan ([c4202da](c4202dad2c))
    * add logs ([473ddd0](473ddd0595))
    * add metrics to `/ping` response of event observer server ([#297](https://github.com/hirosystems/ordhook/issues/297)) ([0e1ee7c](0e1ee7c1ee)), closes [#285](https://github.com/hirosystems/ordhook/issues/285)
    * add option to skip chainhook node ping ([a7c0b12](a7c0b12ad9))
    * add options for logs ([917090b](917090b408))
    * add post_transfer_output_value ([4ce0e9e](4ce0e9e5db))
    * add retry ([117e41e](117e41eae8))
    * add shared cache ([07523ae](07523aed1a))
    * add support for bitcoin op DelegatedStacking ([6516155](6516155055))
    * add transfers table ([db14f60](db14f60347))
    * always try to initialize tables when starting service ([1a9eddb](1a9eddb6aa))
    * attempt to scale up multithreading ([be91202](be91202d6b))
    * attempt to support cursed inscriptions ([9b45f90](9b45f908b8))
    * attempt transition to lazy model ([dda0b03](dda0b03ea3))
    * batch ingestion, improve cleaning ([168162e](168162e0dd))
    * better handling of blessed inscription turning cursed ([f11509a](f11509ab97))
    * cascade changes in CLI interface ([24f27fe](24f27fea63))
    * cascade hord activation ([42c090b](42c090ba7e))
    * chainhook-sdk config niceties ([7d9e179](7d9e179464))
    * class interface ([9dfec45](9dfec454f5))
    * client draft ([6a6451c](6a6451c864))
    * complete migration to lazy blocks ([fa50584](fa5058471a))
    * disable certs ([389f77d](389f77d473))
    * draft naive inscription detection ([9b3e38a](9b3e38a441))
    * draft ordhook-sdk-js ([b264e72](b264e7281b))
    * draft sha256 verification (wip) ([e6f0619](e6f0619a7c))
    * drafting lazy deserialization ([eaa2f71](eaa2f71fce))
    * dry config ([135297e](135297e978))
    * expose `is_streaming_blocks` prop ([1ba27d7](1ba27d7459))
    * expose more functions for working with the indexer ([654fead](654feadbdf))
    * expose scanning status in GET endpoint ([156c463](156c463cc0))
    * expose transfers_pre_inscription ([65afd77](65afd77492))
    * fetch full bitcoin block, including witness data ([ee9a345](ee9a3452ac))
    * fix download block ([38b50df](38b50df7a1))
    * handle stacks unconfirmed state scans ([f6d050f](f6d050fbce))
    * handle transfer ([fd5da52](fd5da52df4))
    * HTTP responses adjustments ([51572ef](51572efd93))
    * implement and document new development flow ([66019a0](66019a06e7))
    * implement zmq runloop ([c6c1c0e](c6c1c0ecce))
    * import inscription parser ([45e0147](45e0147ecf))
    * improve cli ergonomics ([991e33f](991e33ff42))
    * improve cli experience ([e865628](e8656285b2))
    * improve debug log ([5df77d7](5df77d7f84))
    * improve hord db commands ([21c09c2](21c09c296f))
    * improve onboarding ([deaa739](deaa739bdd))
    * improve ordinal scan efficiency ([e510d4b](e510d4bd09))
    * improve README ([f30e6f4](f30e6f4ed5))
    * improve repair command convenience ([46be0ab](46be0ab5a7))
    * improving curse approach ([dcb8054](dcb805485f))
    * in-house thread pool ([bc5ffdd](bc5ffddb5b))
    * inscription replay speedup ([33a4f8b](33a4f8b6af))
    * introduce check command ([f17dc4c](f17dc4c343))
    * introduce evaluation reports ([54ad874](54ad874ee5))
    * introduce migration script ([8c2b16c](8c2b16cc48))
    * introduce new predicate + refactor schemas ([611c79c](611c79cee3))
    * introduce rocksdb storage for Stacks ([4564e88](4564e8818a))
    * introduce sync command ([ab022e6](ab022e6098))
    * introduce terminate function ([91616f6](91616f6531))
    * is_streaming_blocks ([aacf487](aacf487de6))
    * keep 1st tx in cache ([0978a5d](0978a5d4c1))
    * logic to start ingestion during indexing ([3c1c99d](3c1c99df5d))
    * merge "inscription_revealed" and "inscription_transferred" into "inscription_feed" ([741290d](741290de13))
    * migrate stacks scans to rocksdb ([4408b1e](4408b1e7ec))
    * migration to rocksdb, moving json parsing from networking thread ([5ad0147](5ad0147fa0))
    * move thread pool size to config ([bc313fa](bc313fad5c))
    * multithread traversals ([fba5c89](fba5c89a48))
    * number of retries from 4 to 3 ([b294dff](b294dff69a))
    * optimize memory ([5db1531](5db1531a3d))
    * optimize replay ([be26dac](be26daccd0))
    * ordinal inscription_transfer code complete ([f55a5ee](f55a5ee167))
    * plug inscription processing in ibd ([df36617](df36617214))
    * plumbing for ordhook-sdk-js ([7487589](74875896a3))
    * polish `hord find sat_point` command ([d071484](d0714842a2))
    * polish first impression ([3c2b00c](3c2b00ce38))
    * predicate schemas ([198cdaa](198cdaa6c8))
    * prototype warmup ([fa6c86f](fa6c86fb1f))
    * re-approach stacks block commit schema ([218d599](218d5998d6))
    * re-implement satoshi overflows handling ([8ea5bdf](8ea5bdf819))
    * re-introduce ingestion ([71c90d7](71c90d755d))
    * restore ability to replay transfers ([98e7e9b](98e7e9b21d))
    * return enable in api ([f39259c](f39259ceeb))
    * return local result when known ([5441851](5441851db7))
    * revisit caching strategy ([2705b95](2705b9501b))
    * revisit threading model ([05b6d5c](05b6d5c4d7))
    * scan inscription revealed ([84c5a0c](84c5a0c521))
    * scan inscription revealed ([644d515](644d5155d2))
    * share traversals_cache over 10 blocks spans ([b0378c3](b0378c3099))
    * simplify + improve coordination ([1922fd9](1922fd9bc4))
    * start investigating zmq signaling ([0ec2653](0ec265380c))
    * streamline processors ([13421db](13421db297))
    * support cursed inscriptions in chainhook client ([d7cc5a4](d7cc5a4410))
    * support for latest archives, add logs ([494cf3c](494cf3c9a5))
    * tweak mmap / page_size values ([5316a57](5316a575b0))
    * update chainhook-sdk ([f052e08](f052e08469))
    * update inscription transfer logic ([9d0d106](9d0d106e9c))
    * update inscription transfer schemas ([f80e983](f80e983481))
    * upgrade `service start`  implementation + documentation ([02db65e](02db65e417))
    * use caching on streamed blocks ([784e9a0](784e9a0830))
    * use thread pools for scans ([45b9abd](45b9abd3e0))
    * zmq sockets ([d2e328a](d2e328aa57))

    ### Bug Fixes

    * ability to run without redis ([96825c3](96825c35a8))
    * add busy handler ([d712e0d](d712e0ddae))
    * add exp backoff ([f014c14](f014c14277))
    * add retry logic in rocksdb ([247df20](247df2088a))
    * add retry logic to work around unexpected responses from bitcoind ([2ab6b32](2ab6b32ff0))
    * additional adjustments ([fe26063](fe26063513))
    * additional fixes (network, address, offsets) ([8006000](8006000034))
    * address build warnings ([dc623a0](dc623a01e5))
    * address non-inscribed block case ([a7d08a3](a7d08a3722))
    * address redis disconnects ([a6b4a5f](a6b4a5fb38))
    * address remaining issues ([74b2fa9](74b2fa9411))
    * adjust error message ([3e7b0d0](3e7b0d03f9))
    * allow empty block ([fe8ce45](fe8ce455a1))
    * always fetch blocks ([97060a1](97060a13ca))
    * async/await regression ([676aac1](676aac196d))
    * attempt ([9e14fce](9e14fce0e4))
    * attempt to fix offset ([e6c5d0e](e6c5d0eed8))
    * attempt to retrieve blocks from iterator ([f718071](f718071b33))
    * attempt to tweak rocksdb ([11b9b6b](11b9b6be62))
    * auto enable stacks predicate ([30557f8](30557f8667))
    * backpressure on traversals ([3177e22](3177e22921))
    * batch inscription ([cd1085c](cd1085ceb0))
    * batch migration ([ed8b7ad](ed8b7ad2f3))
    * better redis error handling ([debb06c](debb06cd5c))
    * better support of reinscriptions ([a1410e2](a1410e29dd))
    * better termination ([8a5482c](8a5482c131))
    * binary name ([4950a50](4950a50381))
    * block streaming ([dcdfd16](dcdfd1655c))
    * boot sequence ([577f1c2](577f1c237e))
    * boot sequence, logs, format ([d03f851](d03f85178d))
    * borrow issue ([66e2a7c](66e2a7c785))
    * broken build ([f0d471e](f0d471ea8b))
    * broken test ([239b26a](239b26a614))
    * broken tests ([2ab6e7d](2ab6e7d679))
    * build ([4067f08](4067f0814f))
    * build ([607ac95](607ac953b1))
    * build error ([d6ed108](d6ed10894c))
    * build error ([bbede8b](bbede8b546))
    * build error ([fa802fa](fa802fae7a))
    * build error ([44ca74b](44ca74b2c5))
    * build error ([053b781](053b7815a8))
    * build error ([5c3bcf4](5c3bcf42fc))
    * build error ([b78c0cc](b78c0ccea6))
    * build error ([879ed67](879ed6775a))
    * build errors ([60cd4d0](60cd4d0c61))
    * build errors ([8dd91bf](8dd91bfce3))
    * build errors / merge snafu ([47da0c1](47da0c132a))
    * build errors + warnings ([938c6df](938c6dff27))
    * build failing ([83f1496](83f14964a6))
    * build warning ([561e51e](561e51eb27))
    * build warning ([75847df](75847df0d1))
    * build warning ([0194483](0194483b75))
    * build warnings ([d3e998c](d3e998c469))
    * build warnings ([e7ad175](e7ad175805))
    * build warnings ([670bde6](670bde6379))
    * bump incoming payload limit to 20mb ([7e15086](7e150861a4))
    * cache invalidation ([05bd903](05bd9035eb))
    * cache L2 capacity ([e2fbc73](e2fbc73eaf))
    * cache size ([ce61205](ce61205b96))
    * cache's ambitions ([e438db7](e438db7514))
    * Cargo.toml ([759c3a3](759c3a393f))
    * chain mixup, add logs ([0427a10](0427a10a63))
    * change forking behavior ([4c10014](4c100147c2))
    * clean expectations ([f9e089f](f9e089f90d))
    * clear cache more regularly ([c3b884f](c3b884fd30))
    * command for db patch ([27f6838](27f683818d))
    * commands doc ([3485e6f](3485e6f3d9))
    * compatibility with clarinet ([a282655](a28265509f))
    * condition ([0233dc5](0233dc5bf0))
    * create dummy inscription for sats overflow ([84aa6ce](84aa6ce7fd))
    * db init command ([55e293b](55e293b3ca))
    * decrease compression - from 4 bytes to 8 bytes ([b2eb314](b2eb31424b))
    * deployer predicate wildcard ([05ca395](05ca395da1))
    * disable sleep ([41ecace](41ecacee0e))
    * disable stream scan when scanning past blocks ([e2949d2](e2949d213a))
    * disambiguate inscription_output_value and inscription_fee ([9816cbb](9816cbb70a))
    * do not panic ([a0fa1a9](a0fa1a9301))
    * doc drift ([b595339](b595339024))
    * docker build ([df39302](df39302616))
    * docker file ([6ad5206](6ad52061eb))
    * dockerfile ([73ad612](73ad612ea4))
    * dockerfile ([da21ec4](da21ec4cb9))
    * documentation drift ([c5335a7](c5335a765c))
    * documentation drift ([38153ca](38153ca22f))
    * don't early exit when satoshi computing fail ([a8d76b0](a8d76b03ac))
    * don't enable predicate if error ([1274cbf](1274cbf9c4))
    * early return ([8f97b56](8f97b5643b))
    * edge case when requests processed in order ([8c4325f](8c4325f721))
    * edge case when requests processed out of order ([a35cea2](a35cea2b54))
    * edge case when requests processed out of order ([a6651b8](a6651b851f))
    * enable profiling ([f99b073](f99b073528))
    * enable specs on reboot ([f23be24](f23be246c2))
    * enforce db reconnection in http endpoints ([bcd2a45](bcd2a45a86))
    * enum serialization ([67cb340](67cb340674))
    * error management ([f0274f5](f0274f5726))
    * export all types on ts client ([be8bfbc](be8bfbcf60))
    * failing build ([1502d5d](1502d5d682))
    * fee ([0337f92](0337f92ce0))
    * filter out sat overflows from payloads ([ce439ae](ce439ae900))
    * gap in stacks scanning ([8c8c5c8](8c8c5c8611))
    * generator typo ([8a7eddb](8a7eddb092))
    * handle hint and case of re-inscriptions ([f86b184](f86b184832))
    * handle non-spending transaction ([cb01eb5](cb01eb55fd))
    * handle re-inscription for unbound inscriptions ([a1ffc1a](a1ffc1a59a))
    * hard coded dev-dependency ([5c105de](5c105de8b5))
    * ignore invalid inscription ([f18bc00](f18bc00f5a))
    * ignore transaction aborting that we could not classify ([37c80f7](37c80f7e83))
    * implement error handler ([d071b81](d071b81954))
    * improve progress bar ([b28da56](b28da5697d))
    * improve rewrite block command ([d524771](d52477142a))
    * in-block re-inscription case ([90db9c3](90db9c3d15))
    * include blocks discovered during scan, if any ([1eabce2](1eabce25c3))
    * include ordinals operations in standardized blocks ([a13351d](a13351d46a))
    * include proof on scan commands ([6574008](6574008ae8))
    * increase number of retries ([343ddd6](343ddd65a8))
    * indexing ([45661ab](45661ab62c))
    * inject l1 cache hit in results (+ clearing) ([62fd929](62fd92948e))
    * inscription fee ([2ac3022](2ac302235c))
    * inscription_number ([a7d8153](a7d8153a8c))
    * insert new locations ([6475aeb](6475aeb8d4))
    * iterate on values ([0c73e62](0c73e62902))
    * keep trying opening rocksdb conn if failing ([dbc794a](dbc794a0d4))
    * lazy block approach ([b567322](b567322859))
    * leader_registered doc ([f9d7370](f9d7370c43))
    * loading predicates from redis ([3bd308f](3bd308fb15))
    * log level, zeromq dependency ([4a2a6ef](4a2a6ef297))
    * logic determining start height ([5dd300f](5dd300fb05))
    * logs ([81be24e](81be24ef08))
    * mark inscriber_address as nullable ([77fd88b](77fd88b9c1))
    * more pessimism on retries ([9b987c5](9b987c51a9))
    * move parsing back to network thread ([bad1ee6](bad1ee6d4e))
    * moving stacks tip ([87c409e](87c409e01c))
    * multithreading cap ([c80ae60](c80ae60991))
    * myriad of improvements ([0633182](063318233d))
    * nefarious logs ([3b01a48](3b01a48f1e))
    * network, cascade changes ([1f45ec2](1f45ec26da))
    * off by one ([2a0e75f](2a0e75f6a3))
    * off by one ([c31611f](c31611fb28))
    * off by one ([94e1141](94e11411f8))
    * off by one ([abf70e7](abf70e7204))
    * off by one error ([3832cf9](3832cf9770))
    * off by one inscriptions number ([cdfbf48](cdfbf487fa))
    * off by one issue ([fead2ed](fead2ed693))
    * off by one issue ([a8988ba](a8988ba573))
    * off by one issue ([155e3a6](155e3a6d29))
    * off by one issue on sats overflow ([8a12004](8a120040e7))
    * off-by-one error in backward traversal ([d4128aa](d4128aa8a1))
    * off-by-one in sats number resolution ([42acbeb](42acbebcd5))
    * offset ([278a655](278a65524b))
    * only avoid override for blessed inscriptions ([b50bbc1](b50bbc1bf7))
    * optimize reg and dereg ([c2ec1b5](c2ec1b5283))
    * ordinals scans ([62b62bd](62b62bd98a))
    * outdated dockerfile ([771b036](771b0362b2))
    * outdated documentation ([f472a49](f472a49c42))
    * overridden inscriptions ([25c6441](25c6441404))
    * parsing ([1f047a9](1f047a9162))
    * patch absence of witness data ([f8fcfca](f8fcfcad6d))
    * patch boot latency ([0e3faf9](0e3faf9a61))
    * patch crash ([20d9df6](20d9df6c65))
    * patch db call ([d385df2](d385df2037))
    * pipeline logic ([a864c85](a864c85c33))
    * pipeline resuming ([06883c6](06883c655a))
    * ports ([3ee98a8](3ee98a8be9))
    * potential resolve coinbase spent ([5d26738](5d267380f7))
    * PoxInfo default for scan commands ([a00ccf5](a00ccf589a))
    * predicate documentation ([572cf20](572cf202ba))
    * predicate generator network ([8f0ae21](8f0ae216c8))
    * provide optional values ([2cbf87e](2cbf87ebcc))
    * re-apply initial fix ([f5cb516](f5cb516ee0))
    * re-arrange logs ([2857d0a](2857d0a1a4))
    * re-enable sleep ([0f61a26](0f61a26fda))
    * re-initiate inscriptions connection every 250 blocks ([39671f4](39671f4378))
    * re-qualify error to warn ([9431684](9431684afe))
    * re-wire cmd ([a1447ad](a1447ad277))
    * README ([db1d584](db1d584827))
    * recreate db conn on a regular basis ([81d8575](81d85759a4))
    * redis update ([d4889f1](d4889f16b7))
    * related issue ([4b3a0da](4b3a0daa43))
    * remove rocksdb reconnect ([f2b067e](f2b067e85e))
    * remove sleep ([c371e74](c371e74de7))
    * remove start logic ([a04711a](a04711ad7c))
    * remove support for p2wsh inscription reveal support ([4fe71f2](4fe71f2622))
    * remove symbols ([108117b](108117b82e))
    * remove thread_max * 2 ([359c6f9](359c6f9422))
    * reopen connect on failures ([3e15da5](3e15da5565))
    * reply with 500 on payload processing error ([eaa6d7b](eaa6d7b640))
    * report generation ([0dce12a](0dce12a4e2))
    * restore stable values ([fb5c591](fb5c591943))
    * return blocks to rollback in reverse order ([9fab5a3](9fab5a34a2))
    * reuse existing computation for fix ([222f7c3](222f7c3a14))
    * revert fix, avoid collision in traversals map ([dfcadec](dfcadec680))
    * revisit log level ([4168661](416866123a))
    * revisit transfer loop ([1f2151c](1f2151c098))
    * rocket_okapi version ([2af31a8](2af31a8e64))
    * safer db open, dockerfile ([43d37d7](43d37d73f2))
    * safer error handling ([11509e4](11509e4435))
    * sat offset computation ([b278b66](b278b66f84))
    * sats overflow handling ([a3f745c](a3f745cfa7))
    * schema for curse_type ([72d43c6](72d43c6b41))
    * serialize handlers in one thread ([cdfc264](cdfc264cff))
    * slow down initial configuration ([3096ad3](3096ad3b26))
    * sql query ([1a3bc42](1a3bc428ea))
    * sql query bis ([a479884](a4798848b1))
    * sql request ([6345df2](6345df2652))
    * sql table setup ([c8884a7](c8884a7dbe))
    * stack overflow ([aed7d5d](aed7d5d005))
    * stacks predicate format ([fcf9fb0](fcf9fb0e3f))
    * start_block off by one ([b99f7b0](b99f7b0011))
    * streamline txid handling ([ad48351](ad48351044))
    * test suite ([c7672f9](c7672f91a1))
    * test warns and errors ([0887d6b](0887d6b8ca))
    * threading model ([c9c43ae](c9c43ae3e3))
    * threading model ([c2354fc](c2354fcacd))
    * track interrupted scans ([2b51dca](2b51dca8f3))
    * transaction type schema ([c35a737](c35a737ed2))
    * transfer recomputing commit ([3643636](364363680f))
    * transfer tracking ([0ea85e3](0ea85e3d20))
    * transfer tracking ([30f299e](30f299ef7c))
    * transfer tracking ([0cd29f5](0cd29f5925))
    * transfer tracking + empty blocks ([dc94875](dc948755b2))
    * traversals algo ([e8ee3ab](e8ee3ab036))
    * tweak rocksdb options ([a0a6950](a0a69502d8))
    * typo ([b0498bb](b0498bb048))
    * typo ([baa773f](baa773ff4d))
    * unexpected expectation ([7dd362b](7dd362b4f5))
    * unify rosetta operation schemas ([bf3216b](bf3216b100))
    * unused imports ([3aab402](3aab4022ab))
    * update chainhook schema ([4e82714](4e8271491b))
    * update inscription_number ([89b94e7](89b94e7d5d))
    * update license ([6ebeb77](6ebeb77d6a))
    * update rust version in docker build ([fab6f69](fab6f69df5))
    * update spec status ([e268925](e2689255b7))
    * update/pin dependencies ([#311](https://github.com/hirosystems/ordhook/issues/311)) ([f54b374](f54b374b24)), closes [#310](https://github.com/hirosystems/ordhook/issues/310)
    * use first input to stick with ord spec interpretation / implementation ([206678f](206678f0d1))
    * use rpc instead of rest ([1b18818](1b188182f1))
    * zeromq, subsidy issue ([dbca70c](dbca70c197))

    ### Reverts

    * Revert "chore: tmp patch" ([3e022ca](3e022ca322))

commit 4ef18d5b1e
Merge: d111c44 4cde5e8
Author: Scott McClellan <scott.mcclellan@gmail.com>
Date:   Wed Sep 6 18:44:26 2023 -0500

    Merge pull request #168 from hirosystems/develop

    Merge up `develop` to `main`

* fix: CI rust version mismatch, create empty db  (#173)

* fix: create db if does not exists

* chore: update rust version

* chore: bump version to 1.0.1

* fix: service boot sequence (#175)

* fix: ci

* fix: initial flow (#178)

* chore: update chainhook-sdk + cascade changes

* fix: update archive url

* feat: only create rocksdb if sqlite present

* fix: use crossbeam channel instead of std

* fix: improve error message

* doc: update README

* fix: build warnings

* fix: block_archiving expiration

* fix: archive url

* fix: read content len from http header

* chore: untar sqlite file

* chore: bump versions

---------

Co-authored-by: Scott McClellan <scott.mcclellan@gmail.com>
This commit is contained in:
Ludo Galabru
2023-09-20 00:18:03 -04:00
committed by GitHub
parent d23edf3d80
commit ec1f28ea50
17 changed files with 671 additions and 794 deletions

1301
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -4,5 +4,5 @@ members = [
"components/ordhook-core",
"components/ordhook-sdk-js"
]
default-members = ["components/ordhook-cli"]
resolver = "2"

View File

@@ -112,4 +112,15 @@ will spin up a HTTP API for managing events destinations.
A comprehensive OpenAPI specification explaining how to interact with this HTTP REST API can be found [here](https://github.com/hirosystems/chainhook/blob/develop/docs/chainhook-openapi.json).
---
### Troubleshooting: Performance and System Requirements
The Ordinals Theory protocol is resource-intensive, demanding significant CPU, memory, and disk capabilities. As we continue to refine and optimize, keep in mind the following system requirements and recommendations to ensure optimal performance:
CPU: The ordhook tool efficiently utilizes multiple cores when detected at runtime, parallelizing tasks to boost performance.
Memory: A minimum of 16GB RAM is recommended.
Disk: To enhance I/O performance, SSD or NVMe storage is suggested.
OS Requirements: Ensure your system allows for a minimum of 4096 open file descriptors. Configuration may vary based on your operating system. On certain systems, this can be adjusted using the `ulimit` command or the `launchctl limit` command.

View File

@@ -1,6 +1,6 @@
[package]
name = "ordhook-cli"
version = "1.0.1"
version = "1.0.2"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -15,7 +15,7 @@ serde = "1"
serde_json = "1"
serde_derive = "1"
reqwest = { version = "0.11", features = ["stream", "json"] }
hiro-system-kit = "0.1.0"
hiro-system-kit = "0.3.1"
clap = { version = "3.2.23", features = ["derive"], optional = true }
clap_generate = { version = "3.0.3", optional = true }
toml = { version = "0.5.6", features = ["preserve_order"], optional = true }

View File

@@ -37,7 +37,7 @@ max_caching_memory_size_mb = 32000
# Disable the following section if the state
# must be built locally
[bootstrap]
download_url = "https://archive.hiro.so/mainnet/chainhooks/hord.sqlite"
download_url = "https://archive.hiro.so/mainnet/ordhook/mainnet-ordhook-sqlite-latest"
[logs]
ordinals_internals = true

View File

@@ -1,6 +1,6 @@
[package]
name = "ordhook"
version = "0.4.0"
version = "0.5.0"
edition = "2021"
[dependencies]
@@ -12,9 +12,9 @@ redis = "0.21.5"
serde-redis = "0.12.0"
hex = "0.4.3"
rand = "0.8.5"
chainhook-sdk = { version = "=0.9.0", default-features = false, features = ["zeromq", "log"] }
chainhook-sdk = { version = "=0.9.5", default-features = false, features = ["zeromq", "log"] }
# chainhook-sdk = { version = "=0.9.0", path = "../../../chainhook/components/chainhook-sdk", default-features = false, features = ["zeromq", "log"] }
hiro-system-kit = "0.1.0"
hiro-system-kit = "0.3.1"
reqwest = { version = "0.11", features = ["stream", "json"] }
tokio = { version = "=1.24", features = ["full"] }
futures-util = "0.3.24"

View File

@@ -7,7 +7,7 @@ use chainhook_sdk::types::{
use std::path::PathBuf;
const DEFAULT_MAINNET_ORDINALS_SQLITE_ARCHIVE: &str =
"https://archive.hiro.so/mainnet/chainhooks/hord.sqlite";
"https://archive.hiro.so/mainnet/ordhook/mainnet-ordhook-sqlite-latest";
pub const DEFAULT_INGESTION_PORT: u16 = 20455;
pub const DEFAULT_CONTROL_PORT: u16 = 20456;
@@ -155,7 +155,7 @@ impl Config {
}
pub fn expected_remote_ordinals_sqlite_url(&self) -> String {
format!("{}.gz", self.expected_remote_ordinals_sqlite_base_url())
format!("{}.tar.gz", self.expected_remote_ordinals_sqlite_base_url())
}
pub fn devnet_default() -> Config {

View File

@@ -14,12 +14,12 @@ use chainhook_sdk::{
use crate::{
config::{Config, LogConfig},
db::find_lazy_block_at_block_height,
db::{find_lazy_block_at_block_height, open_readwrite_ordhook_db_conn_rocks_db},
};
use crate::db::{
find_last_block_inserted, find_latest_inscription_block_height, initialize_ordhook_db,
open_readonly_ordhook_db_conn, open_readonly_ordhook_db_conn_rocks_db,
open_readonly_ordhook_db_conn,
};
use crate::db::LazyBlockTransaction;
@@ -94,6 +94,26 @@ pub fn compute_next_satpoint_data(
SatPosition::Output((output_index, (offset_cross_inputs - offset_intra_outputs)))
}
pub fn should_sync_rocks_db(
config: &Config,
ctx: &Context,
) -> Result<Option<(u64, u64)>, String> {
let blocks_db = open_readwrite_ordhook_db_conn_rocks_db(&config.expected_cache_path(), &ctx)?;
let inscriptions_db_conn = open_readonly_ordhook_db_conn(&config.expected_cache_path(), &ctx)?;
let last_compressed_block = find_last_block_inserted(&blocks_db) as u64;
let last_indexed_block = match find_latest_inscription_block_height(&inscriptions_db_conn, ctx)? {
Some(last_indexed_block) => last_indexed_block,
None => 0
};
let res = if last_compressed_block < last_indexed_block {
Some((last_compressed_block, last_indexed_block))
} else {
None
};
Ok(res)
}
pub fn should_sync_ordhook_db(
config: &Config,
ctx: &Context,
@@ -110,7 +130,7 @@ pub fn should_sync_ordhook_db(
}
};
let blocks_db = open_readonly_ordhook_db_conn_rocks_db(&config.expected_cache_path(), &ctx)?;
let blocks_db = open_readwrite_ordhook_db_conn_rocks_db(&config.expected_cache_path(), &ctx)?;
let mut start_block = find_last_block_inserted(&blocks_db) as u64;
if start_block == 0 {

View File

@@ -1,18 +1,16 @@
use std::{
sync::mpsc::Sender,
thread::{sleep, JoinHandle},
time::Duration,
};
use crossbeam_channel::{Sender, TryRecvError};
use chainhook_sdk::{types::BitcoinBlockData, utils::Context};
use crossbeam_channel::TryRecvError;
use rocksdb::DB;
use crate::{
config::Config,
core::pipeline::{PostProcessorCommand, PostProcessorController, PostProcessorEvent},
db::{
insert_entry_in_blocks, open_ordhook_db_conn_rocks_db_loop,
insert_entry_in_blocks,
open_readwrite_ordhook_db_conn_rocks_db, LazyBlock,
},
};
@@ -30,10 +28,9 @@ pub fn start_block_archiving_processor(
let ctx = ctx.clone();
let handle: JoinHandle<()> = hiro_system_kit::thread_named("Processor Runloop")
.spawn(move || {
let mut blocks_db_rw =
let blocks_db_rw =
open_readwrite_ordhook_db_conn_rocks_db(&config.expected_cache_path(), &ctx)
.unwrap();
let mut empty_cycles = 0;
let mut processed_blocks = 0;
loop {
@@ -49,16 +46,7 @@ pub fn start_block_archiving_processor(
}
Err(e) => match e {
TryRecvError::Empty => {
empty_cycles += 1;
if empty_cycles == 30 {
warn!(ctx.expect_logger(), "Block processor reached expiration");
let _ = events_tx.send(PostProcessorEvent::Expired);
break;
}
sleep(Duration::from_secs(1));
if empty_cycles > 120 {
break;
}
continue;
}
_ => {
@@ -71,11 +59,6 @@ pub fn start_block_archiving_processor(
if processed_blocks % 10_000 == 0 {
let _ = blocks_db_rw.flush_wal(true);
blocks_db_rw = open_ordhook_db_conn_rocks_db_loop(
true,
&config.expected_cache_path(),
&ctx,
);
}
}

View File

@@ -228,7 +228,7 @@ pub fn open_readonly_ordhook_db_conn_rocks_db(
opts.set_disable_auto_compactions(true);
opts.set_max_background_jobs(0);
let db = DB::open_for_read_only(&opts, path, false)
.map_err(|e| format!("unable to open blocks_db: {}", e.to_string()))?;
.map_err(|e| format!("unable to open hord.rocksdb: {}", e.to_string()))?;
Ok(db)
}
@@ -276,7 +276,7 @@ pub fn open_readwrite_ordhook_db_conn_rocks_db(
let path = get_default_ordhook_db_file_path_rocks_db(&base_dir);
let opts = rocks_db_default_options();
let db = DB::open(&opts, path)
.map_err(|e| format!("unable to open blocks_db: {}", e.to_string()))?;
.map_err(|e| format!("unable to open hord.rocksdb: {}", e.to_string()))?;
Ok(db)
}

View File

@@ -6,6 +6,7 @@ use flate2::read::GzDecoder;
use futures_util::StreamExt;
use progressing::mapping::Bar as MappingBar;
use progressing::Baring;
use tar::Archive;
use std::fs;
use std::io::{self, Cursor};
use std::io::{Read, Write};
@@ -19,7 +20,7 @@ pub fn default_sqlite_sha_file_path(_network: &BitcoinNetwork) -> String {
}
pub async fn download_sqlite_file(config: &Config, _ctx: &Context) -> Result<(), String> {
let mut destination_path = config.expected_cache_path();
let destination_path = config.expected_cache_path();
std::fs::create_dir_all(&destination_path).unwrap_or_else(|e| {
println!("{}", e.to_string());
});
@@ -46,22 +47,21 @@ pub async fn download_sqlite_file(config: &Config, _ctx: &Context) -> Result<(),
// Download chunks
let (tx, rx) = flume::bounded(0);
destination_path.push(default_sqlite_file_path(&config.network.bitcoin_network));
let decoder_thread = std::thread::spawn(move || {
let input = ChannelRead::new(rx);
let mut decoder = GzDecoder::new(input);
let mut content = Vec::new();
let _ = decoder.read_to_end(&mut content);
let mut file = fs::File::create(&destination_path).unwrap();
if let Err(e) = file.write_all(&content[..]) {
let mut archive = Archive::new(&content[..]);
if let Err(e) = archive.unpack(&destination_path) {
println!("unable to write file: {}", e.to_string());
std::process::exit(1);
}
});
if res.status() == reqwest::StatusCode::OK {
let limit = 5_400_000_000;
let limit = res.content_length().unwrap_or(10_000_000_000) as i64;
let mut progress_bar = MappingBar::with_range(0i64, limit);
progress_bar.set_len(60);
let mut stdout = std::io::stdout();

View File

@@ -234,14 +234,14 @@ pub async fn process_block_with_predicates(
predicates: &Vec<&BitcoinChainhookSpecification>,
event_observer_config: &EventObserverConfig,
ctx: &Context,
) -> Result<u32, ()> {
) -> Result<u32, String> {
let chain_event =
BitcoinChainEvent::ChainUpdatedWithBlocks(BitcoinChainUpdatedWithBlocksData {
new_blocks: vec![block],
confirmed_blocks: vec![],
});
let (predicates_triggered, _predicates_evaluated) =
let (predicates_triggered, _predicates_evaluated, _) =
evaluate_bitcoin_chainhooks_on_chain_event(&chain_event, predicates, ctx);
execute_predicates_action(predicates_triggered, &event_observer_config, &ctx).await
@@ -251,7 +251,7 @@ pub async fn execute_predicates_action<'a>(
hits: Vec<BitcoinTriggerChainhook<'a>>,
config: &EventObserverConfig,
ctx: &Context,
) -> Result<u32, ()> {
) -> Result<u32, String> {
let mut actions_triggered = 0;
let mut proofs = HashMap::new();
for trigger in hits.into_iter() {

View File

@@ -99,7 +99,7 @@ fn handle_get_predicates(
let serialized_predicates = predicates
.iter()
.map(|(p, _)| p.into_serialized_json())
.map(|(p, s)| serialized_predicate_with_status(p, s))
.collect::<Vec<_>>();
Json(json!({
@@ -311,3 +311,27 @@ pub fn load_predicates_from_redis(
.map_err(|e| format!("unable to connect to redis: {}", e.to_string()))?;
get_entries_from_predicates_db(&mut predicate_db_conn, ctx)
}
fn serialized_predicate_with_status(
predicate: &ChainhookSpecification,
status: &PredicateStatus,
) -> JsonValue {
match (predicate, status) {
(ChainhookSpecification::Stacks(spec), status) => json!({
"chain": "stacks",
"uuid": spec.uuid,
"network": spec.network,
"predicate": spec.predicate,
"status": status,
"enabled": spec.enabled,
}),
(ChainhookSpecification::Bitcoin(spec), status) => json!({
"chain": "bitcoin",
"uuid": spec.uuid,
"network": spec.network,
"predicate": spec.predicate,
"status": status,
"enabled": spec.enabled,
}),
}
}

View File

@@ -4,6 +4,7 @@ mod runloops;
use crate::config::{Config, PredicatesApi};
use crate::core::pipeline::download_and_pipeline_blocks;
use crate::core::pipeline::processors::block_archiving::start_block_archiving_processor;
use crate::core::pipeline::processors::inscription_indexing::process_block;
use crate::core::pipeline::processors::start_inscription_indexing_processor;
use crate::core::pipeline::processors::transfers_recomputing::start_transfers_recomputing_processor;
@@ -11,7 +12,7 @@ use crate::core::protocol::inscription_parsing::{
get_inscriptions_revealed_in_block, parse_inscriptions_in_standardized_block,
};
use crate::core::protocol::inscription_sequencing::SequenceCursor;
use crate::core::{new_traversals_lazy_cache, should_sync_ordhook_db};
use crate::core::{new_traversals_lazy_cache, should_sync_ordhook_db, should_sync_rocks_db};
use crate::db::{
delete_data_in_ordhook_db, insert_entry_in_blocks,
update_inscriptions_with_block, update_locations_with_block,
@@ -465,6 +466,38 @@ impl Service {
&self,
block_post_processor: Option<crossbeam_channel::Sender<BitcoinBlockData>>,
) -> Result<(), String> {
// First, make sure that rocksdb and sqlite are aligned.
// If rocksdb.chain_tip.height <= sqlite.chain_tip.height
// Perform some block compression until that height.
if let Some((start_block, end_block)) = should_sync_rocks_db(&self.config, &self.ctx)? {
let blocks_post_processor = start_block_archiving_processor(
&self.config,
&self.ctx,
false,
block_post_processor.clone(),
);
self.ctx.try_log(|logger| {
info!(
logger,
"Compressing blocks (from #{start_block} to #{end_block})"
)
});
let ordhook_config = self.config.get_ordhook_config();
let first_inscription_height = ordhook_config.first_inscription_height;
let blocks = BlockHeights::BlockRange(start_block, end_block).get_sorted_entries();
download_and_pipeline_blocks(
&self.config,
blocks.into(),
first_inscription_height,
Some(&blocks_post_processor),
10_000,
&self.ctx,
)
.await?;
}
// Start predicate processor
let mut last_block_processed = 0;
while let Some((start_block, end_block, speed)) =

View File

@@ -248,6 +248,7 @@ pub fn create_and_consolidate_chainhook_config_with_predicates(
blocks: None,
start_block: None,
end_block: None,
expired_at: None,
expire_after_occurrence: None,
predicate: chainhook_sdk::chainhooks::types::BitcoinPredicateType::OrdinalsProtocol(
chainhook_sdk::chainhooks::types::OrdinalOperations::InscriptionFeed,

View File

@@ -1,6 +1,6 @@
[package]
name = "ordhook-sdk-js"
version = "0.4.0"
version = "0.5.0"
edition = "2021"
exclude = ["index.node"]

View File

@@ -82,7 +82,7 @@ max_caching_memory_size_mb = 32000
# Disable the following section if the state
# must be built locally
[bootstrap]
download_url = "https://archive.hiro.so/mainnet/chainhooks/hord.sqlite"
download_url = "https://archive.hiro.so/mainnet/ordhook/mainnet-ordhook-sqlite-latest"
[logs]
ordinals_internals = true