feat: jubilee support, disk optimizations (#239)

* chore: cargo fmt

* fix: make db access more defensive

* chore: clippy

* fix: reopen conn on each iteration

* fix: retry mechanism for statement prepare
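
  A minimal sketch of the retry idea, assuming rusqlite (which ordhook uses for its SQLite access); the function name, retry budget, and delays below are illustrative, not the actual ordhook code:

  ```rust
  // Illustrative sketch only: retry statement preparation a few times before
  // giving up, since a concurrently written SQLite file can transiently
  // report the database as busy or locked.
  use rusqlite::{Connection, Statement};
  use std::time::Duration;

  fn prepare_with_retry<'conn>(
      conn: &'conn Connection,
      sql: &str,
  ) -> rusqlite::Result<Statement<'conn>> {
      let mut attempt: u64 = 0;
      loop {
          match conn.prepare(sql) {
              Ok(stmt) => return Ok(stmt),
              Err(e) if attempt < 5 => {
                  attempt += 1;
                  eprintln!("prepare failed ({e}), retrying...");
                  // back off briefly; a locked database usually clears quickly
                  std::thread::sleep(Duration::from_millis(100 * attempt));
              }
              Err(e) => return Err(e),
          }
      }
  }
  ```

  In the same spirit, the explicit-timeout commit below maps naturally onto rusqlite's `Connection::busy_timeout`.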

* refactor: sqlite requests retries

* feat: set explicit timeout value

* fix: make logging optional

* chore: logger cleaning

* fix: result looping

* feat: introduce ORDHOOK_MAINTENANCE mode, revisit cli options
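
  A hypothetical sketch of such a maintenance gate; only the `ORDHOOK_MAINTENANCE` variable name comes from the commit above, the polling logic is an assumption for illustration:

  ```rust
  // Hypothetical: park the process while the operator has maintenance
  // mode enabled, instead of serving or indexing.
  fn wait_while_in_maintenance_mode() {
      while std::env::var("ORDHOOK_MAINTENANCE").map(|v| v == "1").unwrap_or(false) {
          eprintln!("maintenance mode enabled, pausing indexing...");
          std::thread::sleep(std::time::Duration::from_secs(60));
      }
  }
  ```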

* chore: re-qualify logs

* chore: enable mode code site

* chore: re-qualify log

* fix: keep trying to open conn

* fix: improve rocksdb resiliency with retries
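
  This is the retry-until-open pattern later formalized as `open_ordhook_db_conn_rocks_db_loop` in the diff below. A sketch under the assumption that the failure is transient LOCK-file contention; option values and delay are illustrative:

  ```rust
  // Sketch only: keep retrying the open instead of crashing the indexer.
  use rocksdb::{Options, DB};
  use std::time::Duration;

  fn open_rocks_db_loop(path: &str) -> DB {
      let mut opts = Options::default();
      opts.create_if_missing(true);
      loop {
          match DB::open(&opts, path) {
              Ok(db) => return db,
              Err(e) => {
                  // another process may still hold the LOCK file; wait and retry
                  eprintln!("unable to open rocksdb ({}), retrying...", e);
                  std::thread::sleep(Duration::from_secs(1));
              }
          }
      }
  }
  ```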

* fix: baseline experiment

* chore: update chainhook-sdk

* fix: testnet support

* Squashed commit of the following:

commit a84c9517cf
Merge: aaef4b2 1ec9374
Author: Ludo Galabru <ludo@hiro.so>
Date:   Thu Nov 23 11:33:52 2023 -0500

    Merge branch 'main' into develop

commit aaef4b25df
Author: Ludo Galabru <ludo@hiro.so>
Date:   Thu Nov 23 11:32:47 2023 -0500

    chore: update dependencies

commit 1ec9374c3a
Author: semantic-release-bot <semantic-release-bot@martynus.net>
Date:   Thu Nov 23 12:33:35 2023 +0000

    chore(release): 1.1.0 [skip ci]

    ## [1.1.0](https://github.com/hirosystems/ordhook/compare/v1.0.1...v1.1.0) (2023-11-23)

    ### Features

    * ordhook-sdk-js refactoring ([#186](https://github.com/hirosystems/ordhook/issues/186)) ([0d145df](0d145dfb89))

    ### Bug Fixes

    * add destination to transfer events - release v1.0.2 ([47f365e](47f365eb47)), closes [#170](https://github.com/hirosystems/ordhook/issues/170) [#171](https://github.com/hirosystems/ordhook/issues/171) [#285](https://github.com/hirosystems/ordhook/issues/285) [#310](https://github.com/hirosystems/ordhook/issues/310) [#168](https://github.com/hirosystems/ordhook/issues/168) [#173](https://github.com/hirosystems/ordhook/issues/173) [#175](https://github.com/hirosystems/ordhook/issues/175) [#178](https://github.com/hirosystems/ordhook/issues/178) [#182](https://github.com/hirosystems/ordhook/issues/182) [#183](https://github.com/hirosystems/ordhook/issues/183)
    * build error / warning ([055c0d7](055c0d78d6))
    * ci ([ac3d458](ac3d4580f9))
    * CI rust version mismatch, create empty db  ([#173](https://github.com/hirosystems/ordhook/issues/173)) ([cd2842e](cd2842eac7))
    * databases lock ([d0b57c5](d0b57c5771))
    * enable streaming for in-memory observers ([#171](https://github.com/hirosystems/ordhook/issues/171)) ([50f8393](50f8393ae3))
    * grammar tweaks ([54e5fa1](54e5fa1321))
    * grammar tweaks ([e50aef0](e50aef00b4))
    * grammar updates ([66a4559](66a4559aec))
    * initial flow ([#178](https://github.com/hirosystems/ordhook/issues/178)) ([8bb24be](8bb24beb9a))
    * release 1.0.2 ([#179](https://github.com/hirosystems/ordhook/issues/179)) ([ec1f28e](ec1f28ea50)), closes [#170](https://github.com/hirosystems/ordhook/issues/170) [#171](https://github.com/hirosystems/ordhook/issues/171) [#285](https://github.com/hirosystems/ordhook/issues/285) [#310](https://github.com/hirosystems/ordhook/issues/310) [#168](https://github.com/hirosystems/ordhook/issues/168) [#173](https://github.com/hirosystems/ordhook/issues/173) [#175](https://github.com/hirosystems/ordhook/issues/175) [#178](https://github.com/hirosystems/ordhook/issues/178)
    * release develop ([#214](https://github.com/hirosystems/ordhook/issues/214)) ([4a31032](4a3103233b))
    * release v1.0.2 ([#180](https://github.com/hirosystems/ordhook/issues/180)) ([ac3915f](ac3915f035)), closes [#170](https://github.com/hirosystems/ordhook/issues/170) [#171](https://github.com/hirosystems/ordhook/issues/171) [#285](https://github.com/hirosystems/ordhook/issues/285) [#310](https://github.com/hirosystems/ordhook/issues/310) [#168](https://github.com/hirosystems/ordhook/issues/168) [#173](https://github.com/hirosystems/ordhook/issues/173) [#175](https://github.com/hirosystems/ordhook/issues/175) [#178](https://github.com/hirosystems/ordhook/issues/178)
    * service boot sequence ([#175](https://github.com/hirosystems/ordhook/issues/175)) ([a744825](a74482588c))

commit 4a3103233b
Author: Ludo Galabru <ludo@hiro.so>
Date:   Thu Nov 23 07:24:07 2023 -0500

    fix: release develop (#214)

commit d0b57c5771
Author: Ludo Galabru <ludo@hiro.so>
Date:   Thu Nov 23 07:18:06 2023 -0500

    fix: databases lock

commit 2b49397848
Merge: f820169 34fc0d3
Author: Ludo Galabru <ludo@hiro.so>
Date:   Thu Nov 23 07:17:01 2023 -0500

    Merge branch 'main' into develop

commit f820169aa0
Author: Ludo Galabru <ludo@hiro.so>
Date:   Thu Nov 23 07:15:25 2023 -0500

    fix: better handling of database locks (#200)

commit 977a30eb8d
Author: omahs <73983677+omahs@users.noreply.github.com>
Date:   Thu Nov 23 13:06:00 2023 +0100

    fix typos (#211)

commit 490fe01434
Author: Ludo Galabru <ludo@hiro.so>
Date:   Thu Nov 16 19:48:14 2023 -0500

    fix: testnet support (#208)

commit 0d2ff313d0
Author: Ordinarius <137325073+ordinariusprof@users.noreply.github.com>
Date:   Thu Nov 16 08:30:23 2023 -0800

    fix: work around issue #187 for testnet (#204)

    * Update mod.rs

    * workaround for #187

    * publish

    * build

    * revert

    * revert ci

commit 0714abf1b1
Author: aravindgee <aravindgee@users.noreply.github.com>
Date:   Wed Nov 15 10:31:43 2023 -0500

    CI: Enable CodeCov for Ordhook (#205)

commit 34fc0d362d
Merge: 47f365e 977ec59
Author: Ludo Galabru <ludo@hiro.so>
Date:   Wed Nov 1 20:55:07 2023 -0400

    Merge branch 'develop' into main

commit 47f365eb47
Author: Ludo Galabru <ludo@hiro.so>
Date:   Wed Nov 1 20:47:00 2023 -0400

    fix: add destination to transfer events - release v1.0.2

    * chore: stop auto-adding issues to DevTools Project (#170)

    * fix: enable streaming for in-memory observers (#171)

    * Squashed commit of the following:

    commit 9862b71c34
    Author: semantic-release-bot <semantic-release-bot@martynus.net>
    Date:   Thu Sep 7 00:06:39 2023 +0000

        chore(release): 1.0.0 [skip ci]

        ## 1.0.0 (2023-09-07)

        ### Features

        * ability to control inclusion of inputs/outputs/proofs/witness ([daf5547](daf55476c9))
        * ability to download hord.sqlite ([3dafa53](3dafa53ac0))
        * ability to generate config ([9fda9d0](9fda9d0d34))
        * ability to replay inscriptions ([f1adca9](f1adca9b0f))
        * ability to resume ([6c7eaa3](6c7eaa3bee))
        * ability to target blocks ([f6be49e](f6be49e24d))
        * ability to tolerate corrupted data ([adb1b98](adb1b988a6))
        * ability to track updates when scanning bitcoin (+refactor) ([9e54bff](9e54bfff35))
        * ability to update stacks db from cli + fix caching logic ([3ea9f59](3ea9f597af))
        * add command to check stacks db integrity ([322f473](322f47343c))
        * add get block command to cli ([97de0b0](97de0b071b))
        * add log, fix ordinal transfers scan ([c4202da](c4202dad2c))
        * add logs ([473ddd0](473ddd0595))
        * add metrics to `/ping` response of event observer server ([#297](https://github.com/hirosystems/ordhook/issues/297)) ([0e1ee7c](0e1ee7c1ee)), closes [#285](https://github.com/hirosystems/ordhook/issues/285)
        * add option to skip chainhook node ping ([a7c0b12](a7c0b12ad9))
        * add options for logs ([917090b](917090b408))
        * add post_transfer_output_value ([4ce0e9e](4ce0e9e5db))
        * add retry ([117e41e](117e41eae8))
        * add shared cache ([07523ae](07523aed1a))
        * add support for bitcoin op DelegatedStacking ([6516155](6516155055))
        * add transfers table ([db14f60](db14f60347))
        * always try to initialize tables when starting service ([1a9eddb](1a9eddb6aa))
        * attempt to scale up multithreading ([be91202](be91202d6b))
        * attempt to support cursed inscriptions ([9b45f90](9b45f908b8))
        * attempt transition to lazy model ([dda0b03](dda0b03ea3))
        * batch ingestion, improve cleaning ([168162e](168162e0dd))
        * better handling of blessed inscription turning cursed ([f11509a](f11509ab97))
        * cascade changes in CLI interface ([24f27fe](24f27fea63))
        * cascade hord activation ([42c090b](42c090ba7e))
        * chainhook-sdk config niceties ([7d9e179](7d9e179464))
        * class interface ([9dfec45](9dfec454f5))
        * client draft ([6a6451c](6a6451c864))
        * complete migration to lazy blocks ([fa50584](fa5058471a))
        * disable certs ([389f77d](389f77d473))
        * draft naive inscription detection ([9b3e38a](9b3e38a441))
        * draft ordhook-sdk-js ([b264e72](b264e7281b))
        * draft sha256 verification (wip) ([e6f0619](e6f0619a7c))
        * drafting lazy deserialization ([eaa2f71](eaa2f71fce))
        * dry config ([135297e](135297e978))
        * expose `is_streaming_blocks` prop ([1ba27d7](1ba27d7459))
        * expose more functions for working with the indexer ([654fead](654feadbdf))
        * expose scanning status in GET endpoint ([156c463](156c463cc0))
        * expose transfers_pre_inscription ([65afd77](65afd77492))
        * fetch full bitcoin block, including witness data ([ee9a345](ee9a3452ac))
        * fix download block ([38b50df](38b50df7a1))
        * handle stacks unconfirmed state scans ([f6d050f](f6d050fbce))
        * handle transfer ([fd5da52](fd5da52df4))
        * HTTP responses adjustments ([51572ef](51572efd93))
        * implement and document new development flow ([66019a0](66019a06e7))
        * implement zmq runloop ([c6c1c0e](c6c1c0ecce))
        * import inscription parser ([45e0147](45e0147ecf))
        * improve cli ergonomics ([991e33f](991e33ff42))
        * improve cli experience ([e865628](e8656285b2))
        * improve debug log ([5df77d7](5df77d7f84))
        * improve hord db commands ([21c09c2](21c09c296f))
        * improve onboarding ([deaa739](deaa739bdd))
        * improve ordinal scan efficiency ([e510d4b](e510d4bd09))
        * improve README ([f30e6f4](f30e6f4ed5))
        * improve repair command convenience ([46be0ab](46be0ab5a7))
        * improving curse approach ([dcb8054](dcb805485f))
        * in-house thread pool ([bc5ffdd](bc5ffddb5b))
        * inscription replay speedup ([33a4f8b](33a4f8b6af))
        * introduce check command ([f17dc4c](f17dc4c343))
        * introduce evaluation reports ([54ad874](54ad874ee5))
        * introduce migration script ([8c2b16c](8c2b16cc48))
        * introduce new predicate + refactor schemas ([611c79c](611c79cee3))
        * introduce rocksdb storage for Stacks ([4564e88](4564e8818a))
        * introduce sync command ([ab022e6](ab022e6098))
        * introduce terminate function ([91616f6](91616f6531))
        * is_streaming_blocks ([aacf487](aacf487de6))
        * keep 1st tx in cache ([0978a5d](0978a5d4c1))
        * logic to start ingestion during indexing ([3c1c99d](3c1c99df5d))
        * merge "inscription_revealed" and "inscription_transferred" into "inscription_feed" ([741290d](741290de13))
        * migrate stacks scans to rocksdb ([4408b1e](4408b1e7ec))
        * migration to rocksdb, moving json parsing from networking thread ([5ad0147](5ad0147fa0))
        * move thread pool size to config ([bc313fa](bc313fad5c))
        * multithread traversals ([fba5c89](fba5c89a48))
        * number of retries from 4 to 3 ([b294dff](b294dff69a))
        * optimize memory ([5db1531](5db1531a3d))
        * optimize replay ([be26dac](be26daccd0))
        * ordinal inscription_transfer code complete ([f55a5ee](f55a5ee167))
        * plug inscription processing in ibd ([df36617](df36617214))
        * plumbing for ordhook-sdk-js ([7487589](74875896a3))
        * polish `hord find sat_point` command ([d071484](d0714842a2))
        * polish first impression ([3c2b00c](3c2b00ce38))
        * predicate schemas ([198cdaa](198cdaa6c8))
        * prototype warmup ([fa6c86f](fa6c86fb1f))
        * re-approach stacks block commit schema ([218d599](218d5998d6))
        * re-implement satoshi overflows handling ([8ea5bdf](8ea5bdf819))
        * re-introduce ingestion ([71c90d7](71c90d755d))
        * restore ability to replay transfers ([98e7e9b](98e7e9b21d))
        * return enable in api ([f39259c](f39259ceeb))
        * return local result when known ([5441851](5441851db7))
        * revisit caching strategy ([2705b95](2705b9501b))
        * revisit threading model ([05b6d5c](05b6d5c4d7))
        * scan inscription revealed ([84c5a0c](84c5a0c521))
        * scan inscription revealed ([644d515](644d5155d2))
        * share traversals_cache over 10 blocks spans ([b0378c3](b0378c3099))
        * simplify + improve coordination ([1922fd9](1922fd9bc4))
        * start investigating zmq signaling ([0ec2653](0ec265380c))
        * streamline processors ([13421db](13421db297))
        * support cursed inscriptions in chainhook client ([d7cc5a4](d7cc5a4410))
        * support for latest archives, add logs ([494cf3c](494cf3c9a5))
        * tweak mmap / page_size values ([5316a57](5316a575b0))
        * update chainhook-sdk ([f052e08](f052e08469))
        * update inscription transfer logic ([9d0d106](9d0d106e9c))
        * update inscription transfer schemas ([f80e983](f80e983481))
        * upgrade `service start`  implementation + documentation ([02db65e](02db65e417))
        * use caching on streamed blocks ([784e9a0](784e9a0830))
        * use thread pools for scans ([45b9abd](45b9abd3e0))
        * zmq sockets ([d2e328a](d2e328aa57))

        ### Bug Fixes

        * ability to run without redis ([96825c3](96825c35a8))
        * add busy handler ([d712e0d](d712e0ddae))
        * add exp backoff ([f014c14](f014c14277))
        * add retry logic in rocksdb ([247df20](247df2088a))
        * add retry logic to work around unexpected responses from bitcoind ([2ab6b32](2ab6b32ff0))
        * additional adjustments ([fe26063](fe26063513))
        * additional fixes (network, address, offsets) ([8006000](8006000034))
        * address build warnings ([dc623a0](dc623a01e5))
        * address non-inscribed block case ([a7d08a3](a7d08a3722))
        * address redis disconnects ([a6b4a5f](a6b4a5fb38))
        * address remaining issues ([74b2fa9](74b2fa9411))
        * adjust error message ([3e7b0d0](3e7b0d03f9))
        * allow empty block ([fe8ce45](fe8ce455a1))
        * always fetch blocks ([97060a1](97060a13ca))
        * async/await regression ([676aac1](676aac196d))
        * attempt ([9e14fce](9e14fce0e4))
        * attempt to fix offset ([e6c5d0e](e6c5d0eed8))
        * attempt to retrieve blocks from iterator ([f718071](f718071b33))
        * attempt to tweak rocksdb ([11b9b6b](11b9b6be62))
        * auto enable stacks predicate ([30557f8](30557f8667))
        * backpressure on traversals ([3177e22](3177e22921))
        * batch inscription ([cd1085c](cd1085ceb0))
        * batch migration ([ed8b7ad](ed8b7ad2f3))
        * better redis error handling ([debb06c](debb06cd5c))
        * better support of reinscriptions ([a1410e2](a1410e29dd))
        * better termination ([8a5482c](8a5482c131))
        * binary name ([4950a50](4950a50381))
        * block streaming ([dcdfd16](dcdfd1655c))
        * boot sequence ([577f1c2](577f1c237e))
        * boot sequence, logs, format ([d03f851](d03f85178d))
        * borrow issue ([66e2a7c](66e2a7c785))
        * broken build ([f0d471e](f0d471ea8b))
        * broken test ([239b26a](239b26a614))
        * broken tests ([2ab6e7d](2ab6e7d679))
        * build ([4067f08](4067f0814f))
        * build ([607ac95](607ac953b1))
        * build error ([d6ed108](d6ed10894c))
        * build error ([bbede8b](bbede8b546))
        * build error ([fa802fa](fa802fae7a))
        * build error ([44ca74b](44ca74b2c5))
        * build error ([053b781](053b7815a8))
        * build error ([5c3bcf4](5c3bcf42fc))
        * build error ([b78c0cc](b78c0ccea6))
        * build error ([879ed67](879ed6775a))
        * build errors ([60cd4d0](60cd4d0c61))
        * build errors ([8dd91bf](8dd91bfce3))
        * build errors / merge snafu ([47da0c1](47da0c132a))
        * build errors + warnings ([938c6df](938c6dff27))
        * build failing ([83f1496](83f14964a6))
        * build warning ([561e51e](561e51eb27))
        * build warning ([75847df](75847df0d1))
        * build warning ([0194483](0194483b75))
        * build warnings ([d3e998c](d3e998c469))
        * build warnings ([e7ad175](e7ad175805))
        * build warnings ([670bde6](670bde6379))
        * bump incoming payload limit to 20mb ([7e15086](7e150861a4))
        * cache invalidation ([05bd903](05bd9035eb))
        * cache L2 capacity ([e2fbc73](e2fbc73eaf))
        * cache size ([ce61205](ce61205b96))
        * cache's ambitions ([e438db7](e438db7514))
        * Cargo.toml ([759c3a3](759c3a393f))
        * chain mixup, add logs ([0427a10](0427a10a63))
        * change forking behavior ([4c10014](4c100147c2))
        * clean expectations ([f9e089f](f9e089f90d))
        * clear cache more regularly ([c3b884f](c3b884fd30))
        * command for db patch ([27f6838](27f683818d))
        * commands doc ([3485e6f](3485e6f3d9))
        * compatibility with clarinet ([a282655](a28265509f))
        * condition ([0233dc5](0233dc5bf0))
        * create dummy inscription for sats overflow ([84aa6ce](84aa6ce7fd))
        * db init command ([55e293b](55e293b3ca))
        * decrease compression - from 4 bytes to 8 bytes ([b2eb314](b2eb31424b))
        * deployer predicate wildcard ([05ca395](05ca395da1))
        * disable sleep ([41ecace](41ecacee0e))
        * disable stream scan when scanning past blocks ([e2949d2](e2949d213a))
        * disambiguate inscription_output_value and inscription_fee ([9816cbb](9816cbb70a))
        * do not panic ([a0fa1a9](a0fa1a9301))
        * doc drift ([b595339](b595339024))
        * docker build ([df39302](df39302616))
        * docker file ([6ad5206](6ad52061eb))
        * dockerfile ([73ad612](73ad612ea4))
        * dockerfile ([da21ec4](da21ec4cb9))
        * documentation drift ([c5335a7](c5335a765c))
        * documentation drift ([38153ca](38153ca22f))
        * don't early exit when satoshi computing fail ([a8d76b0](a8d76b03ac))
        * don't enable predicate if error ([1274cbf](1274cbf9c4))
        * early return ([8f97b56](8f97b5643b))
        * edge case when requests processed in order ([8c4325f](8c4325f721))
        * edge case when requests processed out of order ([a35cea2](a35cea2b54))
        * edge case when requests processed out of order ([a6651b8](a6651b851f))
        * enable profiling ([f99b073](f99b073528))
        * enable specs on reboot ([f23be24](f23be246c2))
        * enforce db reconnection in http endpoints ([bcd2a45](bcd2a45a86))
        * enum serialization ([67cb340](67cb340674))
        * error management ([f0274f5](f0274f5726))
        * export all types on ts client ([be8bfbc](be8bfbcf60))
        * failing build ([1502d5d](1502d5d682))
        * fee ([0337f92](0337f92ce0))
        * filter out sat overflows from payloads ([ce439ae](ce439ae900))
        * gap in stacks scanning ([8c8c5c8](8c8c5c8611))
        * generator typo ([8a7eddb](8a7eddb092))
        * handle hint and case of re-inscriptions ([f86b184](f86b184832))
        * handle non-spending transaction ([cb01eb5](cb01eb55fd))
        * handle re-inscription for unbound inscriptions ([a1ffc1a](a1ffc1a59a))
        * hard coded dev-dependency ([5c105de](5c105de8b5))
        * ignore invalid inscription ([f18bc00](f18bc00f5a))
        * ignore transaction aborting that we could not classify ([37c80f7](37c80f7e83))
        * implement error handler ([d071b81](d071b81954))
        * improve progress bar ([b28da56](b28da5697d))
        * improve rewrite block command ([d524771](d52477142a))
        * in-block re-inscription case ([90db9c3](90db9c3d15))
        * include blocks discovered during scan, if any ([1eabce2](1eabce25c3))
        * include ordinals operations in standardized blocks ([a13351d](a13351d46a))
        * include proof on scan commands ([6574008](6574008ae8))
        * increase number of retries ([343ddd6](343ddd65a8))
        * indexing ([45661ab](45661ab62c))
        * inject l1 cache hit in results (+ clearing) ([62fd929](62fd92948e))
        * inscription fee ([2ac3022](2ac302235c))
        * inscription_number ([a7d8153](a7d8153a8c))
        * insert new locations ([6475aeb](6475aeb8d4))
        * iterate on values ([0c73e62](0c73e62902))
        * keep trying to open rocksdb conn if failing ([dbc794a](dbc794a0d4))
        * lazy block approach ([b567322](b567322859))
        * leader_registered doc ([f9d7370](f9d7370c43))
        * loading predicates from redis ([3bd308f](3bd308fb15))
        * log level, zeromq dependency ([4a2a6ef](4a2a6ef297))
        * logic determining start height ([5dd300f](5dd300fb05))
        * logs ([81be24e](81be24ef08))
        * mark inscriber_address as nullable ([77fd88b](77fd88b9c1))
        * more pessimism on retries ([9b987c5](9b987c51a9))
        * move parsing back to network thread ([bad1ee6](bad1ee6d4e))
        * moving stacks tip ([87c409e](87c409e01c))
        * multithreading cap ([c80ae60](c80ae60991))
        * myriad of improvements ([0633182](063318233d))
        * nefarious logs ([3b01a48](3b01a48f1e))
        * network, cascade changes ([1f45ec2](1f45ec26da))
        * off by one ([2a0e75f](2a0e75f6a3))
        * off by one ([c31611f](c31611fb28))
        * off by one ([94e1141](94e11411f8))
        * off by one ([abf70e7](abf70e7204))
        * off by one error ([3832cf9](3832cf9770))
        * off by one inscriptions number ([cdfbf48](cdfbf487fa))
        * off by one issue ([fead2ed](fead2ed693))
        * off by one issue ([a8988ba](a8988ba573))
        * off by one issue ([155e3a6](155e3a6d29))
        * off by one issue on sats overflow ([8a12004](8a120040e7))
        * off-by-one error in backward traversal ([d4128aa](d4128aa8a1))
        * off-by-one in sats number resolution ([42acbeb](42acbebcd5))
        * offset ([278a655](278a65524b))
        * only avoid override for blessed inscriptions ([b50bbc1](b50bbc1bf7))
        * optimize reg and dereg ([c2ec1b5](c2ec1b5283))
        * ordinals scans ([62b62bd](62b62bd98a))
        * outdated dockerfile ([771b036](771b0362b2))
        * outdated documentation ([f472a49](f472a49c42))
        * overridden inscriptions ([25c6441](25c6441404))
        * parsing ([1f047a9](1f047a9162))
        * patch absence of witness data ([f8fcfca](f8fcfcad6d))
        * patch boot latency ([0e3faf9](0e3faf9a61))
        * patch crash ([20d9df6](20d9df6c65))
        * patch db call ([d385df2](d385df2037))
        * pipeline logic ([a864c85](a864c85c33))
        * pipeline resuming ([06883c6](06883c655a))
        * ports ([3ee98a8](3ee98a8be9))
        * potential resolve coinbase spent ([5d26738](5d267380f7))
        * PoxInfo default for scan commands ([a00ccf5](a00ccf589a))
        * predicate documentation ([572cf20](572cf202ba))
        * predicate generator network ([8f0ae21](8f0ae216c8))
        * provide optional values ([2cbf87e](2cbf87ebcc))
        * re-apply initial fix ([f5cb516](f5cb516ee0))
        * re-arrange logs ([2857d0a](2857d0a1a4))
        * re-enable sleep ([0f61a26](0f61a26fda))
        * re-initiate inscriptions connection every 250 blocks ([39671f4](39671f4378))
        * re-qualify error to warn ([9431684](9431684afe))
        * re-wire cmd ([a1447ad](a1447ad277))
        * README ([db1d584](db1d584827))
        * recreate db conn on a regular basis ([81d8575](81d85759a4))
        * redis update ([d4889f1](d4889f16b7))
        * related issue ([4b3a0da](4b3a0daa43))
        * remove rocksdb reconnect ([f2b067e](f2b067e85e))
        * remove sleep ([c371e74](c371e74de7))
        * remove start logic ([a04711a](a04711ad7c))
        * remove support for p2wsh inscription reveal support ([4fe71f2](4fe71f2622))
        * remove symbols ([108117b](108117b82e))
        * remove thread_max * 2 ([359c6f9](359c6f9422))
        * reopen connect on failures ([3e15da5](3e15da5565))
        * reply with 500 on payload processing error ([eaa6d7b](eaa6d7b640))
        * report generation ([0dce12a](0dce12a4e2))
        * restore stable values ([fb5c591](fb5c591943))
        * return blocks to rollback in reverse order ([9fab5a3](9fab5a34a2))
        * reuse existing computation for fix ([222f7c3](222f7c3a14))
        * revert fix, avoid collision in traversals map ([dfcadec](dfcadec680))
        * revisit log level ([4168661](416866123a))
        * revisit transfer loop ([1f2151c](1f2151c098))
        * rocket_okapi version ([2af31a8](2af31a8e64))
        * safer db open, dockerfile ([43d37d7](43d37d73f2))
        * safer error handling ([11509e4](11509e4435))
        * sat offset computation ([b278b66](b278b66f84))
        * sats overflow handling ([a3f745c](a3f745cfa7))
        * schema for curse_type ([72d43c6](72d43c6b41))
        * serialize handlers in one thread ([cdfc264](cdfc264cff))
        * slow down initial configuration ([3096ad3](3096ad3b26))
        * sql query ([1a3bc42](1a3bc428ea))
        * sql query bis ([a479884](a4798848b1))
        * sql request ([6345df2](6345df2652))
        * sql table setup ([c8884a7](c8884a7dbe))
        * stack overflow ([aed7d5d](aed7d5d005))
        * stacks predicate format ([fcf9fb0](fcf9fb0e3f))
        * start_block off by one ([b99f7b0](b99f7b0011))
        * streamline txid handling ([ad48351](ad48351044))
        * test suite ([c7672f9](c7672f91a1))
        * test warns and errors ([0887d6b](0887d6b8ca))
        * threading model ([c9c43ae](c9c43ae3e3))
        * threading model ([c2354fc](c2354fcacd))
        * track interrupted scans ([2b51dca](2b51dca8f3))
        * transaction type schema ([c35a737](c35a737ed2))
        * transfer recomputing commit ([3643636](364363680f))
        * transfer tracking ([0ea85e3](0ea85e3d20))
        * transfer tracking ([30f299e](30f299ef7c))
        * transfer tracking ([0cd29f5](0cd29f5925))
        * transfer tracking + empty blocks ([dc94875](dc948755b2))
        * traversals algo ([e8ee3ab](e8ee3ab036))
        * tweak rocksdb options ([a0a6950](a0a69502d8))
        * typo ([b0498bb](b0498bb048))
        * typo ([baa773f](baa773ff4d))
        * unexpected expectation ([7dd362b](7dd362b4f5))
        * unify rosetta operation schemas ([bf3216b](bf3216b100))
        * unused imports ([3aab402](3aab4022ab))
        * update chainhook schema ([4e82714](4e8271491b))
        * update inscription_number ([89b94e7](89b94e7d5d))
        * update license ([6ebeb77](6ebeb77d6a))
        * update rust version in docker build ([fab6f69](fab6f69df5))
        * update spec status ([e268925](e2689255b7))
        * update/pin dependencies ([#311](https://github.com/hirosystems/ordhook/issues/311)) ([f54b374](f54b374b24)), closes [#310](https://github.com/hirosystems/ordhook/issues/310)
        * use first input to stick with ord spec interpretation / implementation ([206678f](206678f0d1))
        * use rpc instead of rest ([1b18818](1b188182f1))
        * zeromq, subsidy issue ([dbca70c](dbca70c197))

        ### Reverts

        * Revert "chore: tmp patch" ([3e022ca](3e022ca322))

    commit 4ef18d5b1e
    Merge: d111c44 4cde5e8
    Author: Scott McClellan <scott.mcclellan@gmail.com>
    Date:   Wed Sep 6 18:44:26 2023 -0500

        Merge pull request #168 from hirosystems/develop

        Merge up `develop` to `main`

    * fix: CI rust version mismatch, create empty db  (#173)

    * fix: create db if it does not exist

    * chore: update rust version

    * chore: bump version to 1.0.1

    * fix: service boot sequence (#175)

    * fix: ci

    * fix: initial flow (#178)

    * chore: update chainhook-sdk + cascade changes

    * fix: update archive url

    * feat: only create rocksdb if sqlite present

    * fix: use crossbeam channel instead of std

    * fix: improve error message

    * doc: update README

    * fix: build warnings

    * fix: block_archiving expiration

    * fix: archive url

    * fix: read content len from http header

    * chore: untar sqlite file

    * chore: bump versions

    * fix: build error / warning

    * change title (#182)

    * Ordhook doc updates (#183)

    * update copy

    * add openapi reference file to ordhook docs for better context

    * typo in reference link for bitcoind

    * remove references to chainhook

    * break out guides for scanning and streaming ordinal activities

    * fix references to Ordhook.toml

    * update content for each guide

    * replace mentions of Chainhook

    ---------

    Co-authored-by: Max Efremov <51917427+mefrem@users.noreply.github.com>

    * provide a pointer to rust installation and a next-steps section linking to guides (#184)

    * update Ordhook.toml (#185)

    * fix: grammar tweaks

    grammar tweaks

    * fix: grammar tweaks

    Grammar tweaks

    * fix: grammar updates

    grammar updates

    Co-authored-by: Ludo Galabru <ludo@hiro.so>

    * doc: update getting-started

    Co-authored-by: Ludo Galabru <ludo@hiro.so>

    * doc: update overview.md

    Updated grammar

    Co-authored-by: Ludo Galabru <ludo@hiro.so>

    * feat: ordhook-sdk-js refactoring (#186)

    ---------

    Co-authored-by: Scott McClellan <scott.mcclellan@gmail.com>
    Co-authored-by: Ryan <ryan.waits@gmail.com>
    Co-authored-by: Max Efremov <51917427+mefrem@users.noreply.github.com>
    Co-authored-by: max-crawford <102705427+max-crawford@users.noreply.github.com>

* chore: update Cargo.lock

* chore: cargo fmt

* fix: merge artifact

* fix: rocksdb fine tuning

* fix: Cargo.toml warns

* feat: auto-repair at boot

* fix: rocksdb conn handling

* fix: improve backoff strategy
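
  For context, the usual shape of such a backoff is exponential growth with a cap; the base, factor, and ceiling below are assumptions, not the values this commit ships:

  ```rust
  // Illustrative exponential backoff with a cap.
  fn backoff_delay_ms(attempt: u32) -> u64 {
      let base_ms: u64 = 100;
      let max_ms: u64 = 10_000;
      base_ms.saturating_mul(2u64.saturating_pow(attempt)).min(max_ms)
  }
  ```

  With these constants, attempt 0 waits 100ms, attempt 3 waits 800ms, and everything past attempt 6 is clamped to 10s.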

* chore: refactor BlockBytesCursor usage

* fix: update sequence_metadata when augmenting block

* feat: revisit observers handling

# Conflicts:
#	components/ordhook-core/src/service/mod.rs

* fix: aborted loop

* fix: stateful observers

* fix: build warnings

* chore: tweak db config

* fix: include coinbase output offset

* chore: tweak cli

* fix: iterator usage

* chore: disable blobdb

* fix: broken test

* chore: update dependencies

* chore: augment db schema

* chore: update ord

* chore: inscription parsing re-integration

* feat: persist inscription_subindex

* feat: update sequence_metadata

* fix: sql field name

* fix: build warnings

* chore: update chainhook sdk

* fix: indexing subindex

* fix: tx chaining reinscription handling

* chore: add missing indexes

* chore: restore set_max_open_files

* fix: dockerfile

* feat: panic when unable to retrieve ordinal number

* chore: disable integrity check for now

* chore: exit process on error

* fix: attempt to fix race condition

* chore: disable cache tmp

* fix: patch traversals can be reused

* fix: patch traversals can be reused

* chore: fix expectations

* fix: replay

* bump: major version

Commit 424f5bb98c (parent 0922265008)
Author: Ludo Galabru
Date:   2024-01-04 22:10:27 -05:00 (committed via GitHub)

28 changed files with 36217 additions and 1076 deletions

Cargo.lock (generated, 578 changed lines): diff suppressed because it is too large.


@@ -1,6 +1,6 @@
 [package]
 name = "ordhook-cli"
-version = "1.0.2"
+version = "2.0.0"
 edition = "2021"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -14,7 +14,11 @@ num_cpus = "1.16.0"
 serde = "1"
 serde_json = "1"
 serde_derive = "1"
-reqwest = { version = "0.11", default-features = false, features = ["stream", "json", "rustls-tls"] }
+reqwest = { version = "0.11", default-features = false, features = [
+    "stream",
+    "json",
+    "rustls-tls",
+] }
 hiro-system-kit = "0.3.1"
 clap = { version = "3.2.23", features = ["derive"], optional = true }
 clap_generate = { version = "3.0.3", optional = true }


@@ -23,13 +23,14 @@ use ordhook::core::protocol::inscription_parsing::parse_inscriptions_and_standardize_block;
 use ordhook::core::protocol::satoshi_numbering::compute_satoshi_number;
 use ordhook::db::{
     delete_data_in_ordhook_db, find_all_inscription_transfers, find_all_inscriptions_in_block,
-    find_all_transfers_in_block, find_inscription_with_id, find_last_block_inserted,
-    find_latest_inscription_block_height, find_lazy_block_at_block_height,
+    find_all_transfers_in_block, find_block_bytes_at_block_height, find_inscription_with_id,
+    find_last_block_inserted, find_latest_inscription_block_height, find_missing_blocks,
     get_default_ordhook_db_file_path, initialize_ordhook_db, open_ordhook_db_conn_rocks_db_loop,
     open_readonly_ordhook_db_conn, open_readonly_ordhook_db_conn_rocks_db,
-    open_readwrite_ordhook_db_conn,
+    open_readwrite_ordhook_db_conn, BlockBytesCursor,
 };
 use ordhook::download::download_ordinals_dataset_if_required;
+use ordhook::hex;
 use ordhook::scan::bitcoin::scan_bitcoin_chainstate_via_rpc_using_predicate;
 use ordhook::service::{start_observer_forwarding, Service};
 use reqwest::Client as HttpClient;
@@ -577,7 +578,7 @@ async fn handle_command(opts: Opts, ctx: &Context) -> Result<(), String> {
             let mut total_transfers_in_block = 0;
             for (_, inscription) in inscriptions.iter() {
-                println!("Inscription {} revealed at block #{} (inscription_number {}, ordinal_number {})", inscription.get_inscription_id(), block_height, inscription.inscription_number, inscription.ordinal_number);
+                println!("Inscription {} revealed at block #{} (inscription_number {}, ordinal_number {})", inscription.get_inscription_id(), block_height, inscription.inscription_number.jubilee, inscription.ordinal_number);
                 if let Some(transfers) = locations.remove(&inscription.get_inscription_id())
                 {
                     for t in transfers.iter().skip(1) {
@@ -639,7 +640,7 @@
                     "Inscription {} revealed at block #{} (inscription_number {}, ordinal_number {})",
                     inscription.get_inscription_id(),
                     block_height,
-                    inscription.inscription_number,
+                    inscription.inscription_number.jubilee,
                     inscription.ordinal_number
                 );
                 let transfers = find_all_inscription_transfers(
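
Both hunks above stop reading `inscription.inscription_number` directly and take its `.jubilee` field instead: with ord's Jubilee upgrade, an inscription carries two numbers. A plausible shape of the type, inferred from the `.jubilee` access here, the `classic`-numbered db helpers later in this diff, and `OrdinalInscriptionNumber::zero()` in the parsing rewrite; the real chainhook-sdk definition may differ:

```rust
// Assumed shape, inferred from this diff rather than copied from chainhook-sdk.
pub struct OrdinalInscriptionNumber {
    pub classic: i64, // pre-Jubilee numbering, negative for cursed inscriptions
    pub jubilee: i64, // post-Jubilee numbering, cursed inscriptions included
}

impl OrdinalInscriptionNumber {
    pub fn zero() -> Self {
        OrdinalInscriptionNumber { classic: 0, jubilee: 0 }
    }
}
```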
@@ -668,15 +669,19 @@
                 .await?;
             let transaction_identifier = TransactionIdentifier::new(&cmd.transaction_id);
             let cache = new_traversals_lazy_cache(100);
-            let res = compute_satoshi_number(
+            let (res, mut back_trace) = compute_satoshi_number(
                 &config.get_ordhook_config().db_path,
                 &block.block_identifier,
                 &transaction_identifier,
                 0,
                 0,
                 &Arc::new(cache),
+                true,
                 ctx,
             )?;
+            back_trace.reverse();
+            for (block_height, tx) in back_trace.iter() {
+                println!("{}\t{}", block_height, hex::encode(tx));
+            }
             println!("{:?}", res);
         }
         Command::Service(subcmd) => match subcmd {
@@ -793,9 +798,10 @@
                 ctx,
             );
             for i in cmd.get_blocks().into_iter() {
-                let block =
-                    find_lazy_block_at_block_height(i as u32, 10, false, &blocks_db, ctx)
+                let block_bytes =
+                    find_block_bytes_at_block_height(i as u32, 10, &blocks_db, ctx)
                         .expect("unable to retrieve block {i}");
+                let block = BlockBytesCursor::new(&block_bytes);
                 info!(ctx.expect_logger(), "--------------------");
                 info!(ctx.expect_logger(), "Block: {i}");
                 for tx in block.iter_tx() {
@@ -861,18 +867,9 @@
             {
                 let blocks_db =
                     open_readonly_ordhook_db_conn_rocks_db(&config.expected_cache_path(), ctx)?;
-                let tip = find_last_block_inserted(&blocks_db) as u64;
+                let tip = find_last_block_inserted(&blocks_db);
                 println!("Tip: {}", tip);
-                let mut missing_blocks = vec![];
-                for i in cmd.start_block..=cmd.end_block {
-                    if find_lazy_block_at_block_height(i as u32, 0, false, &blocks_db, ctx)
-                        .is_none()
-                    {
-                        println!("Missing block #{i}");
-                        missing_blocks.push(i);
-                    }
-                }
+                let missing_blocks = find_missing_blocks(&blocks_db, 1, tip, ctx);
                 println!("{:?}", missing_blocks);
             }
         }
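
The per-height probing loop removed here moves into the db layer as `find_missing_blocks`. A sketch of the equivalent scan, with block presence abstracted behind a closure since the real RocksDB accessor isn't shown in this hunk:

```rust
// Sketch only: `has_block` stands in for a RocksDB point lookup per height.
fn find_missing_blocks_sketch(has_block: impl Fn(u32) -> bool, start: u32, end: u32) -> Vec<u32> {
    (start..=end).filter(|height| !has_block(*height)).collect()
}
```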


@@ -50,6 +50,7 @@ impl ConfigFile {
             "devnet" => (StacksNetwork::Devnet, BitcoinNetwork::Regtest),
             "testnet" => (StacksNetwork::Testnet, BitcoinNetwork::Testnet),
             "mainnet" => (StacksNetwork::Mainnet, BitcoinNetwork::Mainnet),
+            "signet" => (StacksNetwork::Testnet, BitcoinNetwork::Signet),
             _ => return Err("network.mode not supported".to_string()),
         };

@@ -1,6 +1,6 @@
 [package]
 name = "ordhook"
-version = "0.5.0"
+version = "0.6.0"
 edition = "2021"

 [dependencies]
@@ -10,15 +10,15 @@ serde_json = "1"
 serde_derive = "1"
 hex = "0.4.3"
 rand = "0.8.5"
-chainhook-sdk = { version = "0.11.0", features = ["zeromq"] }
-# chainhook-sdk = { version = "=0.10.1", path = "../../../chainhook/components/chainhook-sdk", default-features = false, features = ["zeromq", "log"] }
+chainhook-sdk = { version = "=0.12.0", features = ["zeromq"] }
+# chainhook-sdk = { version = "=0.12.0", path = "../../../chainhook/components/chainhook-sdk", features = ["zeromq"] }
 hiro-system-kit = "0.3.1"
 reqwest = { version = "0.11", default-features = false, features = ["stream", "json", "rustls-tls"] }
-tokio = { version = "=1.24", features = ["full"] }
+tokio = { version = "1.35.1", features = ["full"] }
 futures-util = "0.3.24"
 flate2 = "1.0.24"
 tar = "0.4.38"
-flume = "0.10.14"
+flume = "0.11.0"
 ansi_term = "0.12.1"
 atty = "0.2.14"
 crossbeam-channel = "0.5.8"
@@ -35,6 +35,8 @@ progressing = '3'
 futures = "0.3.28"
 rocksdb = { version = "0.21.0", default-features = false, features = ["snappy"] }
 pprof = { version = "0.13.0", features = ["flamegraph"], optional = true }
+hyper = { version = "=0.14.27" }
+lazy_static = { version = "1.4.0"}

 # [profile.release]
 # debug = true

@@ -14,7 +14,7 @@ use chainhook_sdk::{
 use crate::{
     config::{Config, LogConfig},
-    db::{find_lazy_block_at_block_height, open_ordhook_db_conn_rocks_db_loop},
+    db::{find_pinned_block_bytes_at_block_height, open_ordhook_db_conn_rocks_db_loop},
 };

 use crate::db::{
@@ -22,7 +22,7 @@ use crate::db::{
     open_readonly_ordhook_db_conn,
 };

-use crate::db::LazyBlockTransaction;
+use crate::db::TransactionBytesCursor;

 #[derive(Clone, Debug)]
 pub struct OrdhookConfig {
@@ -44,11 +44,11 @@ pub fn new_traversals_cache(
 pub fn new_traversals_lazy_cache(
     cache_size: usize,
-) -> DashMap<(u32, [u8; 8]), LazyBlockTransaction, BuildHasherDefault<FxHasher>> {
+) -> DashMap<(u32, [u8; 8]), TransactionBytesCursor, BuildHasherDefault<FxHasher>> {
     let hasher = FxBuildHasher::default();
     DashMap::with_capacity_and_hasher(
         ((cache_size.saturating_sub(500)) * 1000 * 1000)
-            .div(LazyBlockTransaction::get_average_bytes_size()),
+            .div(TransactionBytesCursor::get_average_bytes_size()),
         hasher,
     )
 }
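
The capacity formula above treats `cache_size` as a memory budget in MB, reserves 500 of them, and divides the remainder by the average serialized transaction size; note that the CLI's `new_traversals_lazy_cache(100)` earlier in this diff therefore pre-allocates nothing (the DashMap still grows on demand). A worked example with an assumed 100-byte average entry standing in for `TransactionBytesCursor::get_average_bytes_size()`:

```rust
// Illustrative arithmetic only; 100 bytes is an assumed average entry size.
let cache_size: usize = 1000; // budget in MB
let avg_entry_bytes: usize = 100;
let capacity = cache_size.saturating_sub(500) * 1000 * 1000 / avg_entry_bytes;
assert_eq!(capacity, 5_000_000); // pre-allocated entries
```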
@@ -139,7 +139,7 @@ pub fn should_sync_ordhook_db(
     match find_latest_inscription_block_height(&inscriptions_db_conn, ctx)? {
         Some(height) => {
-            if find_lazy_block_at_block_height(height as u32, 3, false, &blocks_db, &ctx).is_none()
+            if find_pinned_block_bytes_at_block_height(height as u32, 3, &blocks_db, &ctx).is_none()
             {
                 start_block = start_block.min(height);
             } else {


@@ -10,7 +10,7 @@ use std::time::Duration;
 use tokio::task::JoinSet;

 use crate::config::Config;
-use crate::db::LazyBlock;
+use crate::db::BlockBytesCursor;

 use chainhook_sdk::indexer::bitcoin::{
     build_http_client, parse_downloaded_block, try_download_block_bytes_with_retry,
@@ -19,7 +19,7 @@ use chainhook_sdk::indexer::bitcoin::{
 use super::protocol::inscription_parsing::parse_inscriptions_and_standardize_block;

 pub enum PostProcessorCommand {
-    ProcessBlocks(Vec<(u64, LazyBlock)>, Vec<BitcoinBlockData>),
+    ProcessBlocks(Vec<(u64, Vec<u8>)>, Vec<BitcoinBlockData>),
     Terminate,
 }
@@ -111,7 +111,7 @@
                 while let Ok(Some(block_bytes)) = rx.recv() {
                     let raw_block_data =
                         parse_downloaded_block(block_bytes).expect("unable to parse block");
-                    let compressed_block = LazyBlock::from_full_block(&raw_block_data)
+                    let compressed_block = BlockBytesCursor::from_full_block(&raw_block_data)
                         .expect("unable to compress block");
                     let block_height = raw_block_data.height as u64;
                     let block_data = if block_height >= start_sequencing_blocks_at_height {
@@ -177,7 +177,7 @@
                 }
             }
             None => {
-                stop_runloop = true;
+                break;
             }
         }
     }
@@ -195,9 +195,9 @@
     let mut ooo_compacted_blocks = vec![];
     for (block_height, block_opt, compacted_block) in new_blocks.into_iter() {
         if let Some(block) = block_opt {
-            inbox.insert(block_height, (block, compacted_block));
+            inbox.insert(block_height, (block, compacted_block.to_vec()));
         } else {
-            ooo_compacted_blocks.push((block_height, compacted_block));
+            ooo_compacted_blocks.push((block_height, compacted_block.to_vec()));
         }
     }


@@ -9,7 +9,7 @@ use std::{
 use crate::{
     config::Config,
     core::pipeline::{PostProcessorCommand, PostProcessorController, PostProcessorEvent},
-    db::{insert_entry_in_blocks, open_ordhook_db_conn_rocks_db_loop, LazyBlock},
+    db::{insert_entry_in_blocks, open_ordhook_db_conn_rocks_db_loop},
 };

 pub fn start_block_archiving_processor(
@@ -72,7 +72,7 @@
 }

 pub fn store_compacted_blocks(
-    mut compacted_blocks: Vec<(u64, LazyBlock)>,
+    mut compacted_blocks: Vec<(u64, Vec<u8>)>,
     update_tip: bool,
     blocks_db_rw: &DB,
     ctx: &Context,


@@ -38,7 +38,7 @@ use crate::{
     },
 };

-use crate::db::{LazyBlockTransaction, TraversalResult};
+use crate::db::{TransactionBytesCursor, TraversalResult};

 use crate::{
     config::Config,
@@ -68,8 +68,6 @@ pub fn start_inscription_indexing_processor(
     let mut inscriptions_db_conn_rw =
         open_readwrite_ordhook_db_conn(&config.expected_cache_path(), &ctx).unwrap();
     let ordhook_config = config.get_ordhook_config();
-    let blocks_db_rw =
-        open_ordhook_db_conn_rocks_db_loop(true, &config.expected_cache_path(), &ctx);
     let mut empty_cycles = 0;

     let inscriptions_db_conn =
@@ -105,11 +103,12 @@
             },
         };

-        // Early return
-        if blocks.is_empty() {
-            store_compacted_blocks(compacted_blocks, true, &blocks_db_rw, &ctx);
-            continue;
-        } else {
+        {
+            let blocks_db_rw = open_ordhook_db_conn_rocks_db_loop(
+                true,
+                &config.expected_cache_path(),
+                &ctx,
+            );
             store_compacted_blocks(
                 compacted_blocks,
                 true,
@@ -118,8 +117,12 @@
             );
         }

-        ctx.try_log(|logger| info!(logger, "Processing {} blocks", blocks.len()));
+        // Early return
+        if blocks.is_empty() {
+            continue;
+        }
+        ctx.try_log(|logger| info!(logger, "Processing {} blocks", blocks.len()));

         blocks = process_blocks(
             &mut blocks,
             &mut sequence_cursor,
@@ -131,7 +134,6 @@
         );

         garbage_collect_nth_block += blocks.len();
         if garbage_collect_nth_block > garbage_collect_every_n_blocks {
-            ctx.try_log(|logger| info!(logger, "Performing garbage collecting"));
@@ -162,7 +164,7 @@
 pub fn process_blocks(
     next_blocks: &mut Vec<BitcoinBlockData>,
     sequence_cursor: &mut SequenceCursor,
-    cache_l2: &Arc<DashMap<(u32, [u8; 8]), LazyBlockTransaction, BuildHasherDefault<FxHasher>>>,
+    cache_l2: &Arc<DashMap<(u32, [u8; 8]), TransactionBytesCursor, BuildHasherDefault<FxHasher>>>,
     inscriptions_db_conn_rw: &mut Connection,
     ordhook_config: &OrdhookConfig,
     post_processor: &Option<Sender<BitcoinBlockData>>,
@@ -199,7 +201,7 @@
     let inscriptions_revealed = get_inscriptions_revealed_in_block(&block)
         .iter()
-        .map(|d| d.inscription_number.to_string())
+        .map(|d| d.get_inscription_number().to_string())
         .collect::<Vec<String>>();

     let inscriptions_transferred = get_inscriptions_transferred_in_block(&block).len();
@@ -259,7 +261,7 @@ pub fn process_block(
     next_blocks: &Vec<BitcoinBlockData>,
     sequence_cursor: &mut SequenceCursor,
     cache_l1: &mut BTreeMap<(TransactionIdentifier, usize), TraversalResult>,
-    cache_l2: &Arc<DashMap<(u32, [u8; 8]), LazyBlockTransaction, BuildHasherDefault<FxHasher>>>,
+    cache_l2: &Arc<DashMap<(u32, [u8; 8]), TransactionBytesCursor, BuildHasherDefault<FxHasher>>>,
     inscriptions_db_tx: &Transaction,
     ordhook_config: &OrdhookConfig,
     ctx: &Context,


@@ -1,317 +1,89 @@
-use std::collections::BTreeMap;
-use std::str::FromStr;
-
+use chainhook_sdk::bitcoincore_rpc_json::bitcoin::hashes::hex::FromHex;
+use chainhook_sdk::bitcoincore_rpc_json::bitcoin::Txid;
+use chainhook_sdk::indexer::bitcoin::BitcoinTransactionFullBreakdown;
 use chainhook_sdk::indexer::bitcoin::{standardize_bitcoin_block, BitcoinBlockFullBreakdown};
 use chainhook_sdk::types::{
     BitcoinBlockData, BitcoinNetwork, BitcoinTransactionData, OrdinalInscriptionCurseType,
-    OrdinalInscriptionRevealData, OrdinalInscriptionTransferData, OrdinalOperation,
+    OrdinalInscriptionNumber, OrdinalInscriptionRevealData, OrdinalInscriptionTransferData,
+    OrdinalOperation,
 };
 use chainhook_sdk::utils::Context;
-use chainhook_sdk::{
-    bitcoincore_rpc::bitcoin::Transaction, indexer::bitcoin::BitcoinTransactionFullBreakdown,
-};
+use std::collections::BTreeMap;
+use std::str::FromStr;

+use crate::ord::envelope::{Envelope, ParsedEnvelope, RawEnvelope};
+use crate::ord::inscription::Inscription;
+use crate::ord::inscription_id::InscriptionId;

-use {
-    chainhook_sdk::bitcoincore_rpc::bitcoin::{
-        blockdata::{
-            opcodes,
-            script::{self, Instruction, Instructions},
-        },
-        util::taproot::TAPROOT_ANNEX_PREFIX,
-        Script, Witness,
-    },
-    std::{iter::Peekable, str},
-};
-
-const PROTOCOL_ID: &[u8] = b"ord";
-
-const BODY_TAG: &[u8] = &[];
-const CONTENT_TYPE_TAG: &[u8] = &[1];
-
-#[derive(Debug, PartialEq, Clone)]
-pub struct Inscription {
-    pub body: Option<Vec<u8>>,
-    pub content_type: Option<Vec<u8>>,
-    pub curse: Option<OrdinalInscriptionCurseType>,
-}
-
-impl Inscription {
-    pub fn from_transaction(tx: &Transaction) -> Option<Inscription> {
-        InscriptionParser::parse(&tx.input.get(0)?.witness).ok()
-    }
-
-    pub(crate) fn body(&self) -> Option<&[u8]> {
-        Some(self.body.as_ref()?)
-    }
-
-    pub(crate) fn content_type(&self) -> Option<&str> {
-        str::from_utf8(self.content_type.as_ref()?).ok()
-    }
-}
-
-#[derive(Debug, PartialEq)]
-pub enum InscriptionError {
-    EmptyWitness,
-    InvalidInscription,
-    KeyPathSpend,
-    NoInscription,
-    Script(script::Error),
-    UnrecognizedEvenField,
-}
-
-type Result<T, E = InscriptionError> = std::result::Result<T, E>;
-
-pub struct InscriptionParser<'a> {
-    pub instructions: Peekable<Instructions<'a>>,
-}
-
-impl<'a> InscriptionParser<'a> {
-    pub fn parse(witness: &Witness) -> Result<Inscription> {
-        if witness.is_empty() {
-            return Err(InscriptionError::EmptyWitness);
-        }
-
-        if witness.len() == 1 {
-            return Err(InscriptionError::KeyPathSpend);
-        }
-
-        let annex = witness
-            .last()
-            .and_then(|element| element.first().map(|byte| *byte == TAPROOT_ANNEX_PREFIX))
-            .unwrap_or(false);
-
-        if witness.len() == 2 && annex {
-            return Err(InscriptionError::KeyPathSpend);
-        }
-
-        let script = witness
-            .iter()
-            .nth(if annex {
-                witness.len() - 1
-            } else {
-                witness.len() - 2
-            })
-            .unwrap();
-
-        InscriptionParser {
-            instructions: Script::from(Vec::from(script)).instructions().peekable(),
-        }
-        .parse_script()
-    }
-
-    pub fn parse_script(mut self) -> Result<Inscription> {
-        loop {
-            let next = self.advance()?;
-
-            if next == Instruction::PushBytes(&[]) {
-                if let Some(inscription) = self.parse_inscription()? {
-                    return Ok(inscription);
-                }
-            }
-        }
-    }
-
-    fn advance(&mut self) -> Result<Instruction<'a>> {
-        self.instructions
-            .next()
-            .ok_or(InscriptionError::NoInscription)?
-            .map_err(InscriptionError::Script)
-    }
-
-    fn parse_inscription(&mut self) -> Result<Option<Inscription>> {
-        if self.advance()? == Instruction::Op(opcodes::all::OP_IF) {
-            if !self.accept(Instruction::PushBytes(PROTOCOL_ID))? {
-                return Err(InscriptionError::NoInscription);
-            }
-
-            let mut fields = BTreeMap::new();
-
-            loop {
-                match self.advance()? {
-                    Instruction::PushBytes(BODY_TAG) => {
-                        let mut body = Vec::new();
-                        while !self.accept(Instruction::Op(opcodes::all::OP_ENDIF))? {
-                            body.extend_from_slice(self.expect_push()?);
-                        }
-                        fields.insert(BODY_TAG, body);
-                        break;
-                    }
-                    Instruction::PushBytes(tag) => {
-                        if fields.contains_key(tag) {
-                            return Err(InscriptionError::InvalidInscription);
-                        }
-                        fields.insert(tag, self.expect_push()?.to_vec());
-                    }
-                    Instruction::Op(opcodes::all::OP_ENDIF) => break,
-                    _ => return Err(InscriptionError::InvalidInscription),
-                }
-            }
-
-            let body = fields.remove(BODY_TAG);
-            let content_type = fields.remove(CONTENT_TYPE_TAG);
-
-            for tag in fields.keys() {
-                if let Some(lsb) = tag.first() {
-                    if lsb % 2 == 0 {
-                        return Ok(Some(Inscription {
-                            body,
-                            content_type,
-                            curse: Some(OrdinalInscriptionCurseType::Tag(*lsb)),
-                        }));
-                    }
-                }
-            }
-
-            return Ok(Some(Inscription {
-                body,
-                content_type,
-                curse: None,
-            }));
-        }
-
-        Ok(None)
-    }
-
-    fn expect_push(&mut self) -> Result<&'a [u8]> {
-        match self.advance()? {
-            Instruction::PushBytes(bytes) => Ok(bytes),
-            _ => Err(InscriptionError::InvalidInscription),
-        }
-    }
-
-    fn accept(&mut self, instruction: Instruction) -> Result<bool> {
-        match self.instructions.peek() {
-            Some(Ok(next)) => {
-                if *next == instruction {
-                    self.advance()?;
-                    Ok(true)
-                } else {
-                    Ok(false)
-                }
-            }
-            Some(Err(err)) => Err(InscriptionError::Script(*err)),
-            None => Ok(false),
-        }
-    }
-}
-
-#[derive(Debug, PartialEq, Copy, Clone)]
-pub(crate) enum Media {
-    Audio,
-    Iframe,
-    Image,
-    Pdf,
-    Text,
-    Unknown,
-    Video,
-}
-
-impl Media {
-    const TABLE: &'static [(&'static str, Media, &'static [&'static str])] = &[
-        ("application/json", Media::Text, &["json"]),
-        ("application/pdf", Media::Pdf, &["pdf"]),
-        ("application/pgp-signature", Media::Text, &["asc"]),
-        ("application/yaml", Media::Text, &["yaml", "yml"]),
-        ("audio/flac", Media::Audio, &["flac"]),
-        ("audio/mpeg", Media::Audio, &["mp3"]),
-        ("audio/wav", Media::Audio, &["wav"]),
-        ("image/apng", Media::Image, &["apng"]),
-        ("image/avif", Media::Image, &[]),
-        ("image/gif", Media::Image, &["gif"]),
-        ("image/jpeg", Media::Image, &["jpg", "jpeg"]),
-        ("image/png", Media::Image, &["png"]),
-        ("image/svg+xml", Media::Iframe, &["svg"]),
-        ("image/webp", Media::Image, &["webp"]),
-        ("model/stl", Media::Unknown, &["stl"]),
-        ("text/html;charset=utf-8", Media::Iframe, &["html"]),
-        ("text/plain;charset=utf-8", Media::Text, &["txt"]),
-        ("video/mp4", Media::Video, &["mp4"]),
-        ("video/webm", Media::Video, &["webm"]),
-    ];
-}
-
-impl FromStr for Media {
-    type Err = String;
-
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        for entry in Self::TABLE {
-            if entry.0 == s {
-                return Ok(entry.1);
-            }
-        }
-
-        Err("unknown content type: {s}".to_string())
-    }
-}
+use {chainhook_sdk::bitcoincore_rpc::bitcoin::Witness, std::str};

 pub fn parse_inscriptions_from_witness(
     input_index: usize,
     witness_bytes: Vec<Vec<u8>>,
     txid: &str,
-) -> Option<OrdinalOperation> {
-    let witness = Witness::from_vec(witness_bytes.clone());
-    let mut inscription = match InscriptionParser::parse(&witness) {
-        Ok(inscription) => inscription,
-        Err(_e) => {
-            let mut cursed_inscription = None;
-            for bytes in witness_bytes.iter() {
-                let script = Script::from(bytes.to_vec());
-                let parser = InscriptionParser {
-                    instructions: script.instructions().peekable(),
-                };
-                let mut inscription = match parser.parse_script() {
-                    Ok(inscription) => inscription,
-                    Err(_) => continue,
-                };
-                inscription.curse = Some(OrdinalInscriptionCurseType::P2wsh);
-                cursed_inscription = Some(inscription);
-                break;
-            }
-            match cursed_inscription {
-                Some(inscription) => inscription,
-                None => return None,
-            }
-        }
-    };
-    let inscription_id = InscriptionId {
-        txid: Txid::from_str(txid).unwrap(),
-        index: input_index as u32,
-    };
-    if input_index > 0 {
-        inscription.curse = Some(OrdinalInscriptionCurseType::Batch);
-    }
-    let no_content_bytes = vec![];
-    let inscription_content_bytes = inscription.body().take().unwrap_or(&no_content_bytes);
-    let mut content_bytes = "0x".to_string();
-    content_bytes.push_str(&hex::encode(&inscription_content_bytes));
-    let payload = OrdinalInscriptionRevealData {
-        content_type: inscription.content_type().unwrap_or("unknown").to_string(),
-        content_bytes,
-        content_length: inscription_content_bytes.len(),
-        inscription_id: inscription_id.to_string(),
-        inscription_input_index: input_index,
-        tx_index: 0,
-        inscription_output_value: 0,
-        inscription_fee: 0,
-        inscription_number: 0,
-        inscriber_address: None,
-        ordinal_number: 0,
-        ordinal_block_height: 0,
-        ordinal_offset: 0,
-        transfers_pre_inscription: 0,
-        satpoint_post_inscription: format!(""),
-        curse_type: inscription.curse.take(),
-    };
-    Some(OrdinalOperation::InscriptionRevealed(payload))
+) -> Option<Vec<OrdinalInscriptionRevealData>> {
+    let witness = Witness::from_slice(&witness_bytes);
+    let tapscript = witness.tapscript()?;
+    let envelopes: Vec<Envelope<Inscription>> = RawEnvelope::from_tapscript(tapscript, input_index)
+        .ok()?
+        .into_iter()
+        .map(|e| ParsedEnvelope::from(e))
+        .collect();
+    let mut inscriptions = vec![];
+    for envelope in envelopes.into_iter() {
+        let curse_type = if envelope.payload.unrecognized_even_field {
+            Some(OrdinalInscriptionCurseType::UnrecognizedEvenField)
+        } else if envelope.payload.duplicate_field {
+            Some(OrdinalInscriptionCurseType::DuplicateField)
+        } else if envelope.payload.incomplete_field {
+            Some(OrdinalInscriptionCurseType::IncompleteField)
+        } else if envelope.input != 0 {
+            Some(OrdinalInscriptionCurseType::NotInFirstInput)
+        } else if envelope.offset != 0 {
+            Some(OrdinalInscriptionCurseType::NotAtOffsetZero)
+        } else if envelope.payload.pointer.is_some() {
+            Some(OrdinalInscriptionCurseType::Pointer)
+        } else if envelope.pushnum {
+            Some(OrdinalInscriptionCurseType::Pushnum)
+        } else if envelope.stutter {
+            Some(OrdinalInscriptionCurseType::Stutter)
+        } else {
+            None
+        };
+        let inscription_id = InscriptionId {
+            txid: Txid::from_hex(txid).unwrap(),
+            index: input_index as u32,
+        };
+        let no_content_bytes = vec![];
+        let inscription_content_bytes = envelope.payload.body().take().unwrap_or(&no_content_bytes);
+        let mut content_bytes = "0x".to_string();
+        content_bytes.push_str(&hex::encode(&inscription_content_bytes));
+        let reveal_data = OrdinalInscriptionRevealData {
+            content_type: envelope
+                .payload
+                .content_type()
+                .unwrap_or("unknown")
+                .to_string(),
+            content_bytes,
+            content_length: inscription_content_bytes.len(),
+            inscription_id: inscription_id.to_string(),
+            inscription_input_index: input_index,
+            tx_index: 0,
+            inscription_output_value: 0,
+            inscription_fee: 0,
+            inscription_number: OrdinalInscriptionNumber::zero(),
+            inscriber_address: None,
+            ordinal_number: 0,
+            ordinal_block_height: 0,
+            ordinal_offset: 0,
+            transfers_pre_inscription: 0,
+            satpoint_post_inscription: format!(""),
+            curse_type,
+        };
+        inscriptions.push(reveal_data);
+    }
+    Some(inscriptions)
 }

 pub fn parse_inscriptions_from_standardized_tx(
@@ -326,12 +98,14 @@ pub fn parse_inscriptions_from_standardized_tx(
             .map(|w| hex::decode(&w[2..]).unwrap())
             .collect();

-        if let Some(operation) = parse_inscriptions_from_witness(
+        if let Some(inscriptions) = parse_inscriptions_from_witness(
             input_index,
             witness_bytes,
             tx.transaction_identifier.get_hash_bytes_str(),
         ) {
-            operations.push(operation);
+            for inscription in inscriptions.into_iter() {
+                operations.push(OrdinalOperation::InscriptionRevealed(inscription));
+            }
         }
     }
     operations
@@ -349,32 +123,34 @@ pub fn parse_inscriptions_in_raw_tx(
                 .map(|w| hex::decode(w).unwrap())
                 .collect();

-            if let Some(operation) =
+            if let Some(inscriptions) =
                 parse_inscriptions_from_witness(input_index, witness_bytes, &tx.txid)
             {
-                operations.push(operation);
+                for inscription in inscriptions.into_iter() {
+                    operations.push(OrdinalOperation::InscriptionRevealed(inscription));
+                }
             }
         }
     }
     operations
 }

-#[test]
-fn test_ordinal_inscription_parsing() {
-    let bytes = hex::decode("208737bc46923c3e64c7e6768c0346879468bf3aba795a5f5f56efca288f50ed2aac0063036f7264010118746578742f706c61696e3b636861727365743d7574662d38004c9948656c6c6f2030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030300a68").unwrap();
+// #[test]
+// fn test_ordinal_inscription_parsing() {
+//     let bytes = hex::decode("208737bc46923c3e64c7e6768c0346879468bf3aba795a5f5f56efca288f50ed2aac0063036f7264010118746578742f706c61696e3b636861727365743d7574662d38004c9948656c6c6f2030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030300a68").unwrap();

-    let script = Script::from(bytes);
-    let parser = InscriptionParser {
-        instructions: script.instructions().peekable(),
-    };
+//     let script = Script::from(bytes);
+//     let parser = InscriptionParser {
+//         instructions: script.instructions().peekable(),
+//     };

-    let inscription = match parser.parse_script() {
-        Ok(inscription) => inscription,
-        Err(_) => panic!(),
-    };
+//     let inscription = match parser.parse_script() {
+//         Ok(inscription) => inscription,
+//         Err(_) => panic!(),
+//     };

-    println!("{:?}", inscription);
-}
+//     println!("{:?}", inscription);
+// }

 pub fn parse_inscriptions_and_standardize_block(
     raw_block: BitcoinBlockFullBreakdown,
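The rewrite above replaces the in-house `InscriptionParser` with ord's envelope model: a witness' tapscript can carry several envelopes, and each one becomes its own `OrdinalInscriptionRevealData`. A condensed sketch of that flow using only the calls visible in the hunk (imports follow the diff and assume the same crate context; the returned count stands in for building one reveal per envelope):

```rust
// Sketch: one reveal is emitted per parsed envelope in the tapscript.
use chainhook_sdk::bitcoincore_rpc::bitcoin::Witness;
use crate::ord::envelope::RawEnvelope;

fn count_envelopes(witness_bytes: Vec<Vec<u8>>, input_index: usize) -> Option<usize> {
    let witness = Witness::from_slice(&witness_bytes);
    let tapscript = witness.tapscript()?;
    let envelopes = RawEnvelope::from_tapscript(tapscript, input_index).ok()?;
    Some(envelopes.len())
}
```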


@@ -5,10 +5,11 @@ use std::{
 };

 use chainhook_sdk::{
-    bitcoincore_rpc_json::bitcoin::{hashes::hex::FromHex, Address, Network, Script},
+    bitcoincore_rpc_json::bitcoin::{Address, Network, ScriptBuf},
     types::{
         BitcoinBlockData, BitcoinNetwork, BitcoinTransactionData, BlockIdentifier,
-        OrdinalInscriptionCurseType, OrdinalOperation, TransactionIdentifier,
+        OrdinalInscriptionCurseType, OrdinalInscriptionNumber, OrdinalOperation,
+        TransactionIdentifier,
     },
     utils::Context,
 };
@@ -20,11 +21,10 @@ use rusqlite::{Connection, Transaction};
 use crate::{
     core::OrdhookConfig,
     db::{
-        find_blessed_inscription_with_ordinal_number,
-        find_latest_cursed_inscription_number_at_block_height,
-        find_latest_inscription_number_at_block_height, format_satpoint_to_watch,
-        update_inscriptions_with_block, update_sequence_metadata_with_block, LazyBlockTransaction,
-        TraversalResult,
+        find_blessed_inscription_with_ordinal_number, find_nth_classic_neg_number_at_block_height,
+        find_nth_classic_pos_number_at_block_height, find_nth_jubilee_number_at_block_height,
+        format_inscription_id, format_satpoint_to_watch, update_inscriptions_with_block,
+        update_sequence_metadata_with_block, TransactionBytesCursor, TraversalResult,
     },
     ord::height::Height,
 };
@@ -68,12 +68,18 @@ pub fn parallelize_inscription_data_computations(
block: &BitcoinBlockData,
next_blocks: &Vec<BitcoinBlockData>,
cache_l1: &mut BTreeMap<(TransactionIdentifier, usize), TraversalResult>,
cache_l2: &Arc<DashMap<(u32, [u8; 8]), LazyBlockTransaction, BuildHasherDefault<FxHasher>>>,
cache_l2: &Arc<DashMap<(u32, [u8; 8]), TransactionBytesCursor, BuildHasherDefault<FxHasher>>>,
inscriptions_db_tx: &Transaction,
ordhook_config: &OrdhookConfig,
ctx: &Context,
) -> Result<bool, String> {
ctx.try_log(|logger| {
let inner_ctx = if ordhook_config.logs.ordinals_internals {
ctx.clone()
} else {
Context::empty()
};
inner_ctx.try_log(|logger| {
info!(
logger,
"Inscriptions data computation for block #{} started", block.block_identifier.index
@@ -83,12 +89,6 @@ pub fn parallelize_inscription_data_computations(
let (mut transactions_ids, l1_cache_hits) =
get_transactions_to_process(block, cache_l1, inscriptions_db_tx, ctx);
let inner_ctx = if ordhook_config.logs.ordinals_internals {
ctx.clone()
} else {
Context::empty()
};
let has_transactions_to_process = !transactions_ids.is_empty() || !l1_cache_hits.is_empty();
let thread_max = ordhook_config.ingestion_thread_max;
@@ -118,13 +118,13 @@ pub fn parallelize_inscription_data_computations(
while let Ok(Some((transaction_id, block_identifier, input_index, prioritary))) =
rx.recv()
{
let traversal: Result<TraversalResult, String> = compute_satoshi_number(
let traversal: Result<(TraversalResult, _), String> = compute_satoshi_number(
&moved_ordhook_db_path,
&block_identifier,
&transaction_id,
input_index,
0,
&local_cache,
false,
&moved_ctx,
);
let _ = moved_traversal_tx.send((traversal, prioritary, thread_index));
@@ -134,11 +134,12 @@ pub fn parallelize_inscription_data_computations(
thread_pool_handles.push(handle);
}
// Consume L1 cache
// Consume L1 cache: if the traversal was performed in a previous round
// retrieve it and use it.
let mut thread_index = 0;
for key in l1_cache_hits.iter() {
if let Some(entry) = cache_l1.remove(key) {
let _ = traversal_tx.send((Ok(entry), true, thread_index));
if let Some(entry) = cache_l1.get(key) {
let _ = traversal_tx.send((Ok((entry.clone(), vec![])), true, thread_index));
thread_index = (thread_index + 1) % thread_max;
}
}
@@ -148,7 +149,7 @@ pub fn parallelize_inscription_data_computations(
.map(|b| format!("{}", b.block_identifier.index))
.collect::<Vec<_>>();
ctx.try_log(|logger| {
inner_ctx.try_log(|logger| {
info!(
logger,
"Number of inscriptions in block #{} to process: {} (L1 cache hits: {}, queue: [{}], L1 cache len: {}, L2 cache len: {})",
@@ -190,7 +191,7 @@ pub fn parallelize_inscription_data_computations(
traversals_received += 1;
}
match traversal_result {
Ok(traversal) => {
Ok((traversal, _)) => {
inner_ctx.try_log(|logger| {
info!(
logger,
@@ -231,7 +232,7 @@ pub fn parallelize_inscription_data_computations(
let (mut transactions_ids, _) =
get_transactions_to_process(next_block, cache_l1, inscriptions_db_tx, ctx);
ctx.try_log(|logger| {
inner_ctx.try_log(|logger| {
info!(
logger,
"Number of inscriptions in block #{} to pre-process: {}",
@@ -254,7 +255,7 @@ pub fn parallelize_inscription_data_computations(
}
}
}
ctx.try_log(|logger| {
inner_ctx.try_log(|logger| {
info!(
logger,
"Inscriptions data computation for block #{} collected", block.block_identifier.index
@@ -265,7 +266,7 @@ pub fn parallelize_inscription_data_computations(
for tx in tx_thread_pool.iter() {
// Empty the queue
if let Ok((traversal_result, _prioritary, thread_index)) = traversal_rx.try_recv() {
if let Ok(traversal) = traversal_result {
if let Ok((traversal, _)) = traversal_result {
inner_ctx.try_log(|logger| {
info!(
logger,
@@ -289,7 +290,7 @@ pub fn parallelize_inscription_data_computations(
let _ = tx.send(None);
}
let ctx_moved = ctx.clone();
let ctx_moved = inner_ctx.clone();
let _ = hiro_system_kit::thread_named("Garbage collection").spawn(move || {
ctx_moved.try_log(|logger| info!(logger, "Cleanup: threadpool deallocation started",));
@@ -299,7 +300,7 @@ pub fn parallelize_inscription_data_computations(
ctx_moved.try_log(|logger| info!(logger, "Cleanup: threadpool deallocation ended",));
});
ctx.try_log(|logger| {
inner_ctx.try_log(|logger| {
info!(
logger,
"Inscriptions data computation for block #{} ended", block.block_identifier.index
@@ -335,7 +336,7 @@ fn get_transactions_to_process(
let mut transactions_ids: Vec<(TransactionIdentifier, usize)> = vec![];
let mut l1_cache_hits = vec![];
let mut known_transactions =
let known_transactions =
find_all_inscriptions_in_block(&block.block_identifier.index, inscriptions_db_tx, ctx);
for tx in block.transactions.iter().skip(1) {
@@ -356,7 +357,7 @@ fn get_transactions_to_process(
continue;
}
if let Some(_) = known_transactions.remove(&key) {
if let Some(_) = known_transactions.get(&key) {
continue;
}
@@ -378,8 +379,9 @@ fn get_transactions_to_process(
/// use.
///
pub struct SequenceCursor<'a> {
blessed: Option<i64>,
cursed: Option<i64>,
pos_cursor: Option<i64>,
neg_cursor: Option<i64>,
jubilee_cursor: Option<i64>,
inscriptions_db_conn: &'a Connection,
current_block_height: u64,
}
@@ -387,41 +389,62 @@ pub struct SequenceCursor<'a> {
impl<'a> SequenceCursor<'a> {
pub fn new(inscriptions_db_conn: &'a Connection) -> SequenceCursor<'a> {
SequenceCursor {
blessed: None,
cursed: None,
jubilee_cursor: None,
pos_cursor: None,
neg_cursor: None,
inscriptions_db_conn,
current_block_height: 0,
}
}
pub fn reset(&mut self) {
self.blessed = None;
self.cursed = None;
self.pos_cursor = None;
self.neg_cursor = None;
self.jubilee_cursor = None;
self.current_block_height = 0;
}
pub fn pick_next(&mut self, cursed: bool, block_height: u64, ctx: &Context) -> i64 {
pub fn pick_next(
&mut self,
cursed: bool,
block_height: u64,
network: &Network,
ctx: &Context,
) -> OrdinalInscriptionNumber {
if block_height < self.current_block_height {
self.reset();
}
self.current_block_height = block_height;
match cursed {
true => self.pick_next_cursed(ctx),
false => self.pick_next_blessed(ctx),
}
let classic = match cursed {
true => self.pick_next_neg_number(ctx),
false => self.pick_next_pos_number(ctx),
};
let jubilee_height = match network {
Network::Bitcoin => 824544,
Network::Regtest => 110,
Network::Signet => 175392,
Network::Testnet => 2544192,
_ => unreachable!(),
};
let jubilee = if block_height >= jubilee_height {
self.pick_next_jubilee_number(ctx)
} else {
classic
};
OrdinalInscriptionNumber { classic, jubilee }
}
fn pick_next_blessed(&mut self, ctx: &Context) -> i64 {
match self.blessed {
fn pick_next_pos_number(&mut self, ctx: &Context) -> i64 {
match self.pos_cursor {
None => {
match find_latest_inscription_number_at_block_height(
match find_nth_classic_pos_number_at_block_height(
&self.current_block_height,
&self.inscriptions_db_conn,
&ctx,
) {
Some(inscription_number) => {
self.blessed = Some(inscription_number);
self.pos_cursor = Some(inscription_number);
inscription_number + 1
}
_ => 0,
@@ -431,16 +454,35 @@ impl<'a> SequenceCursor<'a> {
}
}
fn pick_next_cursed(&mut self, ctx: &Context) -> i64 {
match self.cursed {
fn pick_next_jubilee_number(&mut self, ctx: &Context) -> i64 {
match self.jubilee_cursor {
None => {
match find_latest_cursed_inscription_number_at_block_height(
match find_nth_jubilee_number_at_block_height(
&self.current_block_height,
&self.inscriptions_db_conn,
&ctx,
) {
Some(inscription_number) => {
self.cursed = Some(inscription_number);
self.jubilee_cursor = Some(inscription_number);
inscription_number + 1
}
_ => 0,
}
}
Some(value) => value + 1,
}
}
fn pick_next_neg_number(&mut self, ctx: &Context) -> i64 {
match self.neg_cursor {
None => {
match find_nth_classic_neg_number_at_block_height(
&self.current_block_height,
&self.inscriptions_db_conn,
&ctx,
) {
Some(inscription_number) => {
self.neg_cursor = Some(inscription_number);
inscription_number - 1
}
_ => -1,
@@ -450,12 +492,16 @@ impl<'a> SequenceCursor<'a> {
}
}
pub fn increment_cursed(&mut self, ctx: &Context) {
self.cursed = Some(self.pick_next_cursed(ctx));
pub fn increment_neg_cursor(&mut self, ctx: &Context) {
self.neg_cursor = Some(self.pick_next_neg_number(ctx));
}
pub fn increment_blessed(&mut self, ctx: &Context) {
self.blessed = Some(self.pick_next_blessed(ctx))
pub fn increment_pos_number(&mut self, ctx: &Context) {
self.pos_cursor = Some(self.pick_next_pos_number(ctx))
}
pub fn increment_jubilee_number(&mut self, ctx: &Context) {
self.jubilee_cursor = Some(self.pick_next_jubilee_number(ctx))
}
}
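A minimal sketch of the dual-track numbering introduced above (struct shape and the mainnet height are taken from pick_next in this diff): before the Jubilee activation height both tracks agree; after it, a cursed inscription keeps its negative classic number but consumes the next positive jubilee number.

fn assign_number(
    block_height: u64,
    cursed: bool,
    next_pos: i64,
    next_neg: i64,
    next_jubilee: i64,
) -> OrdinalInscriptionNumber {
    const JUBILEE_HEIGHT: u64 = 824_544; // mainnet value, per pick_next above
    let classic = if cursed { next_neg } else { next_pos };
    let jubilee = if block_height >= JUBILEE_HEIGHT {
        next_jubilee
    } else {
        classic
    };
    OrdinalInscriptionNumber { classic, jubilee }
}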
@@ -549,13 +595,13 @@ pub fn augment_block_with_ordinals_inscriptions_data(
};
let is_curse = inscription_data.curse_type.is_some();
let inscription_number =
sequence_cursor.pick_next(is_curse, block.block_identifier.index, &ctx);
sequence_cursor.pick_next(is_curse, block.block_identifier.index, &network, &ctx);
inscription_data.inscription_number = inscription_number;
if is_curse {
sequence_cursor.increment_cursed(ctx);
sequence_cursor.increment_neg_cursor(ctx);
} else {
sequence_cursor.increment_blessed(ctx);
sequence_cursor.increment_pos_number(ctx);
};
ctx.try_log(|logger| {
@@ -563,7 +609,7 @@ pub fn augment_block_with_ordinals_inscriptions_data(
logger,
"Unbound inscription {} (#{}) detected on Satoshi {} (block #{}, {} transfers)",
inscription_data.inscription_id,
inscription_data.inscription_number,
inscription_data.get_inscription_number(),
inscription_data.ordinal_number,
block.block_identifier.index,
inscription_data.transfers_pre_inscription,
@@ -591,8 +637,7 @@ fn augment_transaction_with_ordinals_inscriptions_data(
ctx: &Context,
) -> bool {
let any_event = tx.metadata.ordinal_operations.is_empty() == false;
let mut ordinals_ops_indexes_to_discard = VecDeque::new();
let mut inscription_subindex = 0;
for (op_index, op) in tx.metadata.ordinal_operations.iter_mut().enumerate() {
let (mut is_cursed, inscription) = match op {
OrdinalOperation::InscriptionRevealed(inscription) => {
@@ -602,26 +647,26 @@ fn augment_transaction_with_ordinals_inscriptions_data(
};
let transaction_identifier = tx.transaction_identifier.clone();
let inscription_id = format_inscription_id(&transaction_identifier, inscription_subindex);
let traversal = match inscriptions_data
.remove(&(transaction_identifier, inscription.inscription_input_index))
.get(&(transaction_identifier, inscription.inscription_input_index))
{
Some(traversal) => traversal,
None => {
let err_msg = format!(
"Unable to retrieve backward traversal result for inscription {}",
tx.transaction_identifier.hash
);
ctx.try_log(|logger| {
error!(
logger,
"Unable to retrieve cached inscription data for inscription {}",
tx.transaction_identifier.hash
);
error!(logger, "{}", err_msg);
});
ordinals_ops_indexes_to_discard.push_front(op_index);
continue;
std::process::exit(1);
}
};
// Do we need to curse the inscription?
let mut inscription_number =
sequence_cursor.pick_next(is_cursed, block_identifier.index, ctx);
sequence_cursor.pick_next(is_cursed, block_identifier.index, network, ctx);
let mut curse_type_override = None;
if !is_cursed {
// Is this inscription re-inscribing an existing blessed inscription?
@@ -640,13 +685,14 @@ fn augment_transaction_with_ordinals_inscriptions_data(
is_cursed = true;
inscription_number =
sequence_cursor.pick_next(is_cursed, block_identifier.index, ctx);
sequence_cursor.pick_next(is_cursed, block_identifier.index, network, ctx);
curse_type_override = Some(OrdinalInscriptionCurseType::Reinscription)
}
};
let outputs = &tx.metadata.outputs;
inscription.inscription_id = inscription_id;
inscription.inscription_number = inscription_number;
let outputs = &tx.metadata.outputs;
inscription.ordinal_offset = traversal.get_ordinal_coinbase_offset();
inscription.ordinal_block_height = traversal.get_ordinal_coinbase_height();
inscription.ordinal_number = traversal.ordinal_number;
@@ -666,7 +712,7 @@ fn augment_transaction_with_ordinals_inscriptions_data(
inscription.inscription_output_value = output.value;
inscription.inscriber_address = {
let script_pub_key = output.get_script_pubkey_hex();
match Script::from_hex(&script_pub_key) {
match ScriptBuf::from_hex(&script_pub_key) {
Ok(script) => match Address::from_script(&script, network.clone()) {
Ok(a) => Some(a.to_string()),
_ => None,
@@ -703,7 +749,7 @@ fn augment_transaction_with_ordinals_inscriptions_data(
logger,
"Inscription {} (#{}) detected on Satoshi {} (block #{}, {} transfers)",
inscription.inscription_id,
inscription.inscription_number,
inscription.get_inscription_number(),
inscription.ordinal_number,
block_identifier.index,
inscription.transfers_pre_inscription,
@@ -711,10 +757,11 @@ fn augment_transaction_with_ordinals_inscriptions_data(
});
if is_cursed {
sequence_cursor.increment_cursed(ctx);
sequence_cursor.increment_neg_cursor(ctx);
} else {
sequence_cursor.increment_blessed(ctx);
sequence_cursor.increment_pos_number(ctx);
}
inscription_subindex += 1;
}
any_event
}
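The log calls above switch from reading .inscription_number directly to get_inscription_number(). The accessor's body is not shown in this diff; a plausible shape, assuming display favors the jubilee track once both numbers exist:

fn primary_number(n: &OrdinalInscriptionNumber) -> i64 {
    // assumption: the jubilee number is the one surfaced to users
    n.jubilee
}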
@@ -735,7 +782,7 @@ fn consolidate_transaction_with_pre_computed_inscription_data(
OrdinalOperation::InscriptionTransferred(_) => continue,
};
let Some(traversal) = inscriptions_data.remove(&(
let Some(traversal) = inscriptions_data.get(&(
tx.transaction_identifier.clone(),
inscription.inscription_input_index,
)) else {
@@ -745,7 +792,7 @@ fn consolidate_transaction_with_pre_computed_inscription_data(
inscription.ordinal_offset = traversal.get_ordinal_coinbase_offset();
inscription.ordinal_block_height = traversal.get_ordinal_coinbase_height();
inscription.ordinal_number = traversal.ordinal_number;
inscription.inscription_number = traversal.inscription_number;
inscription.inscription_number = traversal.inscription_number.clone();
inscription.transfers_pre_inscription = traversal.transfers;
inscription.inscription_fee = tx.metadata.fee;
inscription.tx_index = tx_index;
@@ -755,8 +802,8 @@ fn consolidate_transaction_with_pre_computed_inscription_data(
traversal.transfer_data.inscription_offset_intra_output,
);
if inscription.inscription_number < 0 {
inscription.curse_type = Some(OrdinalInscriptionCurseType::Unknown);
if inscription.inscription_number.classic < 0 {
inscription.curse_type = Some(OrdinalInscriptionCurseType::Generic);
}
if traversal
@@ -775,7 +822,7 @@ fn consolidate_transaction_with_pre_computed_inscription_data(
inscription.inscription_output_value = output.value;
inscription.inscriber_address = {
let script_pub_key = output.get_script_pubkey_hex();
match Script::from_hex(&script_pub_key) {
match ScriptBuf::from_hex(&script_pub_key) {
Ok(script) => match Address::from_script(&script, network.clone()) {
Ok(a) => Some(a.to_string()),
_ => None,
@@ -809,16 +856,18 @@ pub fn consolidate_block_with_pre_computed_ordinals_data(
let mut inscriptions_data = loop {
let results =
find_all_inscriptions_in_block(&block.block_identifier.index, inscriptions_db_tx, ctx);
if results.len() == expected_inscriptions_count {
break results;
// TODO: investigate; sporadically the returned set is empty and a retry is required.
if results.is_empty() && expected_inscriptions_count > 0 {
ctx.try_log(|logger| {
warn!(
logger,
"Database retuning {} results instead of the expected {expected_inscriptions_count}",
results.len()
);
});
continue;
}
ctx.try_log(|logger| {
warn!(
logger,
"Database retuning {} results instead of the expected {expected_inscriptions_count}",
results.len()
);
});
break results;
};
for (tx_index, tx) in block.transactions.iter_mut().enumerate() {
// Add inscriptions data


@@ -1,5 +1,5 @@
use chainhook_sdk::{
bitcoincore_rpc_json::bitcoin::{hashes::hex::FromHex, Address, Network, Script},
bitcoincore_rpc_json::bitcoin::{Address, Network, ScriptBuf},
types::{
BitcoinBlockData, BitcoinNetwork, BitcoinTransactionData, BlockIdentifier,
OrdinalInscriptionTransferData, OrdinalInscriptionTransferDestination, OrdinalOperation,
@@ -117,7 +117,7 @@ pub fn augment_transaction_with_ordinals_transfers_data(
format_outpoint_to_watch(&tx.transaction_identifier, output_index);
let script_pub_key_hex =
tx.metadata.outputs[output_index].get_script_pubkey_hex();
let updated_address = match Script::from_hex(&script_pub_key_hex) {
let updated_address = match ScriptBuf::from_hex(&script_pub_key_hex) {
Ok(script) => match Address::from_script(&script, network.clone()) {
Ok(address) => OrdinalInscriptionTransferDestination::Transferred(
address.to_string(),


@@ -1,4 +1,4 @@
use chainhook_sdk::types::{BlockIdentifier, TransactionIdentifier};
use chainhook_sdk::types::{BlockIdentifier, OrdinalInscriptionNumber, TransactionIdentifier};
use chainhook_sdk::utils::Context;
use dashmap::DashMap;
use fxhash::FxHasher;
@@ -7,10 +7,11 @@ use std::path::PathBuf;
use std::sync::Arc;
use crate::db::{
find_lazy_block_at_block_height, open_ordhook_db_conn_rocks_db_loop, TransferData,
find_pinned_block_bytes_at_block_height, open_ordhook_db_conn_rocks_db_loop, BlockBytesCursor,
TransferData,
};
use crate::db::{LazyBlockTransaction, TraversalResult};
use crate::db::{TransactionBytesCursor, TraversalResult};
use crate::ord::height::Height;
pub fn compute_satoshi_number(
@@ -18,19 +19,19 @@ pub fn compute_satoshi_number(
block_identifier: &BlockIdentifier,
transaction_identifier: &TransactionIdentifier,
inscription_input_index: usize,
inscription_number: i64,
traversals_cache: &Arc<
DashMap<(u32, [u8; 8]), LazyBlockTransaction, BuildHasherDefault<FxHasher>>,
DashMap<(u32, [u8; 8]), TransactionBytesCursor, BuildHasherDefault<FxHasher>>,
>,
_back_tracking: bool,
ctx: &Context,
) -> Result<TraversalResult, String> {
) -> Result<(TraversalResult, Vec<(u32, [u8; 8])>), String> {
let mut inscription_offset_intra_output = 0;
let mut inscription_output_index: usize = 0;
let mut ordinal_offset = 0;
let mut ordinal_block_number = block_identifier.index as u32;
let txid = transaction_identifier.get_8_hash_bytes();
let mut blocks_db = open_ordhook_db_conn_rocks_db_loop(false, &blocks_db_dir, &ctx);
let mut back_track = vec![];
let blocks_db = open_ordhook_db_conn_rocks_db_loop(false, &blocks_db_dir, &ctx);
let (sats_ranges, inscription_offset_cross_outputs) = match traversals_cache
.get(&(block_identifier.index as u32, txid.clone()))
@@ -42,38 +43,28 @@ pub fn compute_satoshi_number(
tx.get_cumulated_sats_in_until_input_index(inscription_input_index),
)
}
None => {
let mut attempt = 0;
loop {
match find_lazy_block_at_block_height(
ordinal_block_number,
3,
false,
&blocks_db,
&ctx,
) {
None => {
if attempt < 3 {
attempt += 1;
blocks_db =
open_ordhook_db_conn_rocks_db_loop(false, &blocks_db_dir, &ctx);
} else {
return Err(format!("block #{ordinal_block_number} not in database"));
}
}
Some(block) => match block.find_and_serialize_transaction_with_txid(&txid) {
None => loop {
match find_pinned_block_bytes_at_block_height(ordinal_block_number, 3, &blocks_db, &ctx)
{
None => {
return Err(format!("block #{ordinal_block_number} not in database"));
}
Some(block_bytes) => {
let cursor = BlockBytesCursor::new(&block_bytes.as_ref());
match cursor.find_and_serialize_transaction_with_txid(&txid) {
Some(tx) => {
let sats_ranges = tx.get_sat_ranges();
let inscription_offset_cross_outputs =
tx.get_cumulated_sats_in_until_input_index(inscription_input_index);
traversals_cache.insert((ordinal_block_number, txid.clone()), tx);
back_track.push((ordinal_block_number, txid.clone()));
break (sats_ranges, inscription_offset_cross_outputs);
}
None => return Err(format!("txid not in block #{ordinal_block_number}")),
},
}
}
}
}
},
};
for (i, (min, max)) in sats_ranges.into_iter().enumerate() {
@@ -142,51 +133,56 @@ pub fn compute_satoshi_number(
transaction_identifier.hash
)
});
return Ok(TraversalResult {
inscription_number: 0,
ordinal_number: 0,
transfers: 0,
inscription_input_index,
transaction_identifier_inscription: transaction_identifier.clone(),
transfer_data: TransferData {
inscription_offset_intra_output,
transaction_identifier_location: transaction_identifier.clone(),
output_index: inscription_output_index,
tx_index: 0,
return Ok((
TraversalResult {
inscription_number: OrdinalInscriptionNumber::zero(),
ordinal_number: 0,
transfers: 0,
inscription_input_index,
transaction_identifier_inscription: transaction_identifier.clone(),
transfer_data: TransferData {
inscription_offset_intra_output,
transaction_identifier_location: transaction_identifier.clone(),
output_index: inscription_output_index,
tx_index: 0,
},
},
});
back_track,
));
}
}
let lazy_block = {
let mut attempt = 0;
let pinned_block_bytes = {
loop {
match find_lazy_block_at_block_height(
match find_pinned_block_bytes_at_block_height(
ordinal_block_number,
3,
false,
&blocks_db,
&ctx,
) {
Some(block) => break block,
None => {
if attempt < 3 {
attempt += 1;
blocks_db =
open_ordhook_db_conn_rocks_db_loop(false, &blocks_db_dir, &ctx);
} else {
return Err(format!("block #{ordinal_block_number} not in database"));
}
return Err(format!("block #{ordinal_block_number} not in database (traversing {} / {} in progress)", transaction_identifier.hash, block_identifier.index));
}
}
}
};
let coinbase_txid = lazy_block.get_coinbase_txid();
let block_cursor = BlockBytesCursor::new(pinned_block_bytes.as_ref());
let txid = tx_cursor.0;
let mut block_cursor_tx_iter = block_cursor.iter_tx();
let coinbase = block_cursor_tx_iter.next().expect("empty block");
// evaluate exit condition: did we reach the **final** coinbase transaction
if coinbase_txid.eq(&txid) {
if coinbase.txid.eq(&txid) {
let mut intra_coinbase_output_offset = 0;
for (index, output_value) in coinbase.outputs.iter().enumerate() {
if index == tx_cursor.1 {
break;
}
intra_coinbase_output_offset += output_value;
}
ordinal_offset += intra_coinbase_output_offset;
let subsidy = Height(ordinal_block_number.into()).subsidy();
if ordinal_offset < subsidy {
// Great!
@@ -196,7 +192,7 @@ pub fn compute_satoshi_number(
// loop over the transaction fees to detect the right range
let mut accumulated_fees = subsidy;
for tx in lazy_block.iter_tx() {
for tx in block_cursor_tx_iter {
let mut total_in = 0;
for input in tx.inputs.iter() {
total_in += input.txin_value;
@@ -232,7 +228,7 @@ pub fn compute_satoshi_number(
}
} else {
// isolate the target transaction
let lazy_tx = match lazy_block.find_and_serialize_transaction_with_txid(&txid) {
let lazy_tx = match block_cursor.find_and_serialize_transaction_with_txid(&txid) {
Some(entry) => entry,
None => {
ctx.try_log(|logger| {
@@ -261,6 +257,7 @@ pub fn compute_satoshi_number(
sats_in += input.txin_value;
if sats_out < sats_in {
back_track.push((ordinal_block_number, tx_cursor.0.clone()));
traversals_cache.insert((ordinal_block_number, tx_cursor.0), lazy_tx.clone());
ordinal_offset = sats_out - (sats_in - input.txin_value);
ordinal_block_number = input.block_height;
@@ -277,19 +274,22 @@ pub fn compute_satoshi_number(
transaction_identifier.hash
)
});
return Ok(TraversalResult {
inscription_number: 0,
ordinal_number: 0,
transfers: 0,
inscription_input_index,
transaction_identifier_inscription: transaction_identifier.clone(),
transfer_data: TransferData {
inscription_offset_intra_output,
transaction_identifier_location: transaction_identifier.clone(),
output_index: inscription_output_index,
tx_index: 0,
return Ok((
TraversalResult {
inscription_number: OrdinalInscriptionNumber::zero(),
ordinal_number: 0,
transfers: 0,
inscription_input_index,
transaction_identifier_inscription: transaction_identifier.clone(),
transfer_data: TransferData {
inscription_offset_intra_output,
transaction_identifier_location: transaction_identifier.clone(),
output_index: inscription_output_index,
tx_index: 0,
},
},
});
back_track,
));
}
}
}
@@ -297,17 +297,20 @@ pub fn compute_satoshi_number(
let height = Height(ordinal_block_number.into());
let ordinal_number = height.starting_sat().0 + ordinal_offset + inscription_offset_intra_output;
Ok(TraversalResult {
inscription_number,
ordinal_number,
transfers: hops,
inscription_input_index,
transaction_identifier_inscription: transaction_identifier.clone(),
transfer_data: TransferData {
inscription_offset_intra_output,
transaction_identifier_location: transaction_identifier.clone(),
output_index: inscription_output_index,
tx_index: 0,
Ok((
TraversalResult {
inscription_number: OrdinalInscriptionNumber::zero(),
ordinal_number,
transfers: hops,
inscription_input_index,
transaction_identifier_inscription: transaction_identifier.clone(),
transfer_data: TransferData {
inscription_offset_intra_output,
transaction_identifier_location: transaction_identifier.clone(),
output_index: inscription_output_index,
tx_index: 0,
},
},
})
back_track,
))
}
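A sketch of the new return shape (names from the hunks above): alongside the TraversalResult, compute_satoshi_number now reports every (block_height, 8-byte txid) hop it touched, so the caller can later act on exactly those L2 cache entries.

type BackTrack = Vec<(u32, [u8; 8])>;

fn handle_traversal(result: Result<(TraversalResult, BackTrack), String>) {
    match result {
        Ok((traversal, back_track)) => {
            println!(
                "sat {} located after touching {} cached transactions",
                traversal.ordinal_number,
                back_track.len()
            );
        }
        Err(e) => eprintln!("traversal failed: {e}"),
    }
}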

File diff suppressed because one or more lines are too long


@@ -8,14 +8,14 @@ use std::{
use rand::{thread_rng, Rng};
use rocksdb::DB;
use rocksdb::{DBPinnableSlice, DB};
use rusqlite::{Connection, OpenFlags, ToSql, Transaction};
use std::io::Cursor;
use chainhook_sdk::{
indexer::bitcoin::BitcoinBlockFullBreakdown,
types::{
BitcoinBlockData, BlockIdentifier, OrdinalInscriptionRevealData,
BitcoinBlockData, BlockIdentifier, OrdinalInscriptionNumber, OrdinalInscriptionRevealData,
OrdinalInscriptionTransferData, TransactionIdentifier,
},
utils::Context,
@@ -59,9 +59,11 @@ pub fn initialize_ordhook_db(base_dir: &PathBuf, ctx: &Context) -> Connection {
if let Err(e) = conn.execute(
"CREATE TABLE IF NOT EXISTS inscriptions (
inscription_id TEXT NOT NULL PRIMARY KEY,
input_index INTEGER NOT NULL,
block_height INTEGER NOT NULL,
ordinal_number INTEGER NOT NULL,
inscription_number INTEGER NOT NULL
jubilee_inscription_number INTEGER NOT NULL,
classic_inscription_number INTEGER NOT NULL
)",
[],
) {
@@ -80,7 +82,14 @@ pub fn initialize_ordhook_db(base_dir: &PathBuf, ctx: &Context) -> Connection {
ctx.try_log(|logger| warn!(logger, "unable to query hord.sqlite: {}", e.to_string()));
}
if let Err(e) = conn.execute(
"CREATE INDEX IF NOT EXISTS index_inscriptions_on_inscription_number ON inscriptions(inscription_number);",
"CREATE INDEX IF NOT EXISTS index_inscriptions_on_jubilee_inscription_number ON inscriptions(jubilee_inscription_number);",
[],
) {
ctx.try_log(|logger| warn!(logger, "unable to query hord.sqlite: {}", e.to_string()));
}
if let Err(e) = conn.execute(
"CREATE INDEX IF NOT EXISTS index_inscriptions_on_classic_inscription_number ON inscriptions(classic_inscription_number);",
[],
) {
ctx.try_log(|logger| warn!(logger, "unable to query hord.sqlite: {}", e.to_string()));
@@ -134,8 +143,9 @@ pub fn initialize_ordhook_db(base_dir: &PathBuf, ctx: &Context) -> Connection {
if let Err(e) = conn.execute(
"CREATE TABLE IF NOT EXISTS sequence_metadata (
block_height INTEGER NOT NULL,
latest_cursed_inscription_number INTEGER NOT NULL,
latest_inscription_number INTEGER NOT NULL
nth_classic_pos_number INTEGER NOT NULL,
nth_classic_neg_number INTEGER NOT NULL,
nth_jubilee_number INTEGER NOT NULL
)",
[],
) {
@@ -240,11 +250,6 @@ fn get_default_ordhook_db_file_path_rocks_db(base_dir: &PathBuf) -> PathBuf {
fn rocks_db_default_options() -> rocksdb::Options {
let mut opts = rocksdb::Options::default();
opts.create_if_missing(true);
// opts.prepare_for_bulk_load();
// opts.set_compression_type(rocksdb::DBCompressionType::Lz4);
// opts.set_blob_compression_type(rocksdb::DBCompressionType::Lz4);
// opts.increase_parallelism(parallelism)
// Per rocksdb's documentation:
// If cache_index_and_filter_blocks is false (which is default),
// the number of index/filter blocks is controlled by option max_open_files.
@@ -252,7 +257,23 @@ fn rocks_db_default_options() -> rocksdb::Options {
// we recommend setting max_open_files to -1, which means infinity.
// This option will preload all filter and index blocks and will not need to maintain LRU of files.
// Setting max_open_files to -1 will get you the best possible performance.
opts.set_max_open_files(4096);
// Additional documentation:
// https://betterprogramming.pub/navigating-the-minefield-of-rocksdb-configuration-options-246af1e1d3f9
// opts.set_write_buffer_size(64 * 1024 * 1024);
// opts.set_blob_file_size(1 * 1024 * 1024 * 1024);
// opts.set_target_file_size_base(64 * 1024 * 1024);
opts.set_max_open_files(2048);
opts.create_if_missing(true);
// opts.optimize_for_point_lookup(1 * 1024 * 1024 * 1024);
// opts.set_level_zero_stop_writes_trigger(64);
// opts.set_level_zero_slowdown_writes_trigger(20);
// opts.set_enable_blob_files(true);
// opts.set_enable_blob_gc(true);
// opts.set_use_fsync(false);
// opts.set_bytes_per_sync(8388608);
// opts.set_compaction_style(DBCompactionStyle::Universal);
// opts.set_compression_type(rocksdb::DBCompressionType::Lz4);
// opts.set_blob_compression_type(rocksdb::DBCompressionType::Lz4);
opts
}
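A minimal usage sketch (rocks_db_default_options is the module-private helper above; the crate's own open path adds the retry loop shown further down):

use rocksdb::DB;
use std::path::Path;

fn open_blocks_db(path: &Path) -> Result<DB, rocksdb::Error> {
    let opts = rocks_db_default_options(); // options as tuned above
    DB::open(&opts, path)
}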
@@ -287,8 +308,11 @@ pub fn open_ordhook_db_conn_rocks_db_loop(
retries += 1;
if retries > 10 {
ctx.try_log(|logger| {
warn!(logger, "Unable to open db: {e}",);
warn!(logger, "Unable to open db: {e}. Retrying in 10s",);
});
sleep(Duration::from_secs(10));
} else {
sleep(Duration::from_secs(2));
}
continue;
}
@@ -319,7 +343,7 @@ fn open_readwrite_ordhook_db_conn_rocks_db(
pub fn insert_entry_in_blocks(
block_height: u32,
lazy_block: &LazyBlock,
block_bytes: &[u8],
update_tip: bool,
blocks_db_rw: &DB,
ctx: &Context,
@@ -327,7 +351,7 @@ pub fn insert_entry_in_blocks(
let block_height_bytes = block_height.to_be_bytes();
let mut retries = 0;
loop {
let res = blocks_db_rw.put(&block_height_bytes, &lazy_block.bytes);
let res = blocks_db_rw.put(&block_height_bytes, block_bytes);
match res {
Ok(_) => break,
Err(e) => {
@@ -360,13 +384,12 @@ pub fn find_last_block_inserted(blocks_db: &DB) -> u32 {
}
}
pub fn find_lazy_block_at_block_height(
pub fn find_pinned_block_bytes_at_block_height<'a>(
block_height: u32,
retry: u8,
try_iterator: bool,
blocks_db: &DB,
blocks_db: &'a DB,
ctx: &Context,
) -> Option<LazyBlock> {
) -> Option<DBPinnableSlice<'a>> {
let mut attempt = 1;
// let mut read_options = rocksdb::ReadOptions::default();
// read_options.fill_cache(true);
@@ -375,24 +398,9 @@ pub fn find_lazy_block_at_block_height(
let mut rng = thread_rng();
loop {
match blocks_db.get(block_height.to_be_bytes()) {
Ok(Some(res)) => return Some(LazyBlock::new(res)),
match blocks_db.get_pinned(block_height.to_be_bytes()) {
Ok(Some(res)) => return Some(res),
_ => {
if attempt == 1 && try_iterator {
ctx.try_log(|logger| {
warn!(
logger,
"Attempt to retrieve block #{} through iterator", block_height,
)
});
let mut iter = blocks_db.iterator(rocksdb::IteratorMode::End);
let block_height_bytes = block_height.to_be_bytes();
while let Some(Ok((k, res))) = iter.next() {
if (*k).eq(&block_height_bytes) {
return Some(LazyBlock::new(res.to_vec()));
}
}
}
attempt += 1;
backoff = 2.0 * backoff + (backoff * rng.gen_range(0.0..1.0));
let duration = std::time::Duration::from_millis((backoff * 1_000.0) as u64);
@@ -411,6 +419,56 @@ pub fn find_lazy_block_at_block_height(
}
}
pub fn find_block_bytes_at_block_height<'a>(
block_height: u32,
retry: u8,
blocks_db: &DB,
ctx: &Context,
) -> Option<Vec<u8>> {
let mut attempt = 1;
// let mut read_options = rocksdb::ReadOptions::default();
// read_options.fill_cache(true);
// read_options.set_verify_checksums(false);
let mut backoff: f64 = 1.0;
let mut rng = thread_rng();
loop {
match blocks_db.get(block_height.to_be_bytes()) {
Ok(Some(res)) => return Some(res),
_ => {
attempt += 1;
backoff = 2.0 * backoff + (backoff * rng.gen_range(0.0..1.0));
let duration = std::time::Duration::from_millis((backoff * 1_000.0) as u64);
ctx.try_log(|logger| {
warn!(
logger,
"Unable to find block #{}, will retry in {:?}", block_height, duration
)
});
std::thread::sleep(duration);
if attempt > retry {
return None;
}
}
}
}
}
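Both retry loops above grow their delay as backoff = 2 * backoff + backoff * jitter, with jitter drawn uniformly from [0, 1). A worked sketch of the progression:

fn next_backoff(backoff: f64, jitter: f64) -> f64 {
    // same growth rule as the loops above
    2.0 * backoff + backoff * jitter
}
// Starting at 1.0s with a mid-range jitter of 0.5:
// 1.0 -> 2.5 -> 6.25 -> 15.625 seconds, i.e. roughly 2.5x per attempt.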
pub fn run_compaction(blocks_db_rw: &DB, lim: u32) {
let gen = 0u32.to_be_bytes();
let _ = blocks_db_rw.compact_range(Some(&gen), Some(&lim.to_be_bytes()));
}
pub fn find_missing_blocks(blocks_db: &DB, start: u32, end: u32, ctx: &Context) -> Vec<u32> {
let mut missing_blocks = vec![];
for i in start..=end {
if find_pinned_block_bytes_at_block_height(i as u32, 0, &blocks_db, ctx).is_none() {
missing_blocks.push(i);
}
}
missing_blocks
}
pub fn remove_entry_from_blocks(block_height: u32, blocks_db_rw: &DB, ctx: &Context) {
if let Err(e) = blocks_db_rw.delete(block_height.to_be_bytes()) {
ctx.try_log(|logger| error!(logger, "{}", e.to_string()));
@@ -439,8 +497,8 @@ pub fn insert_entry_in_inscriptions(
ctx: &Context,
) {
while let Err(e) = inscriptions_db_conn_rw.execute(
"INSERT INTO inscriptions (inscription_id, ordinal_number, inscription_number, block_height) VALUES (?1, ?2, ?3, ?4)",
rusqlite::params![&inscription_data.inscription_id, &inscription_data.ordinal_number, &inscription_data.inscription_number, &block_identifier.index],
"INSERT INTO inscriptions (inscription_id, ordinal_number, jubilee_inscription_number, classic_inscription_number, block_height, input_index) VALUES (?1, ?2, ?3, ?4, ?5, ?6)",
rusqlite::params![&inscription_data.inscription_id, &inscription_data.ordinal_number, &inscription_data.inscription_number.jubilee, &inscription_data.inscription_number.classic, &block_identifier.index, &inscription_data.inscription_input_index],
) {
ctx.try_log(|logger| warn!(logger, "unable to query hord.sqlite: {}", e.to_string()));
std::thread::sleep(std::time::Duration::from_secs(1));
@@ -506,25 +564,34 @@ pub fn update_sequence_metadata_with_block(
inscriptions_db_conn_rw: &Connection,
ctx: &Context,
) {
let mut latest_blessed = find_latest_inscription_number_at_block_height(
let mut nth_classic_pos_number = find_nth_classic_pos_number_at_block_height(
&block.block_identifier.index,
inscriptions_db_conn_rw,
ctx,
)
.unwrap_or(0);
let mut latest_cursed = find_latest_cursed_inscription_number_at_block_height(
let mut nth_classic_neg_number = find_nth_classic_neg_number_at_block_height(
&block.block_identifier.index,
inscriptions_db_conn_rw,
ctx,
)
.unwrap_or(0);
let mut nth_jubilee_number = find_nth_jubilee_number_at_block_height(
&block.block_identifier.index,
inscriptions_db_conn_rw,
ctx,
)
.unwrap_or(0);
for inscription_data in get_inscriptions_revealed_in_block(&block).iter() {
latest_blessed = latest_blessed.max(inscription_data.inscription_number);
latest_cursed = latest_cursed.min(inscription_data.inscription_number);
nth_classic_pos_number =
nth_classic_pos_number.max(inscription_data.inscription_number.classic);
nth_classic_neg_number =
nth_classic_neg_number.min(inscription_data.inscription_number.classic);
nth_jubilee_number = nth_jubilee_number.max(inscription_data.inscription_number.jubilee);
}
while let Err(e) = inscriptions_db_conn_rw.execute(
"INSERT INTO sequence_metadata (block_height, latest_inscription_number, latest_cursed_inscription_number) VALUES (?1, ?2, ?3)",
rusqlite::params![&block.block_identifier.index, latest_blessed, latest_cursed],
"INSERT INTO sequence_metadata (block_height, nth_classic_pos_number, nth_classic_neg_number, nth_jubilee_number) VALUES (?1, ?2, ?3, ?4)",
rusqlite::params![&block.block_identifier.index, nth_classic_pos_number, nth_classic_neg_number, nth_jubilee_number],
) {
ctx.try_log(|logger| warn!(logger, "unable to update sequence_metadata: {}", e.to_string()));
std::thread::sleep(std::time::Duration::from_secs(1));
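Each block now writes one sequence_metadata row caching the three running extremes, so the cursor can resume without scanning the inscriptions table. A sketch of how the cached value is consumed when picking the next positive classic number (helper names from this diff; the fallback table scan lives behind the find_nth_* helper):

fn next_classic_pos(db: &Connection, block_height: u64, ctx: &Context) -> i64 {
    match find_nth_classic_pos_number_at_block_height(&block_height, db, ctx) {
        Some(n) => n + 1, // continue after the highest number below this height
        None => 0,        // genesis case: no inscriptions recorded yet
    }
}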
@@ -862,37 +929,49 @@ pub fn find_all_inscription_transfers(
})
}
pub fn find_latest_inscription_number_at_block_height(
pub fn find_nth_classic_pos_number_at_block_height(
block_height: &u64,
db_conn: &Connection,
ctx: &Context,
) -> Option<i64> {
let args: &[&dyn ToSql] = &[&block_height.to_sql().unwrap()];
let query = "SELECT latest_inscription_number FROM sequence_metadata WHERE block_height < ? ORDER BY block_height DESC LIMIT 1";
let query = "SELECT nth_classic_pos_number FROM sequence_metadata WHERE block_height < ? ORDER BY block_height DESC LIMIT 1";
perform_query_one(query, args, db_conn, ctx, |row| {
let inscription_number: i64 = row.get(0).unwrap();
inscription_number
})
.or_else(|| compute_latest_inscription_number_at_block_height(block_height, db_conn, ctx))
.or_else(|| compute_nth_classic_pos_number_at_block_height(block_height, db_conn, ctx))
}
pub fn find_latest_cursed_inscription_number_at_block_height(
pub fn find_nth_classic_neg_number_at_block_height(
block_height: &u64,
db_conn: &Connection,
ctx: &Context,
) -> Option<i64> {
let args: &[&dyn ToSql] = &[&block_height.to_sql().unwrap()];
let query = "SELECT latest_cursed_inscription_number FROM sequence_metadata WHERE block_height < ? ORDER BY block_height DESC LIMIT 1";
let query = "SELECT nth_classic_neg_number FROM sequence_metadata WHERE block_height < ? ORDER BY block_height DESC LIMIT 1";
perform_query_one(query, args, db_conn, ctx, |row| {
let inscription_number: i64 = row.get(0).unwrap();
inscription_number
})
.or_else(|| {
compute_latest_cursed_inscription_number_at_block_height(block_height, db_conn, ctx)
})
.or_else(|| compute_nth_classic_neg_number_at_block_height(block_height, db_conn, ctx))
}
pub fn compute_latest_inscription_number_at_block_height(
pub fn find_nth_jubilee_number_at_block_height(
block_height: &u64,
db_conn: &Connection,
ctx: &Context,
) -> Option<i64> {
let args: &[&dyn ToSql] = &[&block_height.to_sql().unwrap()];
let query = "SELECT nth_jubilee_number FROM sequence_metadata WHERE block_height < ? ORDER BY block_height DESC LIMIT 1";
perform_query_one(query, args, db_conn, ctx, |row| {
let inscription_number: i64 = row.get(0).unwrap();
inscription_number
})
.or_else(|| compute_nth_jubilee_number_at_block_height(block_height, db_conn, ctx))
}
pub fn compute_nth_jubilee_number_at_block_height(
block_height: &u64,
db_conn: &Connection,
ctx: &Context,
@@ -904,14 +983,14 @@ pub fn compute_latest_inscription_number_at_block_height(
)
});
let args: &[&dyn ToSql] = &[&block_height.to_sql().unwrap()];
let query = "SELECT inscription_number FROM inscriptions WHERE block_height < ? ORDER BY inscription_number DESC LIMIT 1";
let query = "SELECT jubilee_inscription_number FROM inscriptions WHERE block_height < ? ORDER BY jubilee_inscription_number DESC LIMIT 1";
perform_query_one(query, args, db_conn, ctx, |row| {
let inscription_number: i64 = row.get(0).unwrap();
inscription_number
})
}
pub fn compute_latest_cursed_inscription_number_at_block_height(
pub fn compute_nth_classic_pos_number_at_block_height(
block_height: &u64,
db_conn: &Connection,
ctx: &Context,
@@ -919,11 +998,30 @@ pub fn compute_latest_cursed_inscription_number_at_block_height(
ctx.try_log(|logger| {
warn!(
logger,
"Start computing latest_cursed_inscription_number at block height: {block_height}"
"Start computing latest_inscription_number at block height: {block_height}"
)
});
let args: &[&dyn ToSql] = &[&block_height.to_sql().unwrap()];
let query = "SELECT inscription_number FROM inscriptions WHERE block_height < ? ORDER BY inscription_number ASC LIMIT 1";
let query = "SELECT classic_inscription_number FROM inscriptions WHERE block_height < ? ORDER BY classic_inscription_number DESC LIMIT 1";
perform_query_one(query, args, db_conn, ctx, |row| {
let inscription_number: i64 = row.get(0).unwrap();
inscription_number
})
}
pub fn compute_nth_classic_neg_number_at_block_height(
block_height: &u64,
db_conn: &Connection,
ctx: &Context,
) -> Option<i64> {
ctx.try_log(|logger| {
warn!(
logger,
"Start computing nth_classic_neg_number at block height: {block_height}"
)
});
let args: &[&dyn ToSql] = &[&block_height.to_sql().unwrap()];
let query = "SELECT classic_inscription_number FROM inscriptions WHERE block_height < ? ORDER BY classic_inscription_number ASC LIMIT 1";
perform_query_one(query, args, db_conn, ctx, |row| {
let inscription_number: i64 = row.get(0).unwrap();
inscription_number
@@ -936,7 +1034,7 @@ pub fn find_blessed_inscription_with_ordinal_number(
ctx: &Context,
) -> Option<String> {
let args: &[&dyn ToSql] = &[&ordinal_number.to_sql().unwrap()];
let query = "SELECT inscription_id FROM inscriptions WHERE ordinal_number = ? AND inscription_number >= 0";
let query = "SELECT inscription_id FROM inscriptions WHERE ordinal_number = ? AND classic_inscription_number >= 0";
perform_query_one(query, args, db_conn, ctx, |row| {
let inscription_id: String = row.get(0).unwrap();
inscription_id
@@ -953,13 +1051,16 @@ pub fn find_inscription_with_id(
return Err(format!("unable to retrieve location for {inscription_id}"));
};
let args: &[&dyn ToSql] = &[&inscription_id.to_sql().unwrap()];
let query = "SELECT inscription_number, ordinal_number, block_height FROM inscriptions WHERE inscription_id = ?";
let query = "SELECT classic_inscription_number, jubilee_inscription_number, ordinal_number, block_height, input_index FROM inscriptions WHERE inscription_id = ?";
let entry = perform_query_one(query, args, db_conn, ctx, move |row| {
let inscription_number: i64 = row.get(0).unwrap();
let ordinal_number: u64 = row.get(1).unwrap();
let block_height: u64 = row.get(2).unwrap();
let (transaction_identifier_inscription, inscription_input_index) =
parse_inscription_id(inscription_id);
let inscription_number = OrdinalInscriptionNumber {
classic: row.get(0).unwrap(),
jubilee: row.get(1).unwrap(),
};
let ordinal_number: u64 = row.get(2).unwrap();
let block_height: u64 = row.get(3).unwrap();
let inscription_input_index: usize = row.get(4).unwrap();
let (transaction_identifier_inscription, _) = parse_inscription_id(inscription_id);
(
inscription_number,
ordinal_number,
@@ -1001,7 +1102,7 @@ pub fn find_all_inscriptions_in_block(
let args: &[&dyn ToSql] = &[&block_height.to_sql().unwrap()];
let mut stmt = loop {
match inscriptions_db_tx.prepare("SELECT inscription_number, ordinal_number, inscription_id FROM inscriptions where block_height = ? ORDER BY inscription_number ASC")
match inscriptions_db_tx.prepare("SELECT classic_inscription_number, jubilee_inscription_number, ordinal_number, inscription_id, input_index FROM inscriptions where block_height = ?")
{
Ok(stmt) => break stmt,
Err(e) => {
@@ -1028,10 +1129,14 @@ pub fn find_all_inscriptions_in_block(
loop {
match rows.next() {
Ok(Some(row)) => {
let inscription_number: i64 = row.get(0).unwrap();
let ordinal_number: u64 = row.get(1).unwrap();
let inscription_id: String = row.get(2).unwrap();
let (transaction_identifier_inscription, inscription_input_index) =
let inscription_number = OrdinalInscriptionNumber {
classic: row.get(0).unwrap(),
jubilee: row.get(1).unwrap(),
};
let ordinal_number: u64 = row.get(2).unwrap();
let inscription_id: String = row.get(3).unwrap();
let inscription_input_index: usize = row.get(4).unwrap();
let (transaction_identifier_inscription, _) =
{ parse_inscription_id(&inscription_id) };
let Some(transfer_data) = transfers_data
.get(&inscription_id)
@@ -1077,13 +1182,6 @@ pub struct WatchedSatpoint {
pub offset: u64,
}
impl WatchedSatpoint {
pub fn get_genesis_satpoint(&self) -> String {
let (transaction_id, input) = parse_inscription_id(&self.inscription_id);
format!("{}:{}", transaction_id.hash, input)
}
}
pub fn find_watched_satpoint_for_inscription(
inscription_id: &str,
db_conn: &Connection,
@@ -1237,7 +1335,7 @@ pub fn delete_data_in_ordhook_db(
#[derive(Clone, Debug)]
pub struct TraversalResult {
pub inscription_number: i64,
pub inscription_number: OrdinalInscriptionNumber,
pub inscription_input_index: usize,
pub transaction_identifier_inscription: TransactionIdentifier,
pub ordinal_number: u64,
@@ -1278,6 +1376,17 @@ pub fn format_satpoint_to_watch(
)
}
pub fn format_inscription_id(
transaction_identifier: &TransactionIdentifier,
inscription_subindex: usize,
) -> String {
format!(
"{}i{}",
transaction_identifier.get_hash_bytes_str(),
inscription_subindex,
)
}
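Inscription ids follow the "<txid>i<subindex>" convention, so the second inscription revealed by one transaction ends in "i1". A small illustration:

fn example_ids(tx: &TransactionIdentifier) {
    let first = format_inscription_id(tx, 0);
    let second = format_inscription_id(tx, 1);
    assert!(first.ends_with("i0") && second.ends_with("i1"));
}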
pub fn parse_satpoint_to_watch(outpoint_to_watch: &str) -> (TransactionIdentifier, usize, u64) {
let comps: Vec<&str> = outpoint_to_watch.split(":").collect();
let tx = TransactionIdentifier::new(comps[0]);
@@ -1324,21 +1433,21 @@ pub fn parse_outpoint_to_watch(outpoint_to_watch: &str) -> (TransactionIdentifie
}
#[derive(Debug)]
pub struct LazyBlock {
pub bytes: Vec<u8>,
pub struct BlockBytesCursor<'a> {
pub bytes: &'a [u8],
pub tx_len: u16,
}
#[derive(Debug, Clone)]
pub struct LazyBlockTransaction {
pub struct TransactionBytesCursor {
pub txid: [u8; 8],
pub inputs: Vec<LazyBlockTransactionInput>,
pub inputs: Vec<TransactionInputBytesCursor>,
pub outputs: Vec<u64>,
}
impl LazyBlockTransaction {
impl TransactionBytesCursor {
pub fn get_average_bytes_size() -> usize {
TXID_LEN + 3 * LazyBlockTransactionInput::get_average_bytes_size() + 3 * SATS_LEN
TXID_LEN + 3 * TransactionInputBytesCursor::get_average_bytes_size() + 3 * SATS_LEN
}
pub fn get_sat_ranges(&self) -> Vec<(u64, u64)> {
@@ -1364,14 +1473,14 @@ impl LazyBlockTransaction {
}
#[derive(Debug, Clone)]
pub struct LazyBlockTransactionInput {
pub struct TransactionInputBytesCursor {
pub txin: [u8; 8],
pub block_height: u32,
pub vout: u16,
pub txin_value: u64,
}
impl LazyBlockTransactionInput {
impl TransactionInputBytesCursor {
pub fn get_average_bytes_size() -> usize {
TXID_LEN + SATS_LEN + 4 + 2
}
@@ -1382,16 +1491,20 @@ const SATS_LEN: usize = 8;
const INPUT_SIZE: usize = TXID_LEN + 4 + 2 + SATS_LEN;
const OUTPUT_SIZE: usize = 8;
impl LazyBlock {
pub fn new(bytes: Vec<u8>) -> LazyBlock {
impl<'a> BlockBytesCursor<'a> {
pub fn new(bytes: &[u8]) -> BlockBytesCursor {
let tx_len = u16::from_be_bytes([bytes[0], bytes[1]]);
LazyBlock { bytes, tx_len }
BlockBytesCursor { bytes, tx_len }
}
pub fn get_coinbase_data_pos(&self) -> usize {
(2 + self.tx_len * 2 * 2) as usize
}
pub fn get_coinbase_outputs_len(&self) -> usize {
u16::from_be_bytes([self.bytes[4], self.bytes[5]]) as usize
}
pub fn get_u64_at_pos(&self, pos: usize) -> u64 {
u64::from_be_bytes([
self.bytes[pos],
@@ -1410,13 +1523,8 @@ impl LazyBlock {
&self.bytes[pos..pos + TXID_LEN]
}
pub fn get_coinbase_sats(&self) -> u64 {
let pos = self.get_coinbase_data_pos() + TXID_LEN;
self.get_u64_at_pos(pos)
}
pub fn get_transactions_data_pos(&self) -> usize {
self.get_coinbase_data_pos() + TXID_LEN + SATS_LEN
self.get_coinbase_data_pos()
}
pub fn get_transaction_format(&self, index: u16) -> (u16, u16, usize) {
@@ -1431,13 +1539,13 @@ impl LazyBlock {
(inputs, outputs, size)
}
pub fn get_lazy_transaction_at_pos(
pub fn get_transaction_bytes_cursor_at_pos(
&self,
cursor: &mut Cursor<&Vec<u8>>,
cursor: &mut Cursor<&[u8]>,
txid: [u8; 8],
inputs_len: u16,
outputs_len: u16,
) -> LazyBlockTransaction {
) -> TransactionBytesCursor {
let mut inputs = Vec::with_capacity(inputs_len as usize);
for _ in 0..inputs_len {
let mut txin = [0u8; 8];
@@ -1450,7 +1558,7 @@ impl LazyBlock {
cursor.read_exact(&mut vout).expect("data corrupted");
let mut txin_value = [0u8; 8];
cursor.read_exact(&mut txin_value).expect("data corrupted");
inputs.push(LazyBlockTransactionInput {
inputs.push(TransactionInputBytesCursor {
txin: txin,
block_height: u32::from_be_bytes(block_height),
vout: u16::from_be_bytes(vout),
@@ -1463,7 +1571,7 @@ impl LazyBlock {
cursor.read_exact(&mut value).expect("data corrupted");
outputs.push(u64::from_be_bytes(value))
}
LazyBlockTransaction {
TransactionBytesCursor {
txid,
inputs,
outputs,
@@ -1473,10 +1581,10 @@ impl LazyBlock {
pub fn find_and_serialize_transaction_with_txid(
&self,
searched_txid: &[u8],
) -> Option<LazyBlockTransaction> {
) -> Option<TransactionBytesCursor> {
// println!("{:?}", hex::encode(searched_txid));
let mut entry = None;
let mut cursor = Cursor::new(&self.bytes);
let mut cursor = Cursor::new(self.bytes);
let mut cumulated_offset = 0;
let mut i = 0;
while entry.is_none() {
@@ -1488,7 +1596,7 @@ impl LazyBlock {
let _ = cursor.read_exact(&mut txid);
// println!("-> {}", hex::encode(txid));
if searched_txid.eq(&txid) {
entry = Some(self.get_lazy_transaction_at_pos(
entry = Some(self.get_transaction_bytes_cursor_at_pos(
&mut cursor,
txid,
inputs_len,
@@ -1505,19 +1613,19 @@ impl LazyBlock {
entry
}
pub fn iter_tx(&self) -> LazyBlockTransactionIterator {
LazyBlockTransactionIterator::new(&self)
pub fn iter_tx(&self) -> TransactionBytesCursorIterator {
TransactionBytesCursorIterator::new(&self)
}
pub fn from_full_block(block: &BitcoinBlockFullBreakdown) -> std::io::Result<LazyBlock> {
pub fn from_full_block<'b>(block: &BitcoinBlockFullBreakdown) -> std::io::Result<Vec<u8>> {
let mut buffer = vec![];
// Number of transactions in the block (including coinbase)
let tx_len = block.tx.len() as u16 - 1;
let tx_len = block.tx.len() as u16;
buffer.write(&tx_len.to_be_bytes())?;
// For each transaction:
let u16_max = u16::MAX as usize;
for tx in block.tx.iter().skip(1) {
let inputs_len = if tx.vin.len() > u16_max {
for (i, tx) in block.tx.iter().enumerate() {
let mut inputs_len = if tx.vin.len() > u16_max {
0
} else {
tx.vin.len() as u16
@@ -1527,27 +1635,16 @@ impl LazyBlock {
} else {
tx.vout.len() as u16
};
if i == 0 {
inputs_len = 0;
}
// Number of inputs
buffer.write(&inputs_len.to_be_bytes())?;
// Number of outputs
buffer.write(&outputs_len.to_be_bytes())?;
}
// Coinbase transaction txid - 8 first bytes
let coinbase_txid = {
let txid = hex::decode(block.tx[0].txid.to_string()).unwrap();
[
txid[0], txid[1], txid[2], txid[3], txid[4], txid[5], txid[6], txid[7],
]
};
buffer.write_all(&coinbase_txid)?;
// Coinbase transaction value
let mut coinbase_value = 0;
for coinbase_output in block.tx[0].vout.iter() {
coinbase_value += coinbase_output.value.to_sat();
}
buffer.write(&coinbase_value.to_be_bytes())?;
// For each transaction:
for tx in block.tx.iter().skip(1) {
for tx in block.tx.iter() {
// txid - 8 first bytes
let txid = {
let txid = hex::decode(tx.txid.to_string()).unwrap();
@@ -1572,8 +1669,11 @@ impl LazyBlock {
for i in 0..inputs_len {
let input = &tx.vin[i];
// txin - 8 first bytes
let Some(input_txid) = input.txid.as_ref() else {
continue;
};
let txin = {
let txid = hex::decode(input.txid.as_ref().unwrap().to_string()).unwrap();
let txid = hex::decode(input_txid).unwrap();
[
txid[0], txid[1], txid[2], txid[3], txid[4], txid[5], txid[6], txid[7],
]
@@ -1596,53 +1696,48 @@ impl LazyBlock {
buffer.write(&sats.to_be_bytes())?;
}
}
Ok(Self::new(buffer))
Ok(buffer)
}
pub fn from_standardized_block(block: &BitcoinBlockData) -> std::io::Result<LazyBlock> {
pub fn from_standardized_block<'b>(block: &BitcoinBlockData) -> std::io::Result<Vec<u8>> {
let mut buffer = vec![];
// Number of transactions in the block (including coinbase)
let tx_len = block.transactions.len() as u16 - 1;
let tx_len = block.transactions.len() as u16;
buffer.write(&tx_len.to_be_bytes())?;
// For each transaction:
for tx in block.transactions.iter().skip(1) {
let inputs_len = tx.metadata.inputs.len() as u16;
for (i, tx) in block.transactions.iter().enumerate() {
let inputs_len = if i > 0 {
tx.metadata.inputs.len() as u16
} else {
0
};
let outputs_len = tx.metadata.outputs.len() as u16;
// Number of inputs
buffer.write(&inputs_len.to_be_bytes())?;
// Number of outputs
buffer.write(&outputs_len.to_be_bytes())?;
}
// Coinbase transaction txid - 8 first bytes
let coinbase_txid = block.transactions[0]
.transaction_identifier
.get_8_hash_bytes();
buffer.write_all(&coinbase_txid)?;
// Coinbase transaction value
let mut coinbase_value = 0;
for coinbase_output in block.transactions[0].metadata.outputs.iter() {
coinbase_value += coinbase_output.value;
}
buffer.write_all(&coinbase_value.to_be_bytes())?;
// For each transaction:
for tx in block.transactions.iter().skip(1) {
for (i, tx) in block.transactions.iter().enumerate() {
// txid - 8 first bytes
let txid = tx.transaction_identifier.get_8_hash_bytes();
buffer.write_all(&txid)?;
// For each transaction input:
for input in tx.metadata.inputs.iter() {
// txin - 8 first bytes
let txin = input.previous_output.txid.get_8_hash_bytes();
buffer.write_all(&txin)?;
// txin's block height
let block_height = input.previous_output.block_height as u32;
buffer.write(&block_height.to_be_bytes())?;
// txin's vout index
let vout = input.previous_output.vout as u16;
buffer.write(&vout.to_be_bytes())?;
// txin's sats value
let sats = input.previous_output.value;
buffer.write(&sats.to_be_bytes())?;
// For each non coinbase transaction input:
if i > 0 {
for input in tx.metadata.inputs.iter() {
// txin - 8 first bytes
let txin = input.previous_output.txid.get_8_hash_bytes();
buffer.write_all(&txin)?;
// txin's block height
let block_height = input.previous_output.block_height as u32;
buffer.write(&block_height.to_be_bytes())?;
// txin's vout index
let vout = input.previous_output.vout as u16;
buffer.write(&vout.to_be_bytes())?;
// txin's sats value
let sats = input.previous_output.value;
buffer.write(&sats.to_be_bytes())?;
}
}
// For each transaction output:
for output in tx.metadata.outputs.iter() {
@@ -1650,43 +1745,45 @@ impl LazyBlock {
buffer.write(&sats.to_be_bytes())?;
}
}
Ok(Self::new(buffer))
Ok(buffer)
}
}
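A recap of the serialized layout the two writers above produce (big-endian throughout; sizes match the TXID_LEN, SATS_LEN, INPUT_SIZE, and OUTPUT_SIZE constants in this file). The coinbase transaction is kept in the table but forced to zero inputs:

//   [tx_len: u16]
//   tx_len x ([inputs_len: u16][outputs_len: u16])
//   tx_len x ([txid: 8 bytes]
//             inputs_len  x [txin: 8][block_height: u32][vout: u16][value: u64]
//             outputs_len x [value: u64])

fn tx_record_size(inputs_len: usize, outputs_len: usize) -> usize {
    const TXID_LEN: usize = 8;
    const INPUT_SIZE: usize = 8 + 4 + 2 + 8; // txin + height + vout + sats
    const OUTPUT_SIZE: usize = 8;
    TXID_LEN + inputs_len * INPUT_SIZE + outputs_len * OUTPUT_SIZE
}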
pub struct LazyBlockTransactionIterator<'a> {
lazy_block: &'a LazyBlock,
pub struct TransactionBytesCursorIterator<'a> {
block_bytes_cursor: &'a BlockBytesCursor<'a>,
tx_index: u16,
cumulated_offset: usize,
}
impl<'a> LazyBlockTransactionIterator<'a> {
pub fn new(lazy_block: &'a LazyBlock) -> LazyBlockTransactionIterator<'a> {
LazyBlockTransactionIterator {
lazy_block,
impl<'a> TransactionBytesCursorIterator<'a> {
pub fn new(block_bytes_cursor: &'a BlockBytesCursor) -> TransactionBytesCursorIterator<'a> {
TransactionBytesCursorIterator {
block_bytes_cursor,
tx_index: 0,
cumulated_offset: 0,
}
}
}
impl<'a> Iterator for LazyBlockTransactionIterator<'a> {
type Item = LazyBlockTransaction;
impl<'a> Iterator for TransactionBytesCursorIterator<'a> {
type Item = TransactionBytesCursor;
fn next(&mut self) -> Option<LazyBlockTransaction> {
if self.tx_index >= self.lazy_block.tx_len {
fn next(&mut self) -> Option<TransactionBytesCursor> {
if self.tx_index >= self.block_bytes_cursor.tx_len {
return None;
}
let pos = self.lazy_block.get_transactions_data_pos() + self.cumulated_offset;
let (inputs_len, outputs_len, size) = self.lazy_block.get_transaction_format(self.tx_index);
let pos = self.block_bytes_cursor.get_transactions_data_pos() + self.cumulated_offset;
let (inputs_len, outputs_len, size) = self
.block_bytes_cursor
.get_transaction_format(self.tx_index);
// println!("{inputs_len} / {outputs_len} / {size}");
let mut cursor = Cursor::new(&self.lazy_block.bytes);
let mut cursor = Cursor::new(self.block_bytes_cursor.bytes);
cursor.set_position(pos as u64);
let mut txid = [0u8; 8];
let _ = cursor.read_exact(&mut txid);
self.cumulated_offset += size;
self.tx_index += 1;
Some(self.lazy_block.get_lazy_transaction_at_pos(
Some(self.block_bytes_cursor.get_transaction_bytes_cursor_at_pos(
&mut cursor,
txid,
inputs_len,
@@ -1694,3 +1791,103 @@ impl<'a> Iterator for LazyBlockTransactionIterator<'a> {
))
}
}
#[cfg(test)]
mod tests {
use super::*;
use chainhook_sdk::{
indexer::bitcoin::{parse_downloaded_block, standardize_bitcoin_block},
types::BitcoinNetwork,
};
#[test]
fn test_block_cursor_roundtrip() {
let ctx = Context::empty();
let block = include_str!("./fixtures/blocks_json/279671.json");
let decoded_block =
parse_downloaded_block(block.as_bytes().to_vec()).expect("unable to decode block");
let standardized_block =
standardize_bitcoin_block(decoded_block.clone(), &BitcoinNetwork::Mainnet, &ctx)
.expect("unable to standardize block");
for (index, (tx_in, tx_out)) in decoded_block
.tx
.iter()
.zip(standardized_block.transactions.iter())
.enumerate()
{
// Test outputs
assert_eq!(tx_in.vout.len(), tx_out.metadata.outputs.len());
for (output, src) in tx_out.metadata.outputs.iter().zip(tx_in.vout.iter()) {
assert_eq!(output.value, src.value.to_sat());
}
// Test inputs (non-coinbase transactions only)
if index == 0 {
continue;
}
assert_eq!(tx_in.vin.len(), tx_out.metadata.inputs.len());
for (input, src) in tx_out.metadata.inputs.iter().zip(tx_in.vin.iter()) {
assert_eq!(
input.previous_output.block_height,
src.prevout.as_ref().unwrap().height
);
assert_eq!(
input.previous_output.value,
src.prevout.as_ref().unwrap().value.to_sat()
);
let txin = hex::decode(src.txid.as_ref().unwrap()).unwrap();
assert_eq!(input.previous_output.txid.get_hash_bytes(), txin);
assert_eq!(input.previous_output.vout, src.vout.unwrap());
}
}
let bytes = BlockBytesCursor::from_full_block(&decoded_block).expect("unable to serialize");
let bytes_via_standardized = BlockBytesCursor::from_standardized_block(&standardized_block)
.expect("unable to serialize");
assert_eq!(bytes, bytes_via_standardized);
let block_bytes_cursor = BlockBytesCursor::new(&bytes);
assert_eq!(decoded_block.tx.len(), block_bytes_cursor.tx_len as usize);
// Test helpers
let coinbase_txid = block_bytes_cursor.get_coinbase_txid();
assert_eq!(
coinbase_txid,
standardized_block.transactions[0]
.transaction_identifier
.get_8_hash_bytes()
);
// Test transactions
for (index, (tx_in, tx_out)) in decoded_block
.tx
.iter()
.zip(block_bytes_cursor.iter_tx())
.enumerate()
{
// Test outputs
assert_eq!(tx_in.vout.len(), tx_out.outputs.len());
for (sats, src) in tx_out.outputs.iter().zip(tx_in.vout.iter()) {
assert_eq!(*sats, src.value.to_sat());
}
// Test inputs (non-coinbase transactions only)
if index == 0 {
continue;
}
assert_eq!(tx_in.vin.len(), tx_out.inputs.len());
for (tx_bytes_cursor, src) in tx_out.inputs.iter().zip(tx_in.vin.iter()) {
assert_eq!(
tx_bytes_cursor.block_height as u64,
src.prevout.as_ref().unwrap().height
);
assert_eq!(
tx_bytes_cursor.txin_value,
src.prevout.as_ref().unwrap().value.to_sat()
);
let txin = hex::decode(src.txid.as_ref().unwrap()).unwrap();
assert_eq!(tx_bytes_cursor.txin, txin[0..tx_bytes_cursor.txin.len()]);
assert_eq!(tx_bytes_cursor.vout as u32, src.vout.unwrap());
}
}
}
}
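// Usage sketch (illustrative, not part of this diff; names mirror the test
// above): serialize a standardized block once, then stream its transactions
// through the cursor without materializing the whole block again.
//
// let bytes = BlockBytesCursor::from_standardized_block(&standardized_block)
//     .expect("unable to serialize");
// let block_bytes_cursor = BlockBytesCursor::new(&bytes);
// for tx in block_bytes_cursor.iter_tx() {
//     // `outputs` holds output values in sats, per the assertions above.
//     let _total_sats: u64 = tx.outputs.iter().copied().sum();
// }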


@@ -65,10 +65,7 @@ impl Chain {
bitcoin::blockdata::constants::genesis_block(self.network())
}
pub fn address_from_script(self, script: &Script) -> Result<Address, bitcoin::address::Error> {
Address::from_script(script, self.network())
}

File diff suppressed because it is too large.


@@ -1,6 +1,4 @@
use super::{height::Height, sat::Sat, COIN_VALUE, SUBSIDY_HALVING_INTERVAL};
#[derive(Copy, Clone, Eq, PartialEq, Debug, PartialOrd)]
pub(crate) struct Epoch(pub(crate) u64);


@@ -0,0 +1,614 @@
use chainhook_sdk::bitcoin::{hashes::Hash, Txid};
use super::{inscription_id::InscriptionId, media::Media};
use {
super::*,
chainhook_sdk::bitcoin::{
blockdata::{
opcodes,
script::{self, PushBytesBuf},
},
ScriptBuf,
},
std::str,
};
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Eq, Default)]
pub struct Inscription {
pub body: Option<Vec<u8>>,
pub content_encoding: Option<Vec<u8>>,
pub content_type: Option<Vec<u8>>,
pub duplicate_field: bool,
pub incomplete_field: bool,
pub metadata: Option<Vec<u8>>,
pub metaprotocol: Option<Vec<u8>>,
pub parent: Option<Vec<u8>>,
pub pointer: Option<Vec<u8>>,
pub unrecognized_even_field: bool,
}
impl Inscription {
#[cfg(test)]
pub(crate) fn new(content_type: Option<Vec<u8>>, body: Option<Vec<u8>>) -> Self {
Self {
content_type,
body,
..Default::default()
}
}
pub(crate) fn pointer_value(pointer: u64) -> Vec<u8> {
let mut bytes = pointer.to_le_bytes().to_vec();
while bytes.last().copied() == Some(0) {
bytes.pop();
}
bytes
}
pub(crate) fn append_reveal_script_to_builder(
&self,
mut builder: script::Builder,
) -> script::Builder {
builder = builder
.push_opcode(opcodes::OP_FALSE)
.push_opcode(opcodes::all::OP_IF)
.push_slice(envelope::PROTOCOL_ID);
if let Some(content_type) = self.content_type.clone() {
builder = builder
.push_slice(envelope::CONTENT_TYPE_TAG)
.push_slice(PushBytesBuf::try_from(content_type).unwrap());
}
if let Some(content_encoding) = self.content_encoding.clone() {
builder = builder
.push_slice(envelope::CONTENT_ENCODING_TAG)
.push_slice(PushBytesBuf::try_from(content_encoding).unwrap());
}
if let Some(protocol) = self.metaprotocol.clone() {
builder = builder
.push_slice(envelope::METAPROTOCOL_TAG)
.push_slice(PushBytesBuf::try_from(protocol).unwrap());
}
if let Some(parent) = self.parent.clone() {
builder = builder
.push_slice(envelope::PARENT_TAG)
.push_slice(PushBytesBuf::try_from(parent).unwrap());
}
if let Some(pointer) = self.pointer.clone() {
builder = builder
.push_slice(envelope::POINTER_TAG)
.push_slice(PushBytesBuf::try_from(pointer).unwrap());
}
if let Some(metadata) = &self.metadata {
for chunk in metadata.chunks(520) {
builder = builder.push_slice(envelope::METADATA_TAG);
builder = builder.push_slice(PushBytesBuf::try_from(chunk.to_vec()).unwrap());
}
}
if let Some(body) = &self.body {
builder = builder.push_slice(envelope::BODY_TAG);
for chunk in body.chunks(520) {
builder = builder.push_slice(PushBytesBuf::try_from(chunk.to_vec()).unwrap());
}
}
builder.push_opcode(opcodes::all::OP_ENDIF)
}
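// Shape sketch (assumes the standard ord envelope tag constants: content
// type = 1, pointer = 2, body = 0; tag 2 is confirmed by the pointer_encode
// test below): a minimal text inscription reveal script unrolls to roughly
//   OP_FALSE OP_IF <"ord"> <0x01> <content-type> <0x00> <body chunks> OP_ENDIF
// with body and metadata each split into 520-byte pushes to respect the
// script push-size limit.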
#[cfg(test)]
pub(crate) fn append_reveal_script(&self, builder: script::Builder) -> ScriptBuf {
self.append_reveal_script_to_builder(builder).into_script()
}
pub(crate) fn append_batch_reveal_script_to_builder(
inscriptions: &[Inscription],
mut builder: script::Builder,
) -> script::Builder {
for inscription in inscriptions {
builder = inscription.append_reveal_script_to_builder(builder);
}
builder
}
pub(crate) fn append_batch_reveal_script(
inscriptions: &[Inscription],
builder: script::Builder,
) -> ScriptBuf {
Inscription::append_batch_reveal_script_to_builder(inscriptions, builder).into_script()
}
pub(crate) fn media(&self) -> Media {
if self.body.is_none() {
return Media::Unknown;
}
let Some(content_type) = self.content_type() else {
return Media::Unknown;
};
content_type.parse().unwrap_or(Media::Unknown)
}
pub(crate) fn body(&self) -> Option<&[u8]> {
Some(self.body.as_ref()?)
}
pub(crate) fn into_body(self) -> Option<Vec<u8>> {
self.body
}
pub(crate) fn content_length(&self) -> Option<usize> {
Some(self.body()?.len())
}
pub(crate) fn content_type(&self) -> Option<&str> {
str::from_utf8(self.content_type.as_ref()?).ok()
}
pub(crate) fn metaprotocol(&self) -> Option<&str> {
str::from_utf8(self.metaprotocol.as_ref()?).ok()
}
pub(crate) fn parent(&self) -> Option<InscriptionId> {
use chainhook_sdk::bitcoin::hash_types::Txid as TXID_LEN;
let value = self.parent.as_ref()?;
if value.len() < TXID_LEN::LEN {
return None;
}
if value.len() > TXID_LEN::LEN + 4 {
return None;
}
let (txid, index) = value.split_at(TXID_LEN::LEN);
if let Some(last) = index.last() {
// Accept fixed length encoding with 4 bytes (with potential trailing zeroes)
// or variable length (no trailing zeroes)
if index.len() != 4 && *last == 0 {
return None;
}
}
let txid = Txid::from_slice(txid).unwrap();
let index = [
index.first().copied().unwrap_or(0),
index.get(1).copied().unwrap_or(0),
index.get(2).copied().unwrap_or(0),
index.get(3).copied().unwrap_or(0),
];
let index = u32::from_le_bytes(index);
Some(InscriptionId { txid, index })
}
pub(crate) fn pointer(&self) -> Option<u64> {
let value = self.pointer.as_ref()?;
if value.iter().skip(8).copied().any(|byte| byte != 0) {
return None;
}
let pointer = [
value.first().copied().unwrap_or(0),
value.get(1).copied().unwrap_or(0),
value.get(2).copied().unwrap_or(0),
value.get(3).copied().unwrap_or(0),
value.get(4).copied().unwrap_or(0),
value.get(5).copied().unwrap_or(0),
value.get(6).copied().unwrap_or(0),
value.get(7).copied().unwrap_or(0),
];
Some(u64::from_le_bytes(pointer))
}
#[cfg(test)]
pub(crate) fn to_witness(&self) -> chainhook_sdk::bitcoin::Witness {
let builder = script::Builder::new();
let script = self.append_reveal_script(builder);
let mut witness = chainhook_sdk::bitcoin::Witness::new();
witness.push(script);
witness.push([]);
witness
}
}
#[cfg(test)]
mod tests {
use chainhook_sdk::bitcoin::Witness;
use super::*;
fn inscription(content_type: &str, body: impl AsRef<[u8]>) -> Inscription {
Inscription::new(Some(content_type.into()), Some(body.as_ref().into()))
}
fn envelope(payload: &[&[u8]]) -> Witness {
let mut builder = script::Builder::new()
.push_opcode(opcodes::OP_FALSE)
.push_opcode(opcodes::all::OP_IF);
for data in payload {
let mut buf = PushBytesBuf::new();
buf.extend_from_slice(data).unwrap();
builder = builder.push_slice(buf);
}
let script = builder.push_opcode(opcodes::all::OP_ENDIF).into_script();
Witness::from_slice(&[script.into_bytes(), Vec::new()])
}
#[test]
fn reveal_script_chunks_body() {
assert_eq!(
inscription("foo", [])
.append_reveal_script(script::Builder::new())
.instructions()
.count(),
7
);
assert_eq!(
inscription("foo", [0; 1])
.append_reveal_script(script::Builder::new())
.instructions()
.count(),
8
);
assert_eq!(
inscription("foo", [0; 520])
.append_reveal_script(script::Builder::new())
.instructions()
.count(),
8
);
assert_eq!(
inscription("foo", [0; 521])
.append_reveal_script(script::Builder::new())
.instructions()
.count(),
9
);
assert_eq!(
inscription("foo", [0; 1040])
.append_reveal_script(script::Builder::new())
.instructions()
.count(),
9
);
assert_eq!(
inscription("foo", [0; 1041])
.append_reveal_script(script::Builder::new())
.instructions()
.count(),
10
);
}
#[test]
fn reveal_script_chunks_metadata() {
assert_eq!(
Inscription {
metadata: None,
..Default::default()
}
.append_reveal_script(script::Builder::new())
.instructions()
.count(),
4
);
assert_eq!(
Inscription {
metadata: Some(Vec::new()),
..Default::default()
}
.append_reveal_script(script::Builder::new())
.instructions()
.count(),
4
);
assert_eq!(
Inscription {
metadata: Some(vec![0; 1]),
..Default::default()
}
.append_reveal_script(script::Builder::new())
.instructions()
.count(),
6
);
assert_eq!(
Inscription {
metadata: Some(vec![0; 520]),
..Default::default()
}
.append_reveal_script(script::Builder::new())
.instructions()
.count(),
6
);
assert_eq!(
Inscription {
metadata: Some(vec![0; 521]),
..Default::default()
}
.append_reveal_script(script::Builder::new())
.instructions()
.count(),
8
);
}
#[test]
fn inscription_with_no_parent_field_has_no_parent() {
assert!(Inscription {
parent: None,
..Default::default()
}
.parent()
.is_none());
}
#[test]
fn inscription_with_parent_field_shorter_than_txid_length_has_no_parent() {
assert!(Inscription {
parent: Some(vec![]),
..Default::default()
}
.parent()
.is_none());
}
#[test]
fn inscription_with_parent_field_longer_than_txid_and_index_has_no_parent() {
assert!(Inscription {
parent: Some(vec![1; 37]),
..Default::default()
}
.parent()
.is_none());
}
#[test]
fn inscription_with_parent_field_index_with_trailing_zeroes_and_fixed_length_has_parent() {
let mut parent = vec![1; 36];
parent[35] = 0;
assert!(Inscription {
parent: Some(parent),
..Default::default()
}
.parent()
.is_some());
}
#[test]
fn inscription_with_parent_field_index_with_trailing_zeroes_and_variable_length_has_no_parent()
{
let mut parent = vec![1; 35];
parent[34] = 0;
assert!(Inscription {
parent: Some(parent),
..Default::default()
}
.parent()
.is_none());
}
#[test]
fn inscription_parent_txid_is_deserialized_correctly() {
assert_eq!(
Inscription {
parent: Some(vec![
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c,
0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19,
0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
]),
..Default::default()
}
.parent()
.unwrap()
.txid,
"1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100"
.parse()
.unwrap()
);
}
#[test]
fn inscription_parent_with_zero_byte_index_field_is_deserialized_correctly() {
assert_eq!(
Inscription {
parent: Some(vec![1; 32]),
..Default::default()
}
.parent()
.unwrap()
.index,
0
);
}
#[test]
fn inscription_parent_with_one_byte_index_field_is_deserialized_correctly() {
assert_eq!(
Inscription {
parent: Some(vec![
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01
]),
..Default::default()
}
.parent()
.unwrap()
.index,
1
);
}
#[test]
fn inscription_parent_with_two_byte_index_field_is_deserialized_correctly() {
assert_eq!(
Inscription {
parent: Some(vec![
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x02
]),
..Default::default()
}
.parent()
.unwrap()
.index,
0x0201,
);
}
#[test]
fn inscription_parent_with_three_byte_index_field_is_deserialized_correctly() {
assert_eq!(
Inscription {
parent: Some(vec![
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x02, 0x03
]),
..Default::default()
}
.parent()
.unwrap()
.index,
0x030201,
);
}
#[test]
fn inscription_parent_with_four_byte_index_field_is_deserialized_correctly() {
assert_eq!(
Inscription {
parent: Some(vec![
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x02, 0x03, 0x04,
]),
..Default::default()
}
.parent()
.unwrap()
.index,
0x04030201,
);
}
#[test]
fn pointer_decode() {
assert_eq!(
Inscription {
pointer: None,
..Default::default()
}
.pointer(),
None
);
assert_eq!(
Inscription {
pointer: Some(vec![0]),
..Default::default()
}
.pointer(),
Some(0),
);
assert_eq!(
Inscription {
pointer: Some(vec![1, 2, 3, 4, 5, 6, 7, 8]),
..Default::default()
}
.pointer(),
Some(0x0807060504030201),
);
assert_eq!(
Inscription {
pointer: Some(vec![1, 2, 3, 4, 5, 6]),
..Default::default()
}
.pointer(),
Some(0x0000060504030201),
);
assert_eq!(
Inscription {
pointer: Some(vec![1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0]),
..Default::default()
}
.pointer(),
Some(0x0807060504030201),
);
assert_eq!(
Inscription {
pointer: Some(vec![1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 1]),
..Default::default()
}
.pointer(),
None,
);
assert_eq!(
Inscription {
pointer: Some(vec![1, 2, 3, 4, 5, 6, 7, 8, 1]),
..Default::default()
}
.pointer(),
None,
);
}
#[test]
fn pointer_encode() {
assert_eq!(
Inscription {
pointer: None,
..Default::default()
}
.to_witness(),
envelope(&[b"ord"]),
);
assert_eq!(
Inscription {
pointer: Some(vec![1, 2, 3]),
..Default::default()
}
.to_witness(),
envelope(&[b"ord", &[2], &[1, 2, 3]]),
);
}
}


@@ -3,7 +3,7 @@ use std::{
str::FromStr,
};
use chainhook_sdk::bitcoin::Txid;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use super::deserialize_from_str::DeserializeFromStr;
@@ -43,7 +43,7 @@ pub enum ParseError {
Character(char),
Length(usize),
Separator(char),
Txid(chainhook_sdk::bitcoin::hashes::hex::HexToArrayError),
Index(std::num::ParseIntError),
}


@@ -0,0 +1,102 @@
use std::{
fmt::{Display, Formatter},
str::FromStr,
};
use anyhow::{anyhow, Error};
#[derive(Debug, PartialEq, Copy, Clone)]
pub(crate) enum Media {
Audio,
Code(Language),
Font,
Iframe,
Image,
Markdown,
Model,
Pdf,
Text,
Unknown,
Video,
}
#[derive(Debug, PartialEq, Copy, Clone)]
pub(crate) enum Language {
Css,
JavaScript,
Json,
Python,
Yaml,
}
impl Display for Language {
fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
write!(
f,
"{}",
match self {
Self::Css => "css",
Self::JavaScript => "javascript",
Self::Json => "json",
Self::Python => "python",
Self::Yaml => "yaml",
}
)
}
}
impl Media {
#[rustfmt::skip]
const TABLE: &'static [(&'static str, Media, &'static [&'static str])] = &[
("application/cbor", Media::Unknown, &["cbor"]),
("application/json", Media::Code(Language::Json), &["json"]),
("application/octet-stream", Media::Unknown, &["bin"]),
("application/pdf", Media::Pdf, &["pdf"]),
("application/pgp-signature", Media::Text, &["asc"]),
("application/protobuf", Media::Unknown, &["binpb"]),
("application/x-javascript", Media::Code(Language::JavaScript), &[]),
("application/yaml", Media::Code(Language::Yaml), &["yaml", "yml"]),
("audio/flac", Media::Audio, &["flac"]),
("audio/mpeg", Media::Audio, &["mp3"]),
("audio/wav", Media::Audio, &["wav"]),
("font/otf", Media::Font, &["otf"]),
("font/ttf", Media::Font, &["ttf"]),
("font/woff", Media::Font, &["woff"]),
("font/woff2", Media::Font, &["woff2"]),
("image/apng", Media::Image, &["apng"]),
("image/avif", Media::Image, &[]),
("image/gif", Media::Image, &["gif"]),
("image/jpeg", Media::Image, &["jpg", "jpeg"]),
("image/png", Media::Image, &["png"]),
("image/svg+xml", Media::Iframe, &["svg"]),
("image/webp", Media::Image, &["webp"]),
("model/gltf+json", Media::Model, &["gltf"]),
("model/gltf-binary", Media::Model, &["glb"]),
("model/stl", Media::Unknown, &["stl"]),
("text/css", Media::Code(Language::Css), &["css"]),
("text/html", Media::Iframe, &[]),
("text/html;charset=utf-8", Media::Iframe, &["html"]),
("text/javascript", Media::Code(Language::JavaScript), &["js"]),
("text/markdown", Media::Markdown, &[]),
("text/markdown;charset=utf-8", Media::Markdown, &["md"]),
("text/plain", Media::Text, &[]),
("text/plain;charset=utf-8", Media::Text, &["txt"]),
("text/x-python", Media::Code(Language::Python), &["py"]),
("video/mp4", Media::Video, &["mp4"]),
("video/webm", Media::Video, &["webm"]),
];
}
impl FromStr for Media {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
for entry in Self::TABLE {
if entry.0 == s {
return Ok(entry.1);
}
}
Err(anyhow!("unknown content type: {s}"))
}
}
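// Usage sketch (illustrative test, not part of this diff): content types
// resolve against TABLE via FromStr; anything unlisted is an error, which
// Inscription::media() maps to Media::Unknown.
#[cfg(test)]
mod usage_sketch {
use super::*;
#[test]
fn from_str_lookup() {
assert_eq!("image/png".parse::<Media>().unwrap(), Media::Image);
assert_eq!(
"text/javascript".parse::<Media>().unwrap(),
Media::Code(Language::JavaScript)
);
assert!("application/x-unknown".parse::<Media>().is_err());
}
}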


@@ -7,9 +7,12 @@ use chainhook_sdk::types::BitcoinNetwork;
pub mod chain;
pub mod deserialize_from_str;
pub mod envelope;
pub mod epoch;
pub mod height;
pub mod inscription;
pub mod inscription_id;
pub mod media;
pub mod sat;
pub mod sat_point;
@@ -18,3 +21,4 @@ const DIFFCHANGE_INTERVAL: u64 =
const SUBSIDY_HALVING_INTERVAL: u64 =
chainhook_sdk::bitcoincore_rpc::bitcoin::blockdata::constants::SUBSIDY_HALVING_INTERVAL as u64;
const CYCLE_EPOCHS: u64 = 6;
pub const COIN_VALUE: u64 = 100_000_000;
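// Sanity sketch (illustrative arithmetic only): COIN_VALUE is sats per BTC,
// so the 50 BTC genesis subsidy is 50 * COIN_VALUE = 5_000_000_000 sats, and
// it halves every SUBSIDY_HALVING_INTERVAL (210_000) blocks.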


@@ -90,7 +90,7 @@ impl AddAssign<u64> for Sat {
#[cfg(test)]
mod tests {
use super::COIN_VALUE;
use super::*;


@@ -139,7 +139,7 @@ pub async fn scan_bitcoin_chainstate_via_rpc_using_predicate(
let inscriptions_revealed = get_inscriptions_revealed_in_block(&block)
.iter()
.map(|d| d.get_inscription_number().to_string())
.collect::<Vec<String>>();
let inscriptions_transferred = get_inscriptions_transferred_in_block(&block).len();
@@ -234,7 +234,7 @@ pub async fn execute_predicates_action<'a>(
Ok(action) => {
actions_triggered += 1;
match action {
BitcoinChainhookOccurrence::Http(request, _data) => {
send_request(request, 60, 3, &ctx).await?
}
BitcoinChainhookOccurrence::File(path, bytes) => {


@@ -17,8 +17,11 @@ use crate::core::{new_traversals_lazy_cache, should_sync_ordhook_db, should_sync
use crate::db::{
delete_data_in_ordhook_db, insert_entry_in_blocks, open_ordhook_db_conn_rocks_db_loop,
open_readwrite_ordhook_db_conn, open_readwrite_ordhook_dbs, update_inscriptions_with_block,
update_locations_with_block, BlockBytesCursor, TransactionBytesCursor,
};
use crate::db::{
find_last_block_inserted, find_missing_blocks, run_compaction,
update_sequence_metadata_with_block,
};
use crate::scan::bitcoin::process_block_with_predicates;
use crate::service::http_api::start_predicate_api_server;
@@ -28,7 +31,6 @@ use crate::service::observers::{
update_observer_streaming_enabled, ObserverReport,
};
use crate::service::runloops::start_bitcoin_scan_runloop;
use chainhook_sdk::chainhooks::types::{
BitcoinChainhookSpecification, ChainhookFullSpecification, ChainhookSpecification,
@@ -69,7 +71,7 @@ impl Service {
let mut event_observer_config = self.config.get_event_observer_config();
// Catch-up with chain tip
let chain_tip_height = self.catch_up_with_chain_tip(false, false).await?;
info!(
self.ctx.expect_logger(),
"Database up to date, service will start streaming blocks"
@@ -442,23 +444,69 @@ impl Service {
pub async fn catch_up_with_chain_tip(
&mut self,
rebuild_from_scratch: bool,
compact_and_check_rocksdb_integrity: bool,
) -> Result<u64, String> {
{
if compact_and_check_rocksdb_integrity {
let (tip, missing_blocks) = {
let blocks_db = open_ordhook_db_conn_rocks_db_loop(
false,
&self.config.expected_cache_path(),
&self.ctx,
);
let tip = find_last_block_inserted(&blocks_db);
info!(
self.ctx.expect_logger(),
"Checking database integrity up to block #{tip}",
);
let missing_blocks = find_missing_blocks(&blocks_db, 0, tip, &self.ctx);
(tip, missing_blocks)
};
if !missing_blocks.is_empty() {
info!(
self.ctx.expect_logger(),
"{} missing blocks detected, will attempt to repair data",
missing_blocks.len()
);
let block_ingestion_processor =
start_block_archiving_processor(&self.config, &self.ctx, false, None);
download_and_pipeline_blocks(
&self.config,
missing_blocks.into_iter().map(|x| x as u64).collect(),
tip.into(),
Some(&block_ingestion_processor),
10_000,
&self.ctx,
)
.await?;
}
let blocks_db_rw = open_ordhook_db_conn_rocks_db_loop(
false,
&self.config.expected_cache_path(),
&self.ctx,
);
info!(self.ctx.expect_logger(), "Running database compaction",);
run_compaction(&blocks_db_rw, tip);
}
if rebuild_from_scratch {
let blocks_db_rw = open_ordhook_db_conn_rocks_db_loop(
false,
&self.config.expected_cache_path(),
&self.ctx,
);
let inscriptions_db_conn_rw =
open_readwrite_ordhook_db_conn(&self.config.expected_cache_path(), &self.ctx)?;
delete_data_in_ordhook_db(
767430,
820000,
&blocks_db_rw,
&inscriptions_db_conn_rw,
&self.ctx,
)?;
}
}
self.update_state(None).await
}
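// Call-site sketch (both flag combinations appear elsewhere in this diff):
// the service start path runs a plain catch-up, while the SDK runloop opts
// into the integrity pass that repairs missing blocks and compacts rocksdb.
//
// let tip = service.catch_up_with_chain_tip(false, false).await?; // stream start
// let tip = service.catch_up_with_chain_tip(false, true).await?;  // check + compact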
@@ -598,8 +646,8 @@ fn chainhook_sidecar_mutate_ordhook_db(command: HandleBlock, config: &Config, ct
}
}
HandleBlock::ApplyBlock(block) => {
let block_bytes = match BlockBytesCursor::from_standardized_block(&block) {
Ok(block_bytes) => block_bytes,
Err(e) => {
ctx.try_log(|logger| {
error!(
@@ -614,7 +662,7 @@ fn chainhook_sidecar_mutate_ordhook_db(command: HandleBlock, config: &Config, ct
};
insert_entry_in_blocks(
block.block_identifier.index as u32,
&block_bytes,
true,
&blocks_db_rw,
&ctx,
@@ -668,7 +716,7 @@ pub fn start_observer_forwarding(
pub fn chainhook_sidecar_mutate_blocks(
blocks_to_mutate: &mut Vec<BitcoinBlockDataCached>,
blocks_ids_to_rollback: &Vec<BlockIdentifier>,
cache_l2: &Arc<DashMap<(u32, [u8; 8]), TransactionBytesCursor, BuildHasherDefault<FxHasher>>>,
config: &Config,
ctx: &Context,
) {
@@ -705,8 +753,8 @@ pub fn chainhook_sidecar_mutate_blocks(
let ordhook_config = config.get_ordhook_config();
for cache in blocks_to_mutate.iter_mut() {
let block_bytes = match BlockBytesCursor::from_standardized_block(&cache.block) {
Ok(block_bytes) => block_bytes,
Err(e) => {
ctx.try_log(|logger| {
error!(
@@ -722,7 +770,7 @@ pub fn chainhook_sidecar_mutate_blocks(
insert_entry_in_blocks(
cache.block.block_identifier.index as u32,
&block_bytes,
true,
&blocks_db_rw,
&ctx,
@@ -754,7 +802,7 @@ pub fn chainhook_sidecar_mutate_blocks(
let inscriptions_revealed = get_inscriptions_revealed_in_block(&cache.block)
.iter()
.map(|d| d.get_inscription_number().to_string())
.collect::<Vec<String>>();
let inscriptions_transferred =


@@ -19,12 +19,12 @@ serde = "1"
[build-dependencies]
napi-build = "2.0.1"
# [build]
# target = "armv7-unknown-linux-gnueabihf"
# rustflags = ["-C", "link-args=-L/lib/arm-linux-gnueabihf"]
# [target.armv7-unknown-linux-gnueabihf]
# linker = "arm-linux-gnueabihf-g++"
# [profile.release]
# lto = true


@@ -159,7 +159,7 @@ impl OrdinalsIndexingRunloop {
match cmd {
IndexerCommand::StreamBlocks => {
// We start the service as soon as the start() method is being called.
let future = service.catch_up_with_chain_tip(false, true);
let _ = hiro_system_kit::nestable_block_on(future).expect("unable to start indexer");
let future = service.start_event_observer(observer_sidecar);
let (command_tx, event_rx) =


@@ -22,6 +22,10 @@ RUN apt-get install nodejs -y
RUN npm install -g @napi-rs/cli yarn
COPY ./Cargo.toml /src/Cargo.toml
COPY ./Cargo.lock /src/Cargo.lock
COPY ./components/ordhook-core /src/components/ordhook-core
COPY ./components/ordhook-sdk-js /src/components/ordhook-sdk-js
@@ -40,7 +44,7 @@ WORKDIR /src/components/ordhook-cli
RUN cargo build --features release --release
RUN cp /src/target/release/ordhook /out
FROM debian:bullseye-slim