diff --git a/CODEOWNERS b/CODEOWNERS index 83a3203467d..7216063442e 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -17,4 +17,5 @@ crates/blockchain-tree @rakita @rkrasiuk crates/metrics @onbjerg crates/tracing @onbjerg crates/tasks @mattsse +crates/prune @shekhirin @joshieDo .github/ @onbjerg @gakonst \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 5607f18dac9..ae2695c24f3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -25,9 +25,9 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4fa78e18c64fce05e902adecd7a5eed15a5e0a3439f7b0e169f0252214865e3" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" dependencies = [ "gimli", ] @@ -181,24 +181,23 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.3.2" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ca84f3628370c59db74ee214b3263d58f9aadd9b4fe7e711fd87dc452b7f163" +checksum = "b1f58811cfac344940f1a400b6e6231ce35171f614f26439e80f8c1465c5cc0c" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", - "is-terminal", "utf8parse", ] [[package]] name = "anstyle" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd" +checksum = "15c4c2c83f81532e5845a733998b6971faca23490340a418e9b72a3ec9de12ea" [[package]] name = "anstyle-parse" @@ -220,9 +219,9 @@ dependencies = [ [[package]] name = "anstyle-wincon" -version = "1.0.2" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c677ab05e09154296dd37acecd46420c17b9713e8366facafa8fc0885167cf4c" +checksum = "58f54d10c6dfa51283a066ceab3ec1ab78d13fae00aa49243a45e4571fb79dfd" dependencies = [ "anstyle", "windows-sys 0.48.0", @@ -230,9 +229,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.74" +version = "1.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c6f84b74db2535ebae81eede2f39b947dcbf01d093ae5f791e5dd414a1bf289" +checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" [[package]] name = "aquamarine" @@ -244,7 +243,7 @@ dependencies = [ "itertools 0.10.5", "proc-macro-error", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -301,7 +300,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" dependencies = [ - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -311,7 +310,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" dependencies = [ - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -323,7 +322,7 @@ checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" dependencies = [ "num-bigint", "num-traits", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -336,7 +335,7 @@ dependencies = [ "num-bigint", "num-traits", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -434,8 +433,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ 
"proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -487,7 +486,7 @@ checksum = "fee3da8ef1276b0bee5dd1c7258010d8fffd31801447323115a25560e1327b89" dependencies = [ "proc-macro-error", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -511,9 +510,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.68" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12" +checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" dependencies = [ "addr2line", "cc", @@ -538,9 +537,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.2" +version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" +checksum = "414dcefbc63d77c526a76b3afcf6fbb9b5e2791c19c3aa2297733208750c6e53" [[package]] name = "base64ct" @@ -559,9 +558,9 @@ dependencies = [ [[package]] name = "bech32" -version = "0.7.3" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dabbe35f96fb9507f7330793dc490461b2962659ac5d427181e451a623751d1" +checksum = "d86b93f97252c47b41663388e6d155714a9d0c398b99f1005cbc5f978b29f445" [[package]] name = "beef" @@ -594,7 +593,7 @@ dependencies = [ "lazycell", "peeking_take_while", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "regex", "rustc-hash", "shlex", @@ -614,11 +613,11 @@ dependencies = [ "log", "peeking_take_while", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "regex", "rustc-hash", "shlex", - "syn 2.0.28", + "syn 2.0.29", "which", ] @@ -636,11 +635,11 @@ dependencies = [ "peeking_take_while", "prettyplease", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "regex", "rustc-hash", "shlex", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -670,16 +669,6 @@ version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" -[[package]] -name = "bitvec" -version = "0.17.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41262f11d771fd4a61aa3ce019fca363b4b6c282fca9da2a31186d3965a47a5c" -dependencies = [ - "either", - "radium 0.3.0", -] - [[package]] name = "bitvec" version = "1.0.1" @@ -687,7 +676,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" dependencies = [ "funty", - "radium 0.7.0", + "radium", "serde", "tap", "wyz", @@ -723,7 +712,7 @@ dependencies = [ [[package]] name = "boa_ast" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#06bb71aa50e7163f6b38767190ba30177086685f" +source = "git+https://github.com/boa-dev/boa#b47087c02866e914356a990267083c04604d1cb9" dependencies = [ "bitflags 2.4.0", "boa_interner", @@ -736,7 +725,7 @@ dependencies = [ [[package]] name = "boa_engine" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#06bb71aa50e7163f6b38767190ba30177086685f" +source = "git+https://github.com/boa-dev/boa#b47087c02866e914356a990267083c04604d1cb9" dependencies = [ "bitflags 2.4.0", "boa_ast", @@ -755,7 +744,7 @@ dependencies = [ "num-bigint", "num-integer", "num-traits", - "num_enum 0.7.0", + "num_enum", "once_cell", "pollster", "rand 0.8.5", @@ -774,7 +763,7 @@ dependencies = 
[ [[package]] name = "boa_gc" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#06bb71aa50e7163f6b38767190ba30177086685f" +source = "git+https://github.com/boa-dev/boa#b47087c02866e914356a990267083c04604d1cb9" dependencies = [ "boa_macros", "boa_profiler", @@ -785,7 +774,7 @@ dependencies = [ [[package]] name = "boa_icu_provider" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#06bb71aa50e7163f6b38767190ba30177086685f" +source = "git+https://github.com/boa-dev/boa#b47087c02866e914356a990267083c04604d1cb9" dependencies = [ "icu_collections", "icu_normalizer", @@ -798,7 +787,7 @@ dependencies = [ [[package]] name = "boa_interner" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#06bb71aa50e7163f6b38767190ba30177086685f" +source = "git+https://github.com/boa-dev/boa#b47087c02866e914356a990267083c04604d1cb9" dependencies = [ "boa_gc", "boa_macros", @@ -813,18 +802,18 @@ dependencies = [ [[package]] name = "boa_macros" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#06bb71aa50e7163f6b38767190ba30177086685f" +source = "git+https://github.com/boa-dev/boa#b47087c02866e914356a990267083c04604d1cb9" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", "synstructure 0.13.0", ] [[package]] name = "boa_parser" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#06bb71aa50e7163f6b38767190ba30177086685f" +source = "git+https://github.com/boa-dev/boa#b47087c02866e914356a990267083c04604d1cb9" dependencies = [ "bitflags 2.4.0", "boa_ast", @@ -844,7 +833,7 @@ dependencies = [ [[package]] name = "boa_profiler" version = "0.17.0" -source = "git+https://github.com/boa-dev/boa#06bb71aa50e7163f6b38767190ba30177086685f" +source = "git+https://github.com/boa-dev/boa#b47087c02866e914356a990267083c04604d1cb9" [[package]] name = "boyer-moore-magiclen" @@ -878,11 +867,12 @@ dependencies = [ [[package]] name = "bs58" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" +checksum = "f5353f36341f7451062466f0b755b96ac3a9547e4d7f6b70d603fc721a7d7896" dependencies = [ - "sha2 0.9.9", + "sha2", + "tinyvec", ] [[package]] @@ -932,7 +922,7 @@ dependencies = [ [[package]] name = "c-kzg" version = "0.1.0" -source = "git+https://github.com/rjected/c-kzg-4844?branch=dan/add-serde-feature#4c95d6b8850f4f22a25fed0cf207560711cefe2b" +source = "git+https://github.com/rjected/c-kzg-4844?branch=dan/add-serde-feature#d45a4cf712c1883f42f0ca3bb94aea3b3e7e4880" dependencies = [ "bindgen 0.64.0 (git+https://github.com/rust-lang/rust-bindgen?rev=0de11f0a521611ac8738b7b01d19dddaf3899e66)", "cc", @@ -974,6 +964,20 @@ dependencies = [ "thiserror", ] +[[package]] +name = "cargo_metadata" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7daec1a2a2129eeba1644b220b4647ec537b0b5d4bfd6876fcc5a540056b592" +dependencies = [ + "camino", + "cargo-platform", + "semver 1.0.18", + "serde", + "serde_json", + "thiserror", +] + [[package]] name = "cassowary" version = "0.3.0" @@ -988,9 +992,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.82" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "305fe645edc1442a0fa8b6726ba61d422798d37a52e12eaecf4b022ebbb88f01" +checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" 
dependencies = [ "jobserver", "libc", @@ -1083,9 +1087,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.3.21" +version = "4.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c27cdf28c0f604ba3f512b0c9a409f8de8513e4816705deb0498b627e7c3a3fd" +checksum = "1d5f1946157a96594eb2d2c10eb7ad9a2b27518cb3000209dec700c35df9197d" dependencies = [ "clap_builder", "clap_derive", @@ -1094,9 +1098,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.3.21" +version = "4.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08a9f1ab5e9f01a9b81f202e8562eb9a10de70abf9eaeac1be465c28b75aa4aa" +checksum = "78116e32a042dd73c2901f0dc30790d20ff3447f3e3472fad359e8c3d282bcd6" dependencies = [ "anstream", "anstyle", @@ -1106,21 +1110,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.3.12" +version = "4.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54a9bb5758fc5dfe728d1019941681eccaf0cf8a4189b692a0ee2f2ecf90a050" +checksum = "c9fd1a5729c4548118d7d70ff234a44868d00489a4b6597b0b020918a0e91a1a" dependencies = [ "heck", "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] name = "clap_lex" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b" +checksum = "cd7cc57abe963c6d3b9d8be5b06ba7c8957a930305ca90304f24ef040aa6f961" [[package]] name = "cobs" @@ -1130,59 +1134,55 @@ checksum = "67ba02a97a2bd10f4b59b25c7973101c79642302776489e030cd13cdab09ed15" [[package]] name = "codecs-derive" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "convert_case 0.6.0", "parity-scale-codec", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "serde", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] name = "coins-bip32" -version = "0.8.3" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b30a84aab436fcb256a2ab3c80663d8aec686e6bae12827bb05fef3e1e439c9f" +checksum = "3b6be4a5df2098cd811f3194f64ddb96c267606bffd9689ac7b0160097b01ad3" dependencies = [ - "bincode", "bs58", "coins-core", "digest 0.10.7", - "getrandom 0.2.10", "hmac", "k256", - "lazy_static", "serde", - "sha2 0.10.7", + "sha2", "thiserror", ] [[package]] name = "coins-bip39" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84f4d04ee18e58356accd644896aeb2094ddeafb6a713e056cef0c0a8e468c15" +checksum = "3db8fba409ce3dc04f7d804074039eb68b960b0829161f8e06c95fea3f122528" dependencies = [ - "bitvec 0.17.4", + "bitvec", "coins-bip32", - "getrandom 0.2.10", "hmac", "once_cell", "pbkdf2 0.12.2", "rand 0.8.5", - "sha2 0.10.7", + "sha2", "thiserror", ] [[package]] name = "coins-core" -version = "0.8.3" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b949a1c63fb7eb591eb7ba438746326aedf0ae843e51ec92ba6bec5bb382c4f" +checksum = "5286a0843c21f8367f7be734f89df9b822e0321d8bcce8d6e735aadff7d74979" dependencies = [ - "base64 0.21.2", + "base64 0.21.3", "bech32", "bs58", "digest 0.10.7", @@ -1191,7 +1191,7 @@ dependencies = [ "ripemd", "serde", "serde_derive", - "sha2 0.10.7", + "sha2", "sha3", "thiserror", ] @@ -1238,6 +1238,18 @@ dependencies = [ "windows-sys 0.45.0", ] +[[package]] +name = "const-hex" +version = "1.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ca268df6cd88e646b564e6aff1a016834e5f42077c736ef6b6789c31ef9ec5dc" +dependencies = [ + "cfg-if", + "cpufeatures", + "hex", + "serde", +] + [[package]] name = "const-oid" version = "0.9.5" @@ -1283,9 +1295,9 @@ checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "cpp_demangle" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee34052ee3d93d6d8f3e6f81d85c47921f6653a19a7b70e939e3e602d893a674" +checksum = "7e8227005286ec39567949b33df9896bcadfa6051bccca2488129f108ca23119" dependencies = [ "cfg-if", ] @@ -1520,8 +1532,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -1553,7 +1565,7 @@ dependencies = [ "fnv", "ident_case", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "strsim 0.9.3", "syn 1.0.109", ] @@ -1567,9 +1579,9 @@ dependencies = [ "fnv", "ident_case", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "strsim 0.10.0", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -1579,7 +1591,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9b5a2f4ac4969822c62224815d069952656cadc7084fdca9751e6d959189b72" dependencies = [ "darling_core 0.10.2", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -1590,15 +1602,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" dependencies = [ "darling_core 0.20.3", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] name = "dashmap" -version = "5.5.0" +version = "5.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6943ae99c34386c84a470c499d3414f66502a41340aa895406e0d2e4a207b91d" +checksum = "edd72493923899c6f10c641bdbdeddc7183d6396641d99c1a0d1597f37f92e28" dependencies = [ "cfg-if", "hashbrown 0.14.0", @@ -1650,9 +1662,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7684a49fb1af197853ef7b2ee694bc1f5b4179556f1e5710e1760c5db6f5e929" +checksum = "f2696e8a945f658fd14dc3b87242e6b80cd0f36ff04ea560fa39082368847946" dependencies = [ "serde", ] @@ -1664,7 +1676,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -1675,8 +1687,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53e0efad4403bfc52dc201159c4b842a246a14b98c64b55dfd0f2d89729dfeb8" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -1688,7 +1700,7 @@ dependencies = [ "darling 0.10.2", "derive_builder_core", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -1700,7 +1712,7 @@ checksum = "2791ea3e372c8495c0bc2033991d76b512cd799d07491fbd6890124db9458bef" dependencies = [ "darling 0.10.2", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -1712,7 +1724,7 @@ checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case 0.4.0", "proc-macro2 1.0.66", - "quote 1.0.32", + 
"quote 1.0.33", "rustc_version 0.4.0", "syn 1.0.109", ] @@ -1815,13 +1827,13 @@ dependencies = [ [[package]] name = "discv5" version = "0.3.1" -source = "git+https://github.com/sigp/discv5#1439decd4e7d7c9de78ef61b5d67be3fee688510" +source = "git+https://github.com/sigp/discv5#d2e30e04ee62418b9e57278cee907c02b99d5bd1" dependencies = [ "aes 0.7.5", "aes-gcm", "arrayvec", "delay_map", - "enr 0.9.0", + "enr", "fnv", "futures", "hashlink", @@ -1849,8 +1861,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -1879,9 +1891,9 @@ checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b" [[package]] name = "dyn-clone" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "304e6508efa593091e97a9abbc10f90aa7ca635b6d2784feff3c89d41dd12272" +checksum = "bbfc4744c1b8f2a09adc0e55242f60b1af195d88596bd8700be74418c056c555" [[package]] name = "ecdsa" @@ -1917,7 +1929,7 @@ dependencies = [ "ed25519", "rand_core 0.6.4", "serde", - "sha2 0.10.7", + "sha2", "zeroize", ] @@ -1929,13 +1941,13 @@ checksum = "079044df30bb07de7d846d41a184c4b00e66ebdac93ee459253474f3a47e50ae" dependencies = [ "enum-ordinalize", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] [[package]] name = "ef-tests" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "reth-db", "reth-interfaces", @@ -1984,9 +1996,9 @@ checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" [[package]] name = "encoding_rs" -version = "0.8.32" +version = "0.8.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394" +checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" dependencies = [ "cfg-if", ] @@ -1997,32 +2009,13 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c34f04666d835ff5d62e058c3995147c06f42fe86ff053337632bca83e42702d" -[[package]] -name = "enr" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf56acd72bb22d2824e66ae8e9e5ada4d0de17a69c7fd35569dde2ada8ec9116" -dependencies = [ - "base64 0.13.1", - "bytes", - "hex", - "k256", - "log", - "rand 0.8.5", - "rlp", - "secp256k1", - "serde", - "sha3", - "zeroize", -] - [[package]] name = "enr" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0be7b2ac146c1f99fe245c02d16af0696450d8e06c135db75e10eeb9e642c20d" dependencies = [ - "base64 0.21.2", + "base64 0.21.3", "bytes", "ed25519-dalek", "hex", @@ -2030,6 +2023,7 @@ dependencies = [ "log", "rand 0.8.5", "rlp", + "secp256k1", "serde", "serde-hex", "sha3", @@ -2044,7 +2038,7 @@ checksum = "570d109b813e904becc80d8d5da38376818a143348413f7149f1340fe04754d4" dependencies = [ "heck", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -2056,7 +2050,7 @@ checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" dependencies = [ "heck", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -2069,8 +2063,8 @@ dependencies = [ "num-bigint", "num-traits", "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -2080,8 +2074,8 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "b893c4eb2dc092c811165f84dc7447fae16fb66521717968c34c509b39b1a5c5" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -2138,7 +2132,7 @@ dependencies = [ "scrypt", "serde", "serde_json", - "sha2 0.10.7", + "sha2", "sha3", "thiserror", "uuid 0.8.2", @@ -2194,17 +2188,16 @@ dependencies = [ [[package]] name = "ethers-contract" -version = "2.0.8" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d4719a44c3d37ab07c6dea99ab174068d8c35e441b60b6c20ce4e48357273e8" +checksum = "02bb80fd2c22631a5eb8a02cbf373cc5fd86937fc966bb670b9a884580c8e71c" dependencies = [ + "const-hex", "ethers-contract-abigen", "ethers-contract-derive", "ethers-core", "ethers-providers", - "ethers-signers", "futures-util", - "hex", "once_cell", "pin-project", "serde", @@ -2214,58 +2207,58 @@ dependencies = [ [[package]] name = "ethers-contract-abigen" -version = "2.0.8" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "155ea1b84d169d231317ed86e307af6f2bed6b40dd17e5e94bc84da21cadb21c" +checksum = "22c54db0d393393e732a5b20273e4f8ab89f0cce501c84e75fab9c126799a6e6" dependencies = [ "Inflector", + "const-hex", "dunce", "ethers-core", "eyre", - "hex", "prettyplease", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "regex", "serde", "serde_json", - "syn 2.0.28", + "syn 2.0.29", "toml 0.7.6", "walkdir", ] [[package]] name = "ethers-contract-derive" -version = "2.0.8" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8567ff196c4a37c1a8c90ec73bda0ad2062e191e4f0a6dc4d943e2ec4830fc88" +checksum = "62ee4f216184a1304b707ed258f4f70aa40bf7e1522ab8963d127a8d516eaa1a" dependencies = [ "Inflector", + "const-hex", "ethers-contract-abigen", "ethers-core", - "hex", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "serde_json", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] name = "ethers-core" -version = "2.0.8" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60ca2514feb98918a0a31de7e1983c29f2267ebf61b2dc5d4294f91e5b866623" +checksum = "8c29523f73c12753165781c6e5dc11c84d3e44c080a15f7c6cfbd70b514cb6f1" dependencies = [ "arrayvec", "bytes", - "cargo_metadata", + "cargo_metadata 0.17.0", "chrono", + "const-hex", "elliptic-curve", "ethabi", "generic-array", - "hex", "k256", - "num_enum 0.6.1", + "num_enum", "once_cell", "open-fastrlp", "rand 0.8.5", @@ -2273,7 +2266,7 @@ dependencies = [ "serde", "serde_json", "strum 0.25.0", - "syn 2.0.28", + "syn 2.0.29", "tempfile", "thiserror", "tiny-keccak", @@ -2282,9 +2275,9 @@ dependencies = [ [[package]] name = "ethers-etherscan" -version = "2.0.8" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22b3a8269d3df0ed6364bc05b4735b95f4bf830ce3aef87d5e760fb0e93e5b91" +checksum = "4aab5af432b3fe5b7756b60df5c9ddeb85a13414575ad8a9acd707c24f0a77a5" dependencies = [ "ethers-core", "reqwest", @@ -2297,9 +2290,9 @@ dependencies = [ [[package]] name = "ethers-middleware" -version = "2.0.8" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0c339aad74ae5c451d27e0e49c7a3c7d22620b119b4f9291d7aa21f72d7f366" +checksum = "356151d5ded56d4918146366abc9dfc9df367cf0096492a7a5477b21b7693615" dependencies = [ "async-trait", "auto_impl", @@ -2324,24 +2317,25 @@ dependencies = [ [[package]] name = 
"ethers-providers" -version = "2.0.8" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b411b119f1cf0efb69e2190883dee731251882bb21270f893ee9513b3a697c48" +checksum = "00c84664b294e47fc2860d6db0db0246f79c4c724e552549631bb9505b834bee" dependencies = [ "async-trait", "auto_impl", - "base64 0.21.2", + "base64 0.21.3", "bytes", - "enr 0.8.1", + "const-hex", + "enr", "ethers-core", "futures-channel", "futures-core", "futures-timer", "futures-util", "hashers", - "hex", "http", "instant", + "jsonwebtoken", "once_cell", "pin-project", "reqwest", @@ -2361,19 +2355,19 @@ dependencies = [ [[package]] name = "ethers-signers" -version = "2.0.8" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4864d387456a9c09a1157fa10e1528b29d90f1d859443acf06a1b23365fb518c" +checksum = "170b299698702ef1f53d2275af7d6d97409cfa4f9398ee9ff518f6bc9102d0ad" dependencies = [ "async-trait", "coins-bip32", "coins-bip39", + "const-hex", "elliptic-curve", "eth-keystore", "ethers-core", - "hex", "rand 0.8.5", - "sha2 0.10.7", + "sha2", "thiserror", "tracing", ] @@ -2614,8 +2608,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -2696,10 +2690,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if", - "js-sys", "libc", "wasi 0.11.0+wasi-snapshot-preview1", - "wasm-bindgen", ] [[package]] @@ -2714,9 +2706,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.27.3" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" +checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" [[package]] name = "glob" @@ -2783,9 +2775,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.20" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97ec8491ebaf99c8eaa73058b045fe58073cd6be7f596ac993ced0b0a0c01049" +checksum = "91fc23aa11be92976ef4729127f1a74adf36d8436f7816b185d18df956790833" dependencies = [ "bytes", "fnv", @@ -3195,7 +3187,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd8b728b9421e93eff1d9f8681101b78fa745e0748c95c655c83f337044a7e10" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -3283,7 +3275,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -3303,7 +3295,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b139284b5cf57ecfa712bcc66950bb635b31aff41c188e8a4cfc758eca374a3f" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", ] [[package]] @@ -3335,12 +3327,12 @@ dependencies = [ [[package]] name = "inferno" -version = "0.11.15" +version = "0.11.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fb7c1b80a1dfa604bb4a649a5c5aeef3d913f7c520cb42b40e534e8a61bcdfc" +checksum = "73c0fefcb6d409a6587c07515951495d482006f89a21daa0f2f783aa4fd5e027" dependencies = [ "ahash 0.8.3", - 
"indexmap 1.9.3", + "indexmap 2.0.0", "is-terminal", "itoa", "log", @@ -3390,7 +3382,7 @@ dependencies = [ "socket2 0.5.3", "widestring", "windows-sys 0.48.0", - "winreg 0.50.0", + "winreg", ] [[package]] @@ -3416,7 +3408,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi", - "rustix 0.38.8", + "rustix 0.38.9", "windows-sys 0.48.0", ] @@ -3588,7 +3580,7 @@ dependencies = [ "heck", "proc-macro-crate", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -3659,7 +3651,7 @@ version = "8.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ - "base64 0.21.2", + "base64 0.21.3", "pem", "ring", "serde", @@ -3677,7 +3669,7 @@ dependencies = [ "ecdsa", "elliptic-curve", "once_cell", - "sha2 0.10.7", + "sha2", "signature", ] @@ -3900,7 +3892,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a4964177ddfdab1e3a2b37aec7cf320e14169abb0ed73999f558136409178d5" dependencies = [ - "base64 0.21.2", + "base64 0.21.3", "hyper", "indexmap 1.9.3", "ipnet", @@ -3919,8 +3911,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -3935,7 +3927,7 @@ dependencies = [ "once_cell", "procfs", "rlimit", - "windows 0.51.0", + "windows 0.51.1", ] [[package]] @@ -4023,7 +4015,7 @@ checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb" dependencies = [ "cfg-if", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -4044,7 +4036,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a7d5f7076603ebc68de2dc6a650ec331a062a13abaa346975be747bbfa4b789" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -4123,9 +4115,9 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" +checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" dependencies = [ "autocfg", "num-integer", @@ -4205,34 +4197,13 @@ dependencies = [ "libc", ] -[[package]] -name = "num_enum" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a015b430d3c108a207fd776d2e2196aaf8b1cf8cf93253e3a097ff3085076a1" -dependencies = [ - "num_enum_derive 0.6.1", -] - [[package]] name = "num_enum" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70bf6736f74634d299d00086f02986875b3c2d924781a6a2cb6c201e73da0ceb" dependencies = [ - "num_enum_derive 0.7.0", -] - -[[package]] -name = "num_enum_derive" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" -dependencies = [ - "proc-macro-crate", - "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "num_enum_derive", ] [[package]] @@ -4243,8 +4214,8 @@ checksum = "56ea360eafe1022f7cc56cd7b869ed57330fb2453d0c7831d99b74c65d2f5597" dependencies = [ "proc-macro-crate", "proc-macro2 1.0.66", - "quote 
1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -4258,9 +4229,9 @@ dependencies = [ [[package]] name = "object" -version = "0.31.1" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bda667d9f2b5051b8833f59f3bf748b28ef54f850f4fcb389a252aa383866d1" +checksum = "77ac5bbd07aea88c60a577a1ce218075ffd59208b2d7ca97adf9bfc5aeb21ebe" dependencies = [ "memchr", ] @@ -4308,7 +4279,7 @@ checksum = "003b2be5c6c53c1cfeb0a238b8a1c3915cd410feb684457a36c10038f764bb1c" dependencies = [ "bytes", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -4326,9 +4297,9 @@ checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" [[package]] name = "ordered-float" -version = "3.8.0" +version = "3.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7417b1484e3641a8791af3c3123cdc083ac60a0d262a2f281b6125d58917caf4" +checksum = "2a54938017eacd63036332b4ae5c8a49fc8c0c1d6d629893057e4f13609edd06" dependencies = [ "num-traits", ] @@ -4356,7 +4327,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd8e946cc0cc711189c0b0249fb8b599cbeeab9784d83c415719368bb8d4ac64" dependencies = [ "arrayvec", - "bitvec 1.0.1", + "bitvec", "byte-slice-cast", "bytes", "impl-trait-for-tuples", @@ -4372,7 +4343,7 @@ checksum = "2a296c3079b5fefbc499e1de58dc26c09b1b9a5952d26694ee89f04a43ebbb3e" dependencies = [ "proc-macro-crate", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -4435,7 +4406,7 @@ dependencies = [ "libc", "redox_syscall 0.3.5", "smallvec 1.11.0", - "windows-targets 0.48.2", + "windows-targets 0.48.5", ] [[package]] @@ -4533,8 +4504,8 @@ dependencies = [ "phf_generator", "phf_shared", "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -4562,15 +4533,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] name = "pin-project-lite" -version = "0.2.12" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12cc1b0bf1727a77a54b6654e7b5f1af8604923edc8b81885f8ec92f9e3f0a05" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" [[package]] name = "pin-utils" @@ -4657,9 +4628,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.4.2" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f32154ba0af3a075eefa1eda8bb414ee928f62303a54ea85b8d6638ff1a6ee9e" +checksum = "31114a898e107c51bb1609ffaf55a0e011cf6a4d7f1170d0015a165082c0338b" [[package]] name = "postcard" @@ -4747,7 +4718,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c64d9ba0963cdcea2e1b2230fbae2bab30eb25a174be395c41e764bfb65dd62" dependencies = [ "proc-macro2 1.0.66", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -4782,7 +4753,7 @@ checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", "version_check", ] @@ -4794,7 +4765,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ "proc-macro2 1.0.66", - 
"quote 1.0.32", + "quote 1.0.33", "version_check", ] @@ -4923,19 +4894,13 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.32" +version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f3b39ccfb720540debaa0164757101c08ecb8d326b15358ce76a62c7e85965" +checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" dependencies = [ "proc-macro2 1.0.66", ] -[[package]] -name = "radium" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "def50a86306165861203e7f84ecffbbdfdea79f0e51039b33de1e952358c47ac" - [[package]] name = "radium" version = "0.7.0" @@ -5094,14 +5059,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.3" +version = "1.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a" +checksum = "12de2eff854e5fa4b1295edd650e227e9d8fb0c9e90b12e7f36d6a6811791a29" dependencies = [ "aho-corasick 1.0.4", "memchr", - "regex-automata 0.3.6", - "regex-syntax 0.7.4", + "regex-automata 0.3.7", + "regex-syntax 0.7.5", ] [[package]] @@ -5115,13 +5080,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" +checksum = "49530408a136e16e5b486e883fbb6ba058e8e4e8ae6621a77b048b314336e629" dependencies = [ "aho-corasick 1.0.4", "memchr", - "regex-syntax 0.7.4", + "regex-syntax 0.7.5", ] [[package]] @@ -5132,9 +5097,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" +checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" [[package]] name = "regress" @@ -5148,11 +5113,11 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.18" +version = "0.11.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cde824a14b7c14f85caff81225f411faacc04a2013f41670f41443742b1c1c55" +checksum = "3e9ad3fe7488d7e34558a2033d45a0c90b72d97b4f80705666fea71472e2e6a1" dependencies = [ - "base64 0.21.2", + "base64 0.21.3", "bytes", "encoding_rs", "futures-core", @@ -5177,7 +5142,7 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "winreg 0.10.1", + "winreg", ] [[package]] @@ -5192,7 +5157,7 @@ dependencies = [ [[package]] name = "reth" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "backon", "boyer-moore-magiclen", @@ -5264,7 +5229,7 @@ dependencies = [ [[package]] name = "reth-auto-seal-consensus" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "futures-util", "reth-beacon-consensus", @@ -5281,7 +5246,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "futures-core", "futures-util", @@ -5301,7 +5266,7 @@ dependencies = [ [[package]] name = "reth-beacon-consensus" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "assert_matches", "futures", @@ -5330,7 +5295,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "aquamarine", "assert_matches", @@ -5350,7 +5315,7 @@ dependencies 
= [ [[package]] name = "reth-codecs" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "arbitrary", "bytes", @@ -5365,7 +5330,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "confy", "reth-discv4", @@ -5382,7 +5347,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "assert_matches", "mockall", @@ -5393,7 +5358,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "arbitrary", "assert_matches", @@ -5435,10 +5400,10 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "discv5", - "enr 0.8.1", + "enr", "generic-array", "hex", "parking_lot 0.12.1", @@ -5459,11 +5424,11 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "async-trait", "data-encoding", - "enr 0.8.1", + "enr", "linked_hash_set", "parking_lot 0.12.1", "reth-net-common", @@ -5483,7 +5448,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "assert_matches", "futures", @@ -5509,7 +5474,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "aes 0.8.3", "block-padding", @@ -5528,7 +5493,7 @@ dependencies = [ "reth-primitives", "reth-rlp", "secp256k1", - "sha2 0.10.7", + "sha2", "sha3", "thiserror", "tokio", @@ -5540,7 +5505,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "arbitrary", "async-trait", @@ -5555,6 +5520,7 @@ dependencies = [ "proptest-derive", "rand 0.8.5", "reth-codecs", + "reth-discv4", "reth-ecies", "reth-metrics", "reth-primitives", @@ -5574,7 +5540,7 @@ dependencies = [ [[package]] name = "reth-interfaces" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "arbitrary", "async-trait", @@ -5602,7 +5568,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "async-trait", "bytes", @@ -5621,7 +5587,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "bitflags 2.4.0", "byteorder", @@ -5641,7 +5607,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "bindgen 0.65.1", "cc", @@ -5650,7 +5616,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "futures", "metrics", @@ -5660,21 +5626,21 @@ dependencies = [ [[package]] name = "reth-metrics-derive" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "metrics", "once_cell", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "regex", "serial_test 0.10.0", - "syn 2.0.28", + "syn 2.0.29", "trybuild", ] [[package]] name = "reth-net-common" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "pin-project", "reth-primitives", @@ -5683,7 +5649,7 @@ dependencies = [ [[package]] name = "reth-net-nat" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "igd", "pin-project-lite", @@ -5697,12 +5663,12 @@ dependencies = [ [[package]] name = "reth-network" -version = "0.1.0-alpha.6" +version = 
"0.1.0-alpha.7" dependencies = [ "aquamarine", "async-trait", "auto_impl", - "enr 0.8.1", + "enr", "ethers-core", "ethers-middleware", "ethers-providers", @@ -5748,9 +5714,10 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "async-trait", + "reth-discv4", "reth-eth-wire", "reth-primitives", "reth-rpc-types", @@ -5761,7 +5728,7 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "futures-util", "metrics", @@ -5772,7 +5739,7 @@ dependencies = [ "reth-rlp", "reth-rpc-types", "revm-primitives", - "sha2 0.10.7", + "sha2", "thiserror", "tokio", "tokio-stream", @@ -5781,7 +5748,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "arbitrary", "assert_matches", @@ -5815,7 +5782,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "sha2 0.10.7", + "sha2", "strum 0.25.0", "sucds", "tempfile", @@ -5833,7 +5800,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "auto_impl", "derive_more", @@ -5854,7 +5821,7 @@ dependencies = [ [[package]] name = "reth-prune" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "assert_matches", "itertools 0.11.0", @@ -5872,7 +5839,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "once_cell", "reth-consensus-common", @@ -5888,7 +5855,7 @@ dependencies = [ [[package]] name = "reth-revm-inspectors" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "boa_engine", "boa_gc", @@ -5904,7 +5871,7 @@ dependencies = [ [[package]] name = "reth-revm-primitives" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "reth-primitives", "revm", @@ -5912,7 +5879,7 @@ dependencies = [ [[package]] name = "reth-rlp" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "arrayvec", "auto_impl", @@ -5931,16 +5898,16 @@ dependencies = [ [[package]] name = "reth-rlp-derive" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] name = "reth-rpc" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "assert_matches", "async-trait", @@ -5990,7 +5957,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "jsonrpsee", "reth-primitives", @@ -6000,7 +5967,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "async-trait", "futures", @@ -6014,7 +5981,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "hyper", "jsonrpsee", @@ -6046,7 +6013,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "assert_matches", "async-trait", @@ -6068,7 +6035,7 @@ dependencies = [ [[package]] name = "reth-rpc-types" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "itertools 0.11.0", "jsonrpsee-types", @@ -6083,7 +6050,7 @@ dependencies = [ [[package]] name = "reth-rpc-types-compat" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "reth-primitives", "reth-rlp", @@ 
-6092,7 +6059,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "aquamarine", "assert_matches", @@ -6129,7 +6096,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "dyn-clone", "futures-util", @@ -6143,7 +6110,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "tracing", "tracing-appender", @@ -6153,7 +6120,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "aquamarine", "assert_matches", @@ -6184,7 +6151,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" dependencies = [ "criterion", "derive_more", @@ -6235,7 +6202,7 @@ dependencies = [ "revm-primitives", "ripemd", "secp256k1", - "sha2 0.10.7", + "sha2", "sha3", "substrate-bn", ] @@ -6247,7 +6214,7 @@ source = "git+https://github.com/bluealloy/revm/?branch=release/v25#88337924f4d1 dependencies = [ "arbitrary", "auto_impl", - "bitvec 1.0.1", + "bitvec", "bytes", "derive_more", "enumn", @@ -6334,7 +6301,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e33d7b2abe0c340d8797fe2907d3f20d3b5ea5908683618bfe80df7f621f672a" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -6426,9 +6393,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.8" +version = "0.38.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ed4fa021d81c8392ce04db050a3da9a60299050b7ae1cf482d862b54a7218f" +checksum = "9bfe0f2582b4931a45d1fa608f8a8722e8b3c7ac54dd6d5f3b3212791fedef49" dependencies = [ "bitflags 2.4.0", "errno 0.3.2", @@ -6467,14 +6434,14 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" dependencies = [ - "base64 0.21.2", + "base64 0.21.3", ] [[package]] name = "rustls-webpki" -version = "0.101.3" +version = "0.101.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "261e9e0888cba427c3316e6322805653c9425240b6fd96cee7cb671ab70ab8d0" +checksum = "7d93931baf2d282fff8d3a532bbfd7653f734643161b87e3e01e59a04439bf0d" dependencies = [ "ring", "untrusted", @@ -6548,7 +6515,7 @@ checksum = "912e55f6d20e0e80d63733872b40e1227c0bce1e1ab81ba67d696339bfd7fd29" dependencies = [ "proc-macro-crate", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -6587,7 +6554,7 @@ dependencies = [ "hmac", "pbkdf2 0.11.0", "salsa20", - "sha2 0.10.7", + "sha2", ] [[package]] @@ -6698,9 +6665,9 @@ checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" [[package]] name = "serde" -version = "1.0.183" +version = "1.0.188" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32ac8da02677876d532745a130fc9d8e6edfa81a269b107c5b00829b91d8eb3c" +checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e" dependencies = [ "serde_derive", ] @@ -6718,13 +6685,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.183" +version = "1.0.188" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aafe972d60b0b9bee71a91b92fee2d4fb3c9d7e8f6b179aa99f27203d99a4816" +checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ 
"proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -6783,8 +6750,8 @@ checksum = "881b6f881b17d13214e5d494c939ebab463d01264ce1811e9d4ac3a882e7695f" dependencies = [ "darling 0.20.3", "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -6822,7 +6789,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b64f9e531ce97c88b4778aad0ceee079216071cffec6ac9b904277f8f92e7fe3" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", ] @@ -6833,8 +6800,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -6872,19 +6839,6 @@ dependencies = [ "digest 0.10.7", ] -[[package]] -name = "sha2" -version = "0.9.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", -] - [[package]] name = "sha2" version = "0.10.7" @@ -6982,9 +6936,9 @@ dependencies = [ [[package]] name = "similar-asserts" -version = "1.4.2" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbf644ad016b75129f01a34a355dcb8d66a5bc803e417c7a77cc5d5ee9fa0f18" +checksum = "e041bb827d1bfca18f213411d51b665309f1afb37a04a5d1464530e13779fc0f" dependencies = [ "console", "similar", @@ -7004,9 +6958,9 @@ dependencies = [ [[package]] name = "siphasher" -version = "0.3.10" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bd3e3206899af3f8b12af284fafc038cc1dc2b41d1b89dd17297221c5d225de" +checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" [[package]] name = "sketches-ddsketch" @@ -7016,9 +6970,9 @@ checksum = "68a406c1882ed7f29cd5e248c9848a80e7cb6ae0fea82346d2746f2f941c07e1" [[package]] name = "slab" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" dependencies = [ "autocfg", ] @@ -7173,7 +7127,7 @@ checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "rustversion", "syn 1.0.109", ] @@ -7186,9 +7140,9 @@ checksum = "ad8d03b598d3d0fff69bf533ee3ef19b8eeb342729596df84bcc7e1f96ec4059" dependencies = [ "heck", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "rustversion", - "syn 2.0.28", + "syn 2.0.29", ] [[package]] @@ -7270,18 +7224,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.28" +version = "2.0.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04361975b3f5e348b2189d8dc55bc942f278b2d482a6a0365de5bdd62d351567" +checksum = "c324c494eba9d92503e6f1ef2e6df781e78f6a7705a0202d9801b198807d518a" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", 
"unicode-ident", ] @@ -7292,7 +7246,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", "unicode-xid 0.2.4", ] @@ -7304,8 +7258,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "285ba80e733fac80aa4270fbcdf83772a79b80aa35c97075320abfee4a915b06" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", "unicode-xid 0.2.4", ] @@ -7317,14 +7271,14 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.7.1" +version = "3.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc02fddf48964c42031a0b3fe0428320ecf3a73c401040fc0096f97794310651" +checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" dependencies = [ "cfg-if", "fastrand 2.0.0", "redox_syscall 0.3.5", - "rustix 0.38.8", + "rustix 0.38.9", "windows-sys 0.48.0", ] @@ -7361,9 +7315,9 @@ version = "4.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f0528a7ad0bc85f826aa831434a37833aea622a5ae155f5b5dd431b25244213" dependencies = [ - "cargo_metadata", + "cargo_metadata 0.15.4", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "serde", "strum_macros 0.25.2", ] @@ -7379,9 +7333,9 @@ dependencies = [ "itertools 0.10.5", "lazy_static", "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "subprocess", - "syn 2.0.28", + "syn 2.0.29", "test-fuzz-internal", "toolchain_find", ] @@ -7408,22 +7362,22 @@ checksum = "aac81b6fd6beb5884b0cf3321b8117e6e5d47ecb6fc89f414cfdcca8b2fe2dd8" [[package]] name = "thiserror" -version = "1.0.46" +version = "1.0.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9207952ae1a003f42d3d5e892dac3c6ba42aa6ac0c79a6a91a2b5cb4253e75c" +checksum = "97a802ec30afc17eee47b2855fc72e0c4cd62be9b4efe6591edde0ec5bd68d8f" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.46" +version = "1.0.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1728216d3244de4f14f14f8c15c79be1a7c67867d28d69b719690e2a19fb445" +checksum = "6bb623b56e39ab7dcd4b1b98bb6c8f8d907ed255b18de254088016b27a8ee19b" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -7438,9 +7392,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.25" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0fdd63d58b18d663fbdf70e049f00a22c8e42be082203be7f26589213cd75ea" +checksum = "0bb39ee79a6d8de55f48f2293a830e040392f1c5f16e336bdd1788cd0aadce07" dependencies = [ "deranged", "itoa", @@ -7459,9 +7413,9 @@ checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" [[package]] name = "time-macros" -version = "0.2.11" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb71511c991639bb078fd5bf97757e03914361c48100d52878b8e52b46fb92cd" +checksum = "733d258752e9303d392b94b75230d07b0b9c489350c69b851fc6c065fde3e8f9" dependencies = [ "time-core", ] @@ -7512,9 +7466,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.31.0" +version = "1.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "40de3a2ba249dcb097e01be5e67a5ff53cf250397715a071a81543e8a832a920" +checksum = "17ed6077ed6cd6c74735e21f37eb16dc3935f96878b1fe961074089cc80893f9" dependencies = [ "backtrace", "bytes", @@ -7536,8 +7490,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -7564,9 +7518,9 @@ dependencies = [ [[package]] name = "tokio-tungstenite" -version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec509ac96e9a0c43427c74f003127d953a265737636129424288d27cb5c4b12c" +checksum = "2b2dbec703c26b00d74844519606ef15d09a7d6857860f84ad223dec002ddea2" dependencies = [ "futures-util", "log", @@ -7674,7 +7628,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55ae70283aba8d2a8b411c695c437fe25b8b5e44e23e780662002fc72fb47a82" dependencies = [ "async-compression", - "base64 0.21.2", + "base64 0.21.3", "bitflags 2.4.0", "bytes", "futures-core", @@ -7740,8 +7694,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -7914,9 +7868,9 @@ checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "trybuild" -version = "1.0.82" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a84e0202ea606ba5ebee8507ab2bfbe89b98551ed9b8f0be198109275cff284b" +checksum = "6df60d81823ed9c520ee897489573da4b1d79ffbe006b8134f46de1a1aa03555" dependencies = [ "basic-toml", "glob", @@ -7942,9 +7896,9 @@ dependencies = [ [[package]] name = "tungstenite" -version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15fba1a6d6bb030745759a9a2a588bfe8490fc8b4751a277db3a0be1c9ebbf67" +checksum = "e862a1c4128df0112ab625f55cd5c934bcb4312ba80b39ae4b4835a3fd58e649" dependencies = [ "byteorder", "bytes", @@ -7991,9 +7945,9 @@ checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" [[package]] name = "unicase" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" +checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89" dependencies = [ "version_check", ] @@ -8196,8 +8150,8 @@ dependencies = [ "log", "once_cell", "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", "wasm-bindgen-shared", ] @@ -8219,7 +8173,7 @@ version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ - "quote 1.0.32", + "quote 1.0.33", "wasm-bindgen-macro-support", ] @@ -8230,8 +8184,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -8318,26 +8272,26 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" dependencies = [ - "windows-targets 0.48.2", + "windows-targets 0.48.5", ] [[package]] name = "windows" -version = "0.51.0" +version = "0.51.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9763fb813068e9f4ab70a92a0c6ad61ff6b342f693b1ed0e5387c854386e670" +checksum = "ca229916c5ee38c2f2bc1e9d8f04df975b4bd93f9955dc69fabb5d91270045c9" dependencies = [ "windows-core", - "windows-targets 0.48.2", + "windows-targets 0.48.5", ] [[package]] name = "windows-core" -version = "0.51.0" +version = "0.51.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b81650771e76355778637954dc9d7eb8d991cd89ad64ba26f21eeb3c22d8d836" +checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" dependencies = [ - "windows-targets 0.48.2", + "windows-targets 0.48.5", ] [[package]] @@ -8355,7 +8309,7 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets 0.48.2", + "windows-targets 0.48.5", ] [[package]] @@ -8375,17 +8329,17 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.48.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1eeca1c172a285ee6c2c84c341ccea837e7c01b12fbb2d0fe3c9e550ce49ec8" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" dependencies = [ - "windows_aarch64_gnullvm 0.48.2", - "windows_aarch64_msvc 0.48.2", - "windows_i686_gnu 0.48.2", - "windows_i686_msvc 0.48.2", - "windows_x86_64_gnu 0.48.2", - "windows_x86_64_gnullvm 0.48.2", - "windows_x86_64_msvc 0.48.2", + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", ] [[package]] @@ -8396,9 +8350,9 @@ checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.48.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b10d0c968ba7f6166195e13d593af609ec2e3d24f916f081690695cf5eaffb2f" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_msvc" @@ -8408,9 +8362,9 @@ checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_aarch64_msvc" -version = "0.48.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "571d8d4e62f26d4932099a9efe89660e8bd5087775a2ab5cdd8b747b811f1058" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_i686_gnu" @@ -8420,9 +8374,9 @@ checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_gnu" -version = "0.48.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2229ad223e178db5fbbc8bd8d3835e51e566b8474bfca58d2e6150c48bb723cd" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_msvc" @@ -8432,9 +8386,9 @@ checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_i686_msvc" -version = "0.48.2" +version = "0.48.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "600956e2d840c194eedfc5d18f8242bc2e17c7775b6684488af3a9fff6fe3287" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_x86_64_gnu" @@ -8444,9 +8398,9 @@ checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnu" -version = "0.48.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea99ff3f8b49fb7a8e0d305e5aec485bd068c2ba691b6e277d29eaeac945868a" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnullvm" @@ -8456,9 +8410,9 @@ checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_gnullvm" -version = "0.48.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f1a05a1ece9a7a0d5a7ccf30ba2c33e3a61a30e042ffd247567d1de1d94120d" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_msvc" @@ -8468,28 +8422,19 @@ checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "windows_x86_64_msvc" -version = "0.48.2" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d419259aba16b663966e29e6d7c6ecfa0bb8425818bb96f6f1f3c3eb71a6e7b9" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "winnow" -version = "0.5.11" +version = "0.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e461589e194280efaa97236b73623445efa195aa633fd7004f39805707a9d53" +checksum = "7c2e3184b9c4e92ad5167ca73039d0c42476302ab603e2fec4487511f38ccefc" dependencies = [ "memchr", ] -[[package]] -name = "winreg" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" -dependencies = [ - "winapi", -] - [[package]] name = "winreg" version = "0.50.0" @@ -8580,7 +8525,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af46c169923ed7516eef0aa32b56d2651b229f57458ebe46b49ddd6efef5b7a2" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", "synstructure 0.12.6", ] @@ -8601,7 +8546,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4eae7c1f7d4b8eafce526bc0771449ddc2f250881ae31c50d22c032b5a1c499" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", "synstructure 0.12.6", ] @@ -8622,8 +8567,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", - "syn 2.0.28", + "quote 1.0.33", + "syn 2.0.29", ] [[package]] @@ -8644,7 +8589,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "486558732d5dde10d0f8cb2936507c1bb21bc539d924c949baf5f36a58e51bac" dependencies = [ "proc-macro2 1.0.66", - "quote 1.0.32", + "quote 1.0.33", "syn 1.0.109", "synstructure 0.12.6", ] diff --git a/Cargo.toml b/Cargo.toml index c177337984f..3396bb62e49 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -56,7 +56,7 @@ default-members = ["bin/reth"] resolver = "2" [workspace.package] -version = "0.1.0-alpha.6" +version = "0.1.0-alpha.7" edition = "2021" rust-version 
= "1.70" # Remember to update .clippy.toml and README.md license = "MIT OR Apache-2.0" @@ -106,10 +106,10 @@ reth-rpc-types-compat = { path = "./crates/rpc/rpc-types-compat"} ## eth -ethers-core = { version = "2.0.8", default-features = false } -ethers-providers = { version = "2.0.8", default-features = false } -ethers-signers = { version = "2.0.8", default-features = false } -ethers-middleware = { version = "2.0.8", default-features = false } +ethers-core = { version = "2.0", default-features = false } +ethers-providers = { version = "2.0", default-features = false } +ethers-signers = { version = "2.0", default-features = false } +ethers-middleware = { version = "2.0", default-features = false } ## misc bytes = "1.4" @@ -124,6 +124,7 @@ strum = "0.25" rayon = "1.7" itertools = "0.11" parking_lot = "0.12" +metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation ### proc-macros proc-macro2 = "1.0" @@ -147,6 +148,7 @@ jsonrpsee-types = { version = "0.20" } ## crypto secp256k1 = { version = "0.27.0", default-features = false, features = ["global-context", "rand-std", "recovery"] } +enr = { version = "0.9", default-features = false, features = ["k256"] } # for eip-4844 c-kzg = { git = "https://github.com/ethereum/c-kzg-4844" } diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 6f0cb988da1..b95edaa46f8 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -61,7 +61,7 @@ metrics-exporter-prometheus = "0.12.1" metrics-util = "0.15.0" metrics-process = "1.0.9" reth-metrics.workspace = true -metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation +metrics.workspace = true # test vectors generation proptest.workspace = true diff --git a/bin/reth/src/args/dev_args.rs b/bin/reth/src/args/dev_args.rs index 5cc02522d1f..ec951e79591 100644 --- a/bin/reth/src/args/dev_args.rs +++ b/bin/reth/src/args/dev_args.rs @@ -20,7 +20,7 @@ pub struct DevArgs { /// How many transactions to mine per block. 
#[arg( - long = "dev.block_max_transactions", + long = "dev.block-max-transactions", help_heading = "Dev testnet", conflicts_with = "block_time" )] @@ -31,7 +31,7 @@ pub struct DevArgs { /// Parses strings using [humantime::parse_duration] /// --dev.block_time 12s #[arg( - long = "dev.block_time", + long = "dev.block-time", help_heading = "Dev testnet", conflicts_with = "block_max_transactions", value_parser = parse_duration, @@ -66,14 +66,14 @@ mod tests { let args = CommandParser::::parse_from([ "reth", "--dev", - "--dev.block_max_transactions", + "--dev.block-max-transactions", "2", ]) .args; assert_eq!(args, DevArgs { dev: true, block_max_transactions: Some(2), block_time: None }); let args = - CommandParser::::parse_from(["reth", "--dev", "--dev.block_time", "1s"]).args; + CommandParser::::parse_from(["reth", "--dev", "--dev.block-time", "1s"]).args; assert_eq!( args, DevArgs { @@ -89,9 +89,9 @@ mod tests { let args = CommandParser::::try_parse_from([ "reth", "--dev", - "--dev.block_max_transactions", + "--dev.block-max-transactions", "2", - "--dev.block_time", + "--dev.block-time", "1s", ]); assert!(args.is_err()); diff --git a/bin/reth/src/args/pruning_args.rs b/bin/reth/src/args/pruning_args.rs index b9a78379890..ccb6dda741f 100644 --- a/bin/reth/src/args/pruning_args.rs +++ b/bin/reth/src/args/pruning_args.rs @@ -3,7 +3,7 @@ use clap::Args; use reth_config::config::PruneConfig; use reth_primitives::{ - ChainSpec, ContractLogsPruneConfig, PruneMode, PruneModes, MINIMUM_PRUNING_DISTANCE, + ChainSpec, PruneMode, PruneModes, ReceiptsLogPruneConfig, MINIMUM_PRUNING_DISTANCE, }; use std::sync::Arc; @@ -13,30 +13,27 @@ use std::sync::Arc; pub struct PruningArgs { /// Run full node. Only the most recent 128 block states are stored. This flag takes /// priority over pruning configuration in reth.toml. - // TODO(alexey): unhide when pruning is ready for production use - #[arg(long, hide = true, default_value_t = false)] + #[arg(long, default_value_t = false)] pub full: bool, } impl PruningArgs { /// Returns pruning configuration. - pub fn prune_config(&self, _chain_spec: Arc) -> eyre::Result> { + pub fn prune_config(&self, chain_spec: Arc) -> eyre::Result> { Ok(if self.full { - eyre::bail!("full node is not supported yet, keep an eye on next releases"); - #[allow(unreachable_code)] Some(PruneConfig { block_interval: 5, parts: PruneModes { sender_recovery: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), transaction_lookup: None, - receipts: _chain_spec + receipts: chain_spec .deposit_contract .as_ref() .map(|contract| PruneMode::Before(contract.block)), account_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), storage_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), - contract_logs_filter: ContractLogsPruneConfig( - _chain_spec + receipts_log_filter: ReceiptsLogPruneConfig( + chain_spec .deposit_contract .as_ref() .map(|contract| (contract.address, PruneMode::Before(contract.block))) diff --git a/bin/reth/src/db/mod.rs b/bin/reth/src/db/mod.rs index 9671f8d795b..53a24b718ad 100644 --- a/bin/reth/src/db/mod.rs +++ b/bin/reth/src/db/mod.rs @@ -188,7 +188,7 @@ impl Command { Subcommands::Drop { force } => { if !force { // Ask for confirmation - print!("Are you sure you want to drop the database? This cannot be undone. (y/N): "); + print!("Are you sure you want to drop the database at {db_path:?}? This cannot be undone. 
(y/N): "); // Flush the buffer to ensure the message is printed immediately io::stdout().flush().unwrap(); diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index f22dbd9fb9b..7d82a380ced 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -51,7 +51,6 @@ use reth_provider::{ providers::BlockchainProvider, BlockHashReader, BlockReader, CanonStateSubscriptions, HeaderProvider, ProviderFactory, StageCheckpointReader, }; -use reth_prune::BatchSizes; use reth_revm::Factory; use reth_revm_inspectors::stack::Hook; use reth_rpc_engine_api::EngineApi; @@ -445,7 +444,7 @@ impl NodeCommand { self.chain.clone(), prune_config.block_interval, prune_config.parts, - BatchSizes::default(), + self.chain.prune_batch_sizes, ) }); diff --git a/bin/reth/src/prometheus_exporter.rs b/bin/reth/src/prometheus_exporter.rs index cea3c0633bf..bb612d53825 100644 --- a/bin/reth/src/prometheus_exporter.rs +++ b/bin/reth/src/prometheus_exporter.rs @@ -4,11 +4,13 @@ use hyper::{ service::{make_service_fn, service_fn}, Body, Request, Response, Server, }; +use metrics::{describe_gauge, gauge}; use metrics_exporter_prometheus::{PrometheusBuilder, PrometheusHandle}; use metrics_util::layers::{PrefixLayer, Stack}; use reth_db::{database::Database, tables, DatabaseEnv}; -use reth_metrics::metrics::{absolute_counter, describe_counter, Unit}; +use reth_metrics::metrics::Unit; use std::{convert::Infallible, net::SocketAddr, sync::Arc}; +use tracing::error; pub(crate) trait Hook: Fn() + Send + Sync {} impl Hook for T {} @@ -91,15 +93,23 @@ pub(crate) async fn initialize( let overflow_pages = stats.overflow_pages(); let num_pages = leaf_pages + branch_pages + overflow_pages; let table_size = page_size * num_pages; + let entries = stats.entries(); - absolute_counter!("db.table_size", table_size as u64, "table" => table); - absolute_counter!("db.table_pages", leaf_pages as u64, "table" => table, "type" => "leaf"); - absolute_counter!("db.table_pages", branch_pages as u64, "table" => table, "type" => "branch"); - absolute_counter!("db.table_pages", overflow_pages as u64, "table" => table, "type" => "overflow"); + gauge!("db.table_size", table_size as f64, "table" => table); + gauge!("db.table_pages", leaf_pages as f64, "table" => table, "type" => "leaf"); + gauge!("db.table_pages", branch_pages as f64, "table" => table, "type" => "branch"); + gauge!("db.table_pages", overflow_pages as f64, "table" => table, "type" => "overflow"); + gauge!("db.table_entries", entries as f64, "table" => table); } Ok::<(), eyre::Report>(()) - }); + }).map_err(|error| error!(?error, "Failed to read db table stats")); + + if let Ok(freelist) = + db.freelist().map_err(|error| error!(?error, "Failed to read db.freelist")) + { + gauge!("db.freelist", freelist as f64); + } }; // Clone `process` to move it into the hook and use the original `process` for describe below. 
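The change from counters to gauges in the hunk above is deliberate: table sizes, page counts and the freelist are point-in-time snapshots that can shrink, which a monotonic counter cannot express. Below is a minimal sketch of the same pattern, assuming `metrics = "0.21"` and an already-installed recorder (such as the Prometheus exporter set up above); the function names and values here are illustrative, not part of the diff:

```rust
use metrics::{describe_gauge, gauge, Unit};

// Record point-in-time database stats. Re-running this overwrites the
// previous samples instead of accumulating, which is the desired behavior
// for sizes and page counts.
fn record_table_stats(table: &'static str, table_size: u64, entries: u64) {
    gauge!("db.table_size", table_size as f64, "table" => table);
    gauge!("db.table_entries", entries as f64, "table" => table);
}

// As in the hunk below, descriptions are registered only after the recorder
// is installed; otherwise they are lost.
fn describe_db_metrics() {
    describe_gauge!("db.table_size", Unit::Bytes, "The size of a database table (in bytes)");
    describe_gauge!("db.table_entries", "The number of entries for a table");
}
```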
@@ -113,8 +123,10 @@ pub(crate) async fn initialize( // We describe the metrics after the recorder is installed, otherwise this information is not // registered - describe_counter!("db.table_size", Unit::Bytes, "The size of a database table (in bytes)"); - describe_counter!("db.table_pages", "The number of database pages for a table"); + describe_gauge!("db.table_size", Unit::Bytes, "The size of a database table (in bytes)"); + describe_gauge!("db.table_pages", "The number of database pages for a table"); + describe_gauge!("db.table_entries", "The number of entries for a table"); + describe_gauge!("db.freelist", "The number of pages on the freelist"); process.describe(); describe_memory_stats(); @@ -124,8 +136,6 @@ pub(crate) async fn initialize( #[cfg(all(feature = "jemalloc", unix))] fn collect_memory_stats() { use jemalloc_ctl::{epoch, stats}; - use reth_metrics::metrics::gauge; - use tracing::error; if epoch::advance().map_err(|error| error!(?error, "Failed to advance jemalloc epoch")).is_err() { @@ -171,8 +181,6 @@ fn collect_memory_stats() { #[cfg(all(feature = "jemalloc", unix))] fn describe_memory_stats() { - use reth_metrics::metrics::describe_gauge; - describe_gauge!( "jemalloc.active", Unit::Bytes, diff --git a/book/SUMMARY.md b/book/SUMMARY.md index ffdb45c0cfd..9a8b903153a 100644 --- a/book/SUMMARY.md +++ b/book/SUMMARY.md @@ -9,9 +9,11 @@ 1. [Update Priorities](./installation/priorities.md) 1. [Run a Node](./run/run-a-node.md) 1. [Mainnet or official testnets](./run/mainnet.md) + 1. [Private testnet](./run/private-testnet.md) 1. [Metrics](./run/observability.md) 1. [Configuring Reth](./run/config.md) 1. [Transaction types](./run/transactions.md) + 1. [Pruning](./run/pruning.md) 1. [Troubleshooting](./run/troubleshooting.md) 1. [Interacting with Reth over JSON-RPC](./jsonrpc/intro.md) 1. [eth](./jsonrpc/eth.md) diff --git a/book/cli/node.md b/book/cli/node.md index 78d9b40dba2..2aa71d1ccef 100644 --- a/book/cli/node.md +++ b/book/cli/node.md @@ -314,6 +314,29 @@ Database: --auto-mine Automatically mine blocks for new transactions +Dev testnet: + --dev + Start the node in dev mode + + This mode uses a local proof-of-authority consensus engine with either fixed block times + or automatically mined blocks. + Disables network discovery and enables local http server. + Prefunds 20 accounts derived by mnemonic "test test test test test test test test test test + test junk" with 10 000 ETH each. + + --dev.block-max-transactions + How many transactions to mine per block + + --dev.block-time + Interval between blocks. + + Parses strings using [humantime::parse_duration] + --dev.block-time 12s + +Pruning: + --full + Run full node. Only the most recent 128 block states are stored. 
This flag takes priority over pruning configuration in reth.toml + Logging: --log.persistent The flag to enable persistent logs diff --git a/book/installation/docker.md b/book/installation/docker.md index 44cd711567d..422e692033b 100644 --- a/book/installation/docker.md +++ b/book/installation/docker.md @@ -96,7 +96,7 @@ docker compose -f etc/docker-compose.yml -f etc/lighthouse.yml up -d To check if Reth is running correctly, run: ```bash -docker compose logs -f reth +docker compose -f etc/docker-compose.yml -f etc/lighthouse.yml logs -f reth ``` The default `docker-compose.yml` file will create three containers: @@ -124,4 +124,4 @@ docker exec -it reth bash **If Reth is running with Docker Compose, replace `reth` with `reth-reth-1` in the above command** -Refer to the [CLI docs](../cli/cli.md) to interact with Reth once inside the Reth container. \ No newline at end of file +Refer to the [CLI docs](../cli/cli.md) to interact with Reth once inside the Reth container. diff --git a/book/installation/installation.md b/book/installation/installation.md index a7fd324bf13..0dd779cb276 100644 --- a/book/installation/installation.md +++ b/book/installation/installation.md @@ -33,10 +33,11 @@ Prior to purchasing an NVMe drive, it is advisable to research and determine whe ### Disk -There are multiple types of disks to sync Reth, with varying size requirements, depending on the syncing mode: +There are multiple types of disks to sync Reth, with varying size requirements, depending on the syncing mode. +As of August 2023 at block number 17.9M: -* Archive Node: At least 2.1TB is required (as of July 2023, at block number 17.7M) -* Full Node: TBD +* Archive Node: At least 2.1TB is required +* Full Node: At least 1TB is required NVMe drives are recommended for the best performance, with SSDs being a cheaper alternative. HDDs are the cheapest option, but they will take the longest to sync, and are not recommended. diff --git a/book/run/config.md b/book/run/config.md index 3d5c6ce5d6b..e432055f9a4 100644 --- a/book/run/config.md +++ b/book/run/config.md @@ -27,6 +27,7 @@ The configuration file contains the following sections: - [`reputation_weights`](#reputation_weights) - [`backoff_durations`](#backoff_durations) - [`[sessions]`](#the-sessions-section) +- [`[prune]`](#the-prune-section) ## The `[stages]` section @@ -330,4 +331,56 @@ secs = 120 nanos = 0 ``` +## The `[prune]` section + +The `[prune]` section configures pruning: which parts of the historical data to prune and how often. + +Each part of the data can be configured for pruning independently of the others. +For any unspecified parts, the default setting is no pruning. + +### Default config + +No pruning; the node runs as an archive node. + +### Example of a custom pruning configuration + +This configuration will: +- Run pruning every 5 blocks +- Continuously prune all transaction senders, account history and storage history before block `head-128`, i.e. keep the data for the last 129 blocks +- Prune all receipts before block 1920000, i.e. keep receipts from block 1920000 onwards + +```toml +[prune] +# Minimum pruning interval measured in blocks +block_interval = 5 + +[prune.parts] +# Sender Recovery pruning configuration +sender_recovery = { distance = 128 } # Prune all transaction senders before block `head-128`, i.e. keep transaction senders for the last 129 blocks + +# Transaction Lookup pruning configuration +transaction_lookup = "full" # Prune all TxNumber => TxHash mappings + +# Receipts pruning configuration. This setting overrides `receipts_log_filter`.
+receipts = { before = 1920000 } # Prune all receipts from transactions before block 1920000, i.e. keep receipts from block 1920000 onwards + +# Account History pruning configuration +account_history = { distance = 128 } # Prune all historical account states before block `head-128` + +# Storage History pruning configuration +storage_history = { distance = 128 } # Prune all historical storage states before block `head-128` +``` + +Receipts can also be pruned at a finer granularity, using log filtering: +```toml +# Receipts pruning configuration by retaining only those receipts that contain logs emitted +# by the specified addresses, discarding all others. This setting is overridden by `receipts`. +[prune.parts.receipts_log_filter] +# Prune all receipts, leaving only those which: +# - Contain logs from address `0x7ea2be2df7ba6e54b1a9c70676f668455e329d29`, starting from block 17000000 +# - Contain logs from address `0xdac17f958d2ee523a2206206994597c13d831ec7` in the last 1001 blocks +"0x7ea2be2df7ba6e54b1a9c70676f668455e329d29" = { before = 17000000 } +"0xdac17f958d2ee523a2206206994597c13d831ec7" = { distance = 1000 } +``` + [TOML]: https://toml.io/ diff --git a/book/run/private-testnet.md b/book/run/private-testnet.md new file mode 100644 index 00000000000..901a4de68a6 --- /dev/null +++ b/book/run/private-testnet.md @@ -0,0 +1,93 @@ +# Run Reth in a private testnet using Kurtosis +This guide is for those who need a private testnet to validate functionality or scale with Reth. + +## Using Docker locally +This guide uses [Kurtosis' eth2-package](https://github.com/kurtosis-tech/eth2-package) and assumes you have Kurtosis and Docker installed and have Docker already running on your machine. +* Go [here](https://docs.kurtosis.com/install/) to install Kurtosis +* Go [here](https://docs.docker.com/get-docker/) to install Docker + +The `eth2-package` is a general-purpose testnet definition for instantiating private testnets at any scale over Docker or Kubernetes. This guide will go through how to spin up a local private testnet with Reth and various CL clients. Specifically, you will instantiate a 2-node network over Docker with Reth/Lighthouse and Reth/Teku client combinations. + +To see all possible configurations and flags you can use, including metrics and observability tools (e.g. Grafana, Prometheus, etc), go [here](https://github.com/kurtosis-tech/eth2-package#configuration). + +Genesis data will be generated using this [genesis-generator](https://github.com/ethpandaops/ethereum-genesis-generator) and used to bootstrap the EL and CL clients for each node. The end result will be a private testnet with nodes deployed as Docker containers in an ephemeral, isolated environment on your machine called an [enclave](https://docs.kurtosis.com/concepts-reference/enclaves/). Read more about how the `eth2-package` works by going [here](https://github.com/kurtosis-tech/eth2-package/).
+ +First, in your home directory, create a file with the name `network_params.json` with the following contents: +```json +{ + "participants": [ + { + "el_client_type": "reth", + "el_client_image": "ghcr.io/paradigmxyz/reth", + "cl_client_type": "lighthouse", + "cl_client_image": "sigp/lighthouse:latest", + "count": 1 + }, + { + "el_client_type": "reth", + "el_client_image": "ghcr.io/paradigmxyz/reth", + "cl_client_type": "teku", + "cl_client_image": "consensys/teku:latest", + "count": 1 + } + ], + "launch_additional_services": false +} +``` + +Next, run the following command from your command line: +```bash +kurtosis run github.com/kurtosis-tech/eth2-package "$(cat ~/network_params.json)" +``` + +In the end, Kurtosis will print the services running in your enclave that form your private testnet alongside all the container ports and files that were generated & used to start up the private testnet. Here is a sample output: +```console +INFO[2023-08-21T18:22:18-04:00] ==================================================== +INFO[2023-08-21T18:22:18-04:00] || Created enclave: silky-swamp || +INFO[2023-08-21T18:22:18-04:00] ==================================================== +Name: silky-swamp +UUID: 3df730c66123 +Status: RUNNING +Creation Time: Mon, 21 Aug 2023 18:21:32 EDT + +========================================= Files Artifacts ========================================= +UUID Name +c168ec4468f6 1-lighthouse-reth-0-63 +61f821e2cfd5 2-teku-reth-64-127 +e6f94fdac1b8 cl-genesis-data +e6b57828d099 el-genesis-data +1fb632573a2e genesis-generation-config-cl +b8917e497980 genesis-generation-config-el +6fd8c5be336a geth-prefunded-keys +6ab83723b4bd prysm-password + +========================================== User Services ========================================== +UUID Name Ports Status +95386198d3f9 cl-1-lighthouse-reth http: 4000/tcp -> http://127.0.0.1:64947 RUNNING + metrics: 5054/tcp -> http://127.0.0.1:64948 + tcp-discovery: 9000/tcp -> 127.0.0.1:64949 + udp-discovery: 9000/udp -> 127.0.0.1:60303 +5f5cc4cf639a cl-1-lighthouse-reth-validator http: 5042/tcp -> 127.0.0.1:64950 RUNNING + metrics: 5064/tcp -> http://127.0.0.1:64951 +27e1cfaddc72 cl-2-teku-reth http: 4000/tcp -> 127.0.0.1:64954 RUNNING + metrics: 8008/tcp -> 127.0.0.1:64952 + tcp-discovery: 9000/tcp -> 127.0.0.1:64953 + udp-discovery: 9000/udp -> 127.0.0.1:53749 +b454497fbec8 el-1-reth-lighthouse engine-rpc: 8551/tcp -> 127.0.0.1:64941 RUNNING + metrics: 9001/tcp -> 127.0.0.1:64937 + rpc: 8545/tcp -> 127.0.0.1:64939 + tcp-discovery: 30303/tcp -> 127.0.0.1:64938 + udp-discovery: 30303/udp -> 127.0.0.1:55861 + ws: 8546/tcp -> 127.0.0.1:64940 +03a2ef13c99b el-2-reth-teku engine-rpc: 8551/tcp -> 127.0.0.1:64945 RUNNING + metrics: 9001/tcp -> 127.0.0.1:64946 + rpc: 8545/tcp -> 127.0.0.1:64943 + tcp-discovery: 30303/tcp -> 127.0.0.1:64942 + udp-discovery: 30303/udp -> 127.0.0.1:64186 + ws: 8546/tcp -> 127.0.0.1:64944 +5c199b334236 prelaunch-data-generator-cl-genesis-data RUNNING +46829c4bd8b0 prelaunch-data-generator-el-genesis-data RUNNING +``` + +## Using Kubernetes on remote infrastructure +Kurtosis packages are portable and reproducible, meaning they will work the same way over Docker locally as in the cloud on Kubernetes. Check out these docs [here](https://docs.kurtosis.com/k8s/) to learn how to deploy your private testnet to a Kubernetes cluster.
diff --git a/book/run/pruning.md b/book/run/pruning.md new file mode 100644 index 00000000000..259110c32ac --- /dev/null +++ b/book/run/pruning.md @@ -0,0 +1,183 @@ +# Pruning + +> WARNING: pruning and full node support are experimental features of Reth, +> and are currently available only on the `main` branch of the main repository. + +By default, Reth runs as an archive node. Such nodes have all historical blocks and the state at each of these blocks +available for querying and tracing. + +Reth also supports pruning of historical data and running as a full node. This chapter will walk through +the steps for running Reth as a full node, what caveats to expect and how to configure your own pruned node. + +## Basic concepts + +- Archive node – Reth node that has all historical data from genesis. +- Pruned node – Reth node that has its historical data pruned partially or fully through +a [custom configuration](./config.md#the-prune-section). +- Full Node – Reth node that has the latest state and historical data for only the last 128 blocks available +for querying in the same way as an archive node. + +The node type that was chosen when first [running a node](./run-a-node.md) **cannot** be changed after +the initial sync. Turning Archive into Pruned, or Pruned into Full, is not supported. + +## Modes ### Archive Node + +This is the default mode; follow the steps from the previous chapter on [how to run on mainnet or official testnets](./mainnet.md). + +### Full Node + +To run Reth as a full node, follow the steps from the previous chapter on +[how to run on mainnet or official testnets](./mainnet.md), and add the `--full` flag. For example: +```bash +RUST_LOG=info reth node \ + --full \ + --authrpc.jwtsecret /path/to/secret \ + --authrpc.addr 127.0.0.1 \ + --authrpc.port 8551 +``` + +### Pruned Node + +To run Reth as a pruned node configured through a [custom configuration](./config.md#the-prune-section), +modify the `reth.toml` file and run Reth the same way as an archive node by following the steps from +the previous chapter on [how to run on mainnet or official testnets](./mainnet.md). + +## Size + +All numbers are as of August 2023 at block number 17.9M for mainnet. + +### Archive + +An archive node occupies at least 2.1TB. + +You can track the growth of the Reth archive node size with our +[public Grafana dashboard](https://reth.paradigm.xyz/d/2k8BXz24k/reth?orgId=1&refresh=30s&viewPanel=52). + +### Full + +A full node occupies 1TB at its peak, slowly decreasing to 920GB. + +### Pruned + +Different parts take up different amounts of disk space. +If pruned fully, this is the total freed space you'll get, per part: + +| Part | Size | |--------------------|-------| | Sender Recovery | 70GB | | Transaction Lookup | 140GB | | Receipts | 240GB | | Account History | 230GB | | Storage History | 680GB | + +## RPC support + +As mentioned in the [pruning configuration chapter](./config.md#the-prune-section), there are several parts +which can be pruned independently of each other: +- Sender Recovery +- Transaction Lookup +- Receipts +- Account History +- Storage History + +Pruning of each of these parts disables different RPC methods, because the historical data or lookup indexes +become unavailable.
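Before reading the tables below, it helps to pin down what "pruned" means for a part. The following is an illustration of the retention rules from the configuration chapter, not reth's actual implementation: `distance = d` keeps the last `d + 1` blocks, `before = n` keeps data from block `n` onwards, and `"full"` keeps nothing.

```rust
// Illustration only: how the prune modes from the configuration chapter
// map to the lowest block whose data is still retained.
enum PruneMode {
    Full,          // prune the whole part, e.g. `transaction_lookup = "full"`
    Distance(u64), // e.g. `{ distance = 128 }`
    Before(u64),   // e.g. `{ before = 1920000 }`
}

/// Lowest retained block number, or `None` if the part is pruned entirely.
fn lowest_retained_block(mode: &PruneMode, head: u64) -> Option<u64> {
    match mode {
        PruneMode::Full => None,
        // keep `head - d ..= head`, i.e. d + 1 blocks
        PruneMode::Distance(d) => Some(head.saturating_sub(*d)),
        // keep everything from block n onwards
        PruneMode::Before(n) => Some(*n),
    }
}

fn main() {
    // `distance = 128` at head 17.9M keeps blocks 17_899_872..=17_900_000.
    assert_eq!(lowest_retained_block(&PruneMode::Distance(128), 17_900_000), Some(17_899_872));
    // `before = 1920000` keeps receipts from block 1_920_000 onwards.
    assert_eq!(lowest_retained_block(&PruneMode::Before(1_920_000), 17_900_000), Some(1_920_000));
}
```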
+ +The following tables describe the requirements for prune parts, per RPC method: +- ✅ – if the part is pruned, the RPC method still works +- ❌ - if the part is pruned, the RPC method doesn't work anymore + +### `debug` namespace + +| RPC / Part | Sender Recovery | Transaction Lookup | Receipts | Account History | Storage History | +|----------------------------|-----------------|--------------------|----------|-----------------|-----------------| +| `debug_getRawBlock` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `debug_getRawHeader` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `debug_getRawReceipts` | ✅ | ✅ | ❌ | ✅ | ✅ | +| `debug_getRawTransaction` | ✅ | ❌ | ✅ | ✅ | ✅ | +| `debug_traceBlock` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `debug_traceBlockByHash` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `debug_traceBlockByNumber` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `debug_traceCall` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `debug_traceCallMany` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `debug_traceTransaction` | ✅ | ✅ | ✅ | ❌ | ❌ | + + +### `eth` namespace + +| RPC / Part | Sender Recovery | Transaction Lookup | Receipts | Account History | Storage History | +|-------------------------------------------|-----------------|--------------------|----------|-----------------|-----------------| +| `eth_accounts` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_blockNumber` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_call` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `eth_chainId` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_createAccessList` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `eth_estimateGas` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `eth_feeHistory` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_gasPrice` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getBalance` | ✅ | ✅ | ✅ | ❌ | ✅ | +| `eth_getBlockByHash` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getBlockByNumber` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getBlockReceipts` | ✅ | ✅ | ❌ | ✅ | ✅ | +| `eth_getBlockTransactionCountByHash` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getBlockTransactionCountByNumber` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getCode` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getFilterChanges` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getFilterLogs` | ✅ | ✅ | ❌ | ✅ | ✅ | +| `eth_getLogs` | ✅ | ✅ | ❌ | ✅ | ✅ | +| `eth_getStorageAt` | ✅ | ✅ | ✅ | ✅ | ❌ | +| `eth_getTransactionByBlockHashAndIndex` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getTransactionByBlockNumberAndIndex` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getTransactionByHash` | ✅ | ❌ | ✅ | ✅ | ✅ | +| `eth_getTransactionCount` | ✅ | ✅ | ✅ | ❌ | ✅ | +| `eth_getTransactionReceipt` | ✅ | ❌ | ❌ | ✅ | ✅ | +| `eth_getUncleByBlockHashAndIndex` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getUncleByBlockNumberAndIndex` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getUncleCountByBlockHash` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getUncleCountByBlockNumber` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_maxPriorityFeePerGas` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_mining` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_newBlockFilter` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_newFilter` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_newPendingTransactionFilter` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_protocolVersion` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_sendRawTransaction` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_sendTransaction` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_sign` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_signTransaction` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_signTypedData` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_subscribe` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_syncing` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_uninstallFilter` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_unsubscribe` | ✅ | ✅ | ✅ | ✅ | ✅ | + +### `net` namespace + +| RPC / Part | Sender Recovery | Transaction Lookup | Receipts | Account History | Storage History | +|-----------------|-----------------|--------------------|----------|-----------------|-----------------| +| `net_listening` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `net_peerCount` | ✅ | ✅ | ✅ | 
✅ | ✅ | +| `net_version` | ✅ | ✅ | ✅ | ✅ | ✅ | + +### `trace` namespace + +| RPC / Part | Sender Recovery | Transaction Lookup | Receipts | Account History | Storage History | +|---------------------------------|-----------------|--------------------|----------|-----------------|-----------------| +| `trace_block` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `trace_call` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `trace_callMany` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `trace_get` | ✅ | ❌ | ✅ | ❌ | ❌ | +| `trace_rawTransaction` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `trace_replayBlockTransactions` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `trace_replayTransaction` | ✅ | ❌ | ✅ | ❌ | ❌ | +| `trace_transaction` | ✅ | ❌ | ✅ | ❌ | ❌ | + +### `txpool` namespace + +| RPC / Part | Sender Recovery | Transaction Lookup | Receipts | Account History | Storage History | +|----------------------|-----------------|--------------------|----------|-----------------|-----------------| +| `txpool_content` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `txpool_contentFrom` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `txpool_inspect` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `txpool_status` | ✅ | ✅ | ✅ | ✅ | ✅ | diff --git a/book/run/run-a-node.md b/book/run/run-a-node.md index 570b28d8023..54ffe873fa6 100644 --- a/book/run/run-a-node.md +++ b/book/run/run-a-node.md @@ -7,6 +7,7 @@ In this chapter we'll go through a few different topics you'll encounter when ru 1. [Logs and Observability](./observability.md) 1. [Configuring reth.toml](./config.md) 1. [Transaction types](./transactions.md) +1. [Pruning](./pruning.md) 1. [Troubleshooting](./troubleshooting.md) In the future, we also intend to support the [OP Stack](https://stack.optimism.io/docs/understand/explainer/), which will allow you to run Reth as a Layer 2 client. More there soon! diff --git a/book/run/troubleshooting.md b/book/run/troubleshooting.md index 4fb2ef61e0b..e810c9b559d 100644 --- a/book/run/troubleshooting.md +++ b/book/run/troubleshooting.md @@ -33,6 +33,6 @@ Caused by: git clone https://github.com/paradigmxyz/reth cd reth make db-tools - db-tools/mdbx_chk $(reth db path)/mdbx.dat + db-tools/mdbx_chk $(reth db path)/mdbx.dat | tee mdbx_chk.log ``` - If `mdbx_chk` has detected any errors, please [open an issue](https://github.com/paradigmxyz/reth/issues) and post the output. \ No newline at end of file + If `mdbx_chk` has detected any errors, please [open an issue](https://github.com/paradigmxyz/reth/issues) and post the output from the `mdbx_chk.log` file. \ No newline at end of file diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index 12dbd33d213..e18e2562eea 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -28,7 +28,7 @@ tracing.workspace = true # metrics reth-metrics = { workspace = true, features = ["common"] } -metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation +metrics.workspace = true # misc aquamarine = "0.3.0" diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs index cc54312553e..15dcb42f1ad 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -283,14 +283,14 @@ impl Default for IndexHistoryConfig { #[serde(default)] pub struct PruneConfig { /// Minimum pruning interval measured in blocks. - pub block_interval: u64, + pub block_interval: usize, /// Pruning configuration for every part of the data that can be pruned. 
pub parts: PruneModes, } impl Default for PruneConfig { fn default() -> Self { - Self { block_interval: 10, parts: PruneModes::default() } + Self { block_interval: 5, parts: PruneModes::default() } } } diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index 1c8ca449ff8..8739b68842a 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -27,7 +27,7 @@ futures.workspace = true # metrics reth-metrics.workspace = true -metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation +metrics.workspace = true # misc tracing.workspace = true diff --git a/crates/consensus/beacon/src/engine/prune.rs b/crates/consensus/beacon/src/engine/prune.rs index 257170a376f..4b2b4852dcc 100644 --- a/crates/consensus/beacon/src/engine/prune.rs +++ b/crates/consensus/beacon/src/engine/prune.rs @@ -3,7 +3,7 @@ use futures::FutureExt; use reth_db::database::Database; use reth_primitives::BlockNumber; -use reth_prune::{Pruner, PrunerError, PrunerWithResult}; +use reth_prune::{Pruner, PrunerResult, PrunerWithResult}; use reth_tasks::TaskSpawner; use std::task::{ready, Context, Poll}; use tokio::sync::oneshot; @@ -116,7 +116,7 @@ pub(crate) enum EnginePruneEvent { /// If this is returned, the pruner is idle. Finished { /// Final result of the pruner run. - result: Result<(), PrunerError>, + result: PrunerResult, }, /// Pruner task was dropped after it was started, unable to receive it because channel /// closed. This would indicate a panicked pruner task diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 4ccf958bb92..a53e7645aa2 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -20,12 +20,12 @@ use reth_interfaces::{ test_utils::{NoopFullBlockClient, TestConsensus}, }; use reth_payload_builder::test_utils::spawn_test_payload_service; -use reth_primitives::{BlockNumber, ChainSpec, PruneModes, H256, U256}; +use reth_primitives::{BlockNumber, ChainSpec, PruneBatchSizes, PruneModes, H256, U256}; use reth_provider::{ providers::BlockchainProvider, test_utils::TestExecutorFactory, BlockExecutor, ExecutorFactory, ProviderFactory, StateProvider, }; -use reth_prune::{BatchSizes, Pruner}; +use reth_prune::Pruner; use reth_revm::Factory; use reth_rpc_types::engine::{ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadStatus}; use reth_stages::{ @@ -486,7 +486,7 @@ where self.base_config.chain_spec.clone(), 5, PruneModes::default(), - BatchSizes::default(), + PruneBatchSizes::default(), ); let (mut engine, handle) = BeaconConsensusEngine::new( diff --git a/crates/interfaces/src/test_utils/generators.rs b/crates/interfaces/src/test_utils/generators.rs index 546c7f4e7d4..f856cfde6f8 100644 --- a/crates/interfaces/src/test_utils/generators.rs +++ b/crates/interfaces/src/test_utils/generators.rs @@ -239,7 +239,7 @@ where // deposit in receiving account and update storage let (prev_to, storage): &mut (Account, BTreeMap) = state.get_mut(&to).unwrap(); - let old_entries = new_entries + let mut old_entries: Vec<_> = new_entries .into_iter() .filter_map(|entry| { let old = if entry.value != U256::ZERO { @@ -254,9 +254,12 @@ where Some(StorageEntry { value: old.unwrap_or(U256::from(0)), ..entry }) }) .collect(); + old_entries.sort_by_key(|entry| entry.key); changeset.push((to, *prev_to, old_entries)); + changeset.sort_by_key(|(address, _, _)| *address); + prev_to.balance = 
prev_to.balance.wrapping_add(transfer); changesets.push(changeset); diff --git a/crates/metrics/Cargo.toml b/crates/metrics/Cargo.toml index dbc9ab90810..8a71b0ef518 100644 --- a/crates/metrics/Cargo.toml +++ b/crates/metrics/Cargo.toml @@ -13,7 +13,7 @@ description = "reth metrics utilities" reth-metrics-derive = { path = "./metrics-derive" } # metrics -metrics = "0.21.1" +metrics.workspace = true # async tokio = { workspace = true, features = ["full"], optional = true } diff --git a/crates/metrics/metrics-derive/Cargo.toml b/crates/metrics/metrics-derive/Cargo.toml index 4a2b7c2c7b5..a5e85a8422b 100644 --- a/crates/metrics/metrics-derive/Cargo.toml +++ b/crates/metrics/metrics-derive/Cargo.toml @@ -18,6 +18,6 @@ regex = "1.6.0" once_cell = "1.17.0" [dev-dependencies] -metrics = "0.21.1" +metrics.workspace = true trybuild = "1.0" serial_test = "0.10" diff --git a/crates/net/discv4/Cargo.toml b/crates/net/discv4/Cargo.toml index bd9a1f95dc5..310da6c8a61 100644 --- a/crates/net/discv4/Cargo.toml +++ b/crates/net/discv4/Cargo.toml @@ -21,7 +21,7 @@ reth-net-nat = { path = "../nat" } # ethereum discv5 = { git = "https://github.com/sigp/discv5" } secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery", "serde"] } -enr = { version = "0.8.1", default-features = false, features = ["rust-secp256k1"] } +enr = { workspace = true, default-features = false, features = ["rust-secp256k1"] } # async/futures tokio = { workspace = true, features = ["io-util", "net", "time"] } diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 20b729330e9..fa7077bc312 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -2178,7 +2178,7 @@ mod tests { let v4: Ipv4Addr = "0.0.0.0".parse().unwrap(); let v6 = v4.to_ipv6_mapped(); - let addr: SocketAddr = (v6, 30303).into(); + let addr: SocketAddr = (v6, DEFAULT_DISCOVERY_PORT).into(); let ping = Ping { from: rng_endpoint(&mut rng), @@ -2210,7 +2210,7 @@ mod tests { let v4: Ipv4Addr = "0.0.0.0".parse().unwrap(); let v6 = v4.to_ipv6_mapped(); - let addr: SocketAddr = (v6, 30303).into(); + let addr: SocketAddr = (v6, DEFAULT_DISCOVERY_PORT).into(); let ping = Ping { from: rng_endpoint(&mut rng), diff --git a/crates/net/discv4/src/proto.rs b/crates/net/discv4/src/proto.rs index 88b3bb93644..b02082f9e86 100644 --- a/crates/net/discv4/src/proto.rs +++ b/crates/net/discv4/src/proto.rs @@ -496,7 +496,7 @@ mod tests { use super::*; use crate::{ test_utils::{rng_endpoint, rng_ipv4_record, rng_ipv6_record, rng_message}, - SAFE_MAX_DATAGRAM_NEIGHBOUR_RECORDS, + DEFAULT_DISCOVERY_PORT, SAFE_MAX_DATAGRAM_NEIGHBOUR_RECORDS, }; use enr::{EnrBuilder, EnrPublicKey}; use rand::{thread_rng, Rng, RngCore}; @@ -773,7 +773,7 @@ mod tests { assert_eq!(enr.0.ip4(), Some(Ipv4Addr::new(127, 0, 0, 1))); assert_eq!(enr.0.id(), Some(String::from("v4"))); - assert_eq!(enr.0.udp4(), Some(30303)); + assert_eq!(enr.0.udp4(), Some(DEFAULT_DISCOVERY_PORT)); assert_eq!(enr.0.tcp4(), None); assert_eq!(enr.0.signature(), &signature[..]); assert_eq!(pubkey.to_vec(), expected_pubkey); @@ -808,7 +808,7 @@ mod tests { assert_eq!(enr.0.ip4(), Some(Ipv4Addr::new(127, 0, 0, 1))); assert_eq!(enr.0.id(), Some(String::from("v4"))); - assert_eq!(enr.0.udp4(), Some(30303)); + assert_eq!(enr.0.udp4(), Some(DEFAULT_DISCOVERY_PORT)); assert_eq!(enr.0.tcp4(), None); assert_eq!(enr.0.signature(), &signature[..]); assert_eq!(pubkey.to_vec(), expected_pubkey); diff --git a/crates/net/dns/Cargo.toml b/crates/net/dns/Cargo.toml index 
eeb6d81fd7a..b4ef53c8947 100644 --- a/crates/net/dns/Cargo.toml +++ b/crates/net/dns/Cargo.toml @@ -16,7 +16,7 @@ reth-rlp.workspace = true # ethereum secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery", "serde"] } -enr = { version = "0.8.1", default-features = false, features = ["rust-secp256k1"] } +enr = { workspace = true, default-features = false, features = ["rust-secp256k1"] } # async/futures tokio = { workspace = true, features = ["io-util", "net", "time"] } diff --git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml index aecd48abdaa..ccc803c8d46 100644 --- a/crates/net/downloaders/Cargo.toml +++ b/crates/net/downloaders/Cargo.toml @@ -25,7 +25,7 @@ tokio-util = { workspace = true, features = ["codec"] } # metrics reth-metrics.workspace = true -metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation +metrics.workspace = true # misc tracing.workspace = true diff --git a/crates/net/eth-wire/Cargo.toml b/crates/net/eth-wire/Cargo.toml index 7fb32727061..1551535af96 100644 --- a/crates/net/eth-wire/Cargo.toml +++ b/crates/net/eth-wire/Cargo.toml @@ -24,10 +24,11 @@ reth-rlp = { workspace = true, features = [ "ethereum-types", "smol_str", ] } +reth-discv4 = {path = "../discv4" } # metrics reth-metrics.workspace = true -metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation +metrics.workspace = true # used for Chain and builders ethers-core = { workspace = true, default-features = false } diff --git a/crates/net/eth-wire/src/builder.rs b/crates/net/eth-wire/src/builder.rs index 23da3bce69a..6b271f72df1 100644 --- a/crates/net/eth-wire/src/builder.rs +++ b/crates/net/eth-wire/src/builder.rs @@ -4,6 +4,7 @@ use crate::{ capability::Capability, hello::HelloMessage, p2pstream::ProtocolVersion, EthVersion, Status, }; +use reth_discv4::DEFAULT_DISCOVERY_PORT; use reth_primitives::{Chain, ForkId, PeerId, H256, U256}; /// Builder for [`Status`](crate::types::Status) messages. 
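The recurring change in these network crates is the removal of the magic number `30303` in favor of a constant exported by `reth-discv4`, as the new imports indicate. A small sketch of the pattern under that assumption (`30303` is Ethereum's conventional default discovery and listener port; `resolve_port` is an illustrative helper, not part of the diff):

```rust
// One named constant instead of a magic number, so every crate that builds
// a HelloMessage or an ENR record agrees on the default port.
pub const DEFAULT_DISCOVERY_PORT: u16 = 30303;

// Mirrors the `port.unwrap_or(DEFAULT_DISCOVERY_PORT)` fallback used in the
// HelloMessageBuilder hunk below.
fn resolve_port(configured: Option<u16>) -> u16 {
    configured.unwrap_or(DEFAULT_DISCOVERY_PORT)
}

fn main() {
    assert_eq!(resolve_port(None), 30303);
    assert_eq!(resolve_port(Some(30399)), 30399);
}
```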
@@ -99,8 +100,7 @@ impl HelloBuilder { // TODO: proper client versioning client_version: "Ethereum/1.0.0".to_string(), capabilities: vec![EthVersion::Eth68.into()], - // TODO: default port config - port: 30303, + port: DEFAULT_DISCOVERY_PORT, id: pubkey, }, } diff --git a/crates/net/eth-wire/src/ethstream.rs b/crates/net/eth-wire/src/ethstream.rs index ae5d60edf3e..70472e9e9fb 100644 --- a/crates/net/eth-wire/src/ethstream.rs +++ b/crates/net/eth-wire/src/ethstream.rs @@ -323,6 +323,7 @@ mod tests { }; use ethers_core::types::Chain; use futures::{SinkExt, StreamExt}; + use reth_discv4::DEFAULT_DISCOVERY_PORT; use reth_ecies::{stream::ECIESStream, util::pk2id}; use reth_primitives::{ForkFilter, Head, H256, U256}; use secp256k1::{SecretKey, SECP256K1}; @@ -591,7 +592,7 @@ mod tests { protocol_version: ProtocolVersion::V5, client_version: "bitcoind/1.0.0".to_string(), capabilities: vec![Capability::new("eth".into(), EthVersion::Eth67 as usize)], - port: 30303, + port: DEFAULT_DISCOVERY_PORT, id: pk2id(&server_key.public_key(SECP256K1)), }; @@ -619,7 +620,7 @@ mod tests { protocol_version: ProtocolVersion::V5, client_version: "bitcoind/1.0.0".to_string(), capabilities: vec![Capability::new("eth".into(), EthVersion::Eth67 as usize)], - port: 30303, + port: DEFAULT_DISCOVERY_PORT, id: pk2id(&client_key.public_key(SECP256K1)), }; diff --git a/crates/net/eth-wire/src/hello.rs b/crates/net/eth-wire/src/hello.rs index c14759fd40b..acbb2c4337d 100644 --- a/crates/net/eth-wire/src/hello.rs +++ b/crates/net/eth-wire/src/hello.rs @@ -1,5 +1,6 @@ use crate::{capability::Capability, EthVersion, ProtocolVersion}; use reth_codecs::derive_arbitrary; +use reth_discv4::DEFAULT_DISCOVERY_PORT; use reth_primitives::{constants::RETH_CLIENT_VERSION, PeerId}; use reth_rlp::{RlpDecodable, RlpEncodable}; @@ -99,7 +100,7 @@ impl HelloMessageBuilder { capabilities: capabilities.unwrap_or_else(|| { vec![EthVersion::Eth68.into(), EthVersion::Eth67.into(), EthVersion::Eth66.into()] }), - port: port.unwrap_or(30303), + port: port.unwrap_or(DEFAULT_DISCOVERY_PORT), id, } } @@ -107,6 +108,7 @@ impl HelloMessageBuilder { #[cfg(test)] mod tests { + use reth_discv4::DEFAULT_DISCOVERY_PORT; use reth_ecies::util::pk2id; use reth_rlp::{Decodable, Encodable, EMPTY_STRING_CODE}; use secp256k1::{SecretKey, SECP256K1}; @@ -123,7 +125,7 @@ mod tests { protocol_version: ProtocolVersion::V5, client_version: "reth/0.1.0".to_string(), capabilities: vec![Capability::new("eth".into(), EthVersion::Eth67 as usize)], - port: 30303, + port: DEFAULT_DISCOVERY_PORT, id, }); @@ -143,7 +145,7 @@ mod tests { protocol_version: ProtocolVersion::V5, client_version: "reth/0.1.0".to_string(), capabilities: vec![Capability::new("eth".into(), EthVersion::Eth67 as usize)], - port: 30303, + port: DEFAULT_DISCOVERY_PORT, id, }); @@ -162,7 +164,7 @@ mod tests { protocol_version: ProtocolVersion::V5, client_version: "reth/0.1.0".to_string(), capabilities: vec![Capability::new("eth".into(), EthVersion::Eth67 as usize)], - port: 30303, + port: DEFAULT_DISCOVERY_PORT, id, }); diff --git a/crates/net/eth-wire/src/p2pstream.rs b/crates/net/eth-wire/src/p2pstream.rs index 8b2b9e0fac4..b46d3ecd6a3 100644 --- a/crates/net/eth-wire/src/p2pstream.rs +++ b/crates/net/eth-wire/src/p2pstream.rs @@ -827,6 +827,7 @@ impl Decodable for ProtocolVersion { mod tests { use super::*; use crate::{DisconnectReason, EthVersion}; + use reth_discv4::DEFAULT_DISCOVERY_PORT; use reth_ecies::util::pk2id; use secp256k1::{SecretKey, SECP256K1}; use tokio::net::{TcpListener, TcpStream}; @@ 
-839,7 +840,7 @@ mod tests { protocol_version: ProtocolVersion::V5, client_version: "bitcoind/1.0.0".to_string(), capabilities: vec![EthVersion::Eth67.into()], - port: 30303, + port: DEFAULT_DISCOVERY_PORT, id: pk2id(&server_key.public_key(SECP256K1)), }; (hello, server_key) diff --git a/crates/net/eth-wire/src/types/broadcast.rs b/crates/net/eth-wire/src/types/broadcast.rs index c31ce72445f..18da1bd7568 100644 --- a/crates/net/eth-wire/src/types/broadcast.rs +++ b/crates/net/eth-wire/src/types/broadcast.rs @@ -80,6 +80,13 @@ pub struct Transactions( pub Vec, ); +impl Transactions { + /// Returns `true` if the list of transactions contains any blob transactions. + pub fn has_eip4844(&self) -> bool { + self.0.iter().any(|tx| tx.is_eip4844()) + } +} + impl From> for Transactions { fn from(txs: Vec) -> Self { Transactions(txs) diff --git a/crates/net/network-api/Cargo.toml b/crates/net/network-api/Cargo.toml index ea7bbb6acc3..90cea6f875d 100644 --- a/crates/net/network-api/Cargo.toml +++ b/crates/net/network-api/Cargo.toml @@ -13,6 +13,7 @@ description = "Network interfaces" reth-primitives.workspace = true reth-eth-wire = { path = "../eth-wire" } reth-rpc-types.workspace = true +reth-discv4 = { path = "../discv4" } # io serde = { workspace = true, features = ["derive"], optional = true } diff --git a/crates/net/network-api/src/noop.rs b/crates/net/network-api/src/noop.rs index a6e238592f6..26790f9b186 100644 --- a/crates/net/network-api/src/noop.rs +++ b/crates/net/network-api/src/noop.rs @@ -7,6 +7,7 @@ use crate::{ NetworkError, NetworkInfo, PeerKind, Peers, PeersInfo, Reputation, ReputationChangeKind, }; use async_trait::async_trait; +use reth_discv4::DEFAULT_DISCOVERY_PORT; use reth_eth_wire::{DisconnectReason, ProtocolVersion}; use reth_primitives::{Chain, NodeRecord, PeerId}; use reth_rpc_types::{EthProtocolInfo, NetworkStatus}; @@ -22,7 +23,7 @@ pub struct NoopNetwork; #[async_trait] impl NetworkInfo for NoopNetwork { fn local_addr(&self) -> SocketAddr { - (IpAddr::from(std::net::Ipv4Addr::UNSPECIFIED), 30303).into() + (IpAddr::from(std::net::Ipv4Addr::UNSPECIFIED), DEFAULT_DISCOVERY_PORT).into() } async fn network_status(&self) -> Result { diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index defa0db7fbe..c517e2336f8 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -47,7 +47,7 @@ serde_json = { workspace = true, optional = true } # metrics reth-metrics = { workspace = true, features = ["common"] } -metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation +metrics.workspace = true # misc auto_impl = "1" @@ -62,7 +62,7 @@ linked-hash-map = "0.5.6" rand.workspace = true secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } -enr = { version = "0.8.1", features = ["rust-secp256k1"], optional = true } +enr = { workspace = true, features = ["rust-secp256k1"], optional = true } ethers-core = { workspace = true, default-features = false, optional = true } tempfile = { version = "3.3", optional = true } @@ -84,7 +84,7 @@ ethers-providers = { workspace = true, default-features = false, features = ["ws ethers-signers = { workspace = true, default-features = false } ethers-middleware = { workspace = true, default-features = false } -enr = { version = "0.8.1", features = ["serde", "rust-secp256k1"] } +enr = { workspace = true, features = ["serde", "rust-secp256k1"] } # misc hex = "0.4" diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs 
index 626423a2706..a0ae98c391b 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -82,16 +82,18 @@ pub struct NetworkConfig { // === impl NetworkConfig === -impl NetworkConfig { - /// Create a new instance with all mandatory fields set, rest is field with defaults. - pub fn new(client: C, secret_key: SecretKey) -> Self { - Self::builder(secret_key).build(client) - } - +impl NetworkConfig<()> { /// Convenience method for creating the corresponding builder type pub fn builder(secret_key: SecretKey) -> NetworkConfigBuilder { NetworkConfigBuilder::new(secret_key) } +} + +impl NetworkConfig { + /// Create a new instance with all mandatory fields set, rest is field with defaults. + pub fn new(client: C, secret_key: SecretKey) -> Self { + NetworkConfig::builder(secret_key).build(client) + } /// Sets the config to use for the discovery v4 protocol. pub fn set_discovery_v4(mut self, discovery_config: Discv4Config) -> Self { diff --git a/crates/net/network/src/lib.rs b/crates/net/network/src/lib.rs index e8a7b6760a9..968377d5e9e 100644 --- a/crates/net/network/src/lib.rs +++ b/crates/net/network/src/lib.rs @@ -71,7 +71,7 @@ //! // The key that's used for encrypting sessions and to identify our node. //! let local_key = rng_secret_key(); //! -//! let config = NetworkConfig::::builder(local_key).boot_nodes( +//! let config = NetworkConfig::builder(local_key).boot_nodes( //! mainnet_nodes() //! ).build(client); //! @@ -101,7 +101,7 @@ //! let local_key = rng_secret_key(); //! //! let config = -//! NetworkConfig::::builder(local_key).boot_nodes(mainnet_nodes()).build(client.clone()); +//! NetworkConfig::builder(local_key).boot_nodes(mainnet_nodes()).build(client.clone()); //! //! // create the network instance //! let (handle, network, transactions, request_handler) = NetworkManager::builder(config) diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index fd69dd3725e..8871a1f47be 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -278,7 +278,7 @@ where /// let local_key = rng_secret_key(); /// /// let config = - /// NetworkConfig::::builder(local_key).boot_nodes(mainnet_nodes()).build(client.clone()); + /// NetworkConfig::builder(local_key).boot_nodes(mainnet_nodes()).build(client.clone()); /// /// // create the network instance /// let (handle, network, transactions, request_handler) = NetworkManager::builder(config) diff --git a/crates/net/network/src/transactions.rs b/crates/net/network/src/transactions.rs index 6e6ed363478..f9c594b1d40 100644 --- a/crates/net/network/src/transactions.rs +++ b/crates/net/network/src/transactions.rs @@ -19,13 +19,13 @@ use reth_interfaces::{ use reth_metrics::common::mpsc::UnboundedMeteredReceiver; use reth_network_api::{Peers, ReputationChangeKind}; use reth_primitives::{ - FromRecoveredTransaction, IntoRecoveredTransaction, PeerId, TransactionSigned, TxHash, TxType, - H256, + FromRecoveredPooledTransaction, IntoRecoveredTransaction, PeerId, PooledTransactionsElement, + TransactionSigned, TxHash, TxType, H256, }; use reth_rlp::Encodable; use reth_transaction_pool::{ - error::PoolResult, PoolTransaction, PropagateKind, PropagatedTransactions, TransactionPool, - ValidPoolTransaction, + error::PoolResult, GetPooledTransactionLimit, PoolTransaction, PropagateKind, + PropagatedTransactions, TransactionPool, ValidPoolTransaction, }; use std::{ collections::{hash_map::Entry, HashMap}, @@ -52,6 +52,10 @@ const MAX_FULL_TRANSACTIONS_PACKET_SIZE: usize = 100 * 1024; /// 
const GET_POOLED_TRANSACTION_SOFT_LIMIT_NUM_HASHES: usize = 256; +/// Softlimit for the response size of a GetPooledTransactions message (2MB) +const GET_POOLED_TRANSACTION_SOFT_LIMIT_SIZE: GetPooledTransactionLimit = + GetPooledTransactionLimit::SizeSoftLimit(2 * 1024 * 1024); + /// The future for inserting a function into the pool pub type PoolImportFuture = Pin> + Send + 'static>>; @@ -164,7 +168,6 @@ impl TransactionsManager { impl TransactionsManager where Pool: TransactionPool + 'static, - ::Transaction: IntoRecoveredTransaction, { /// Returns a new handle that can send commands to this type. pub fn handle(&self) -> TransactionsHandle { @@ -191,16 +194,13 @@ where // TODO softResponseLimit 2 * 1024 * 1024 let transactions = self .pool - .get_all(request.0) - .into_iter() - .map(|tx| tx.transaction.to_recovered_transaction().into_signed()) - .collect::>(); + .get_pooled_transaction_elements(request.0, GET_POOLED_TRANSACTION_SOFT_LIMIT_SIZE); - // we sent a response at which point we assume that the peer is aware of the transaction - peer.transactions.extend(transactions.iter().map(|tx| tx.hash())); + // we sent a response at which point we assume that the peer is aware of the + // transactions + peer.transactions.extend(transactions.iter().map(|tx| *tx.hash())); - // TODO: remove this! this will be different when we introduce the blobpool - let resp = PooledTransactions(transactions.into_iter().map(Into::into).collect()); + let resp = PooledTransactions(transactions); let _ = response.send(Ok(resp)); } } @@ -392,7 +392,22 @@ where fn on_network_tx_event(&mut self, event: NetworkTransactionEvent) { match event { NetworkTransactionEvent::IncomingTransactions { peer_id, msg } => { - self.import_transactions(peer_id, msg.0, TransactionSource::Broadcast); + // ensure we didn't receive any blob transactions as these are disallowed to be + // broadcasted in full + + let has_blob_txs = msg.has_eip4844(); + + let non_blob_txs = msg + .0 + .into_iter() + .map(PooledTransactionsElement::try_from_broadcast) + .filter_map(Result::ok); + + self.import_transactions(peer_id, non_blob_txs, TransactionSource::Broadcast); + + if has_blob_txs { + self.report_peer(peer_id, ReputationChangeKind::BadTransactions); + } } NetworkTransactionEvent::IncomingPooledTransactionHashes { peer_id, msg } => { self.on_new_pooled_transaction_hashes(peer_id, msg) @@ -469,7 +484,7 @@ where fn import_transactions( &mut self, peer_id: PeerId, - transactions: Vec, + transactions: impl IntoIterator, source: TransactionSource, ) { // If the node is pipeline syncing, ignore transactions @@ -488,7 +503,7 @@ where if let Some(peer) = self.peers.get_mut(&peer_id) { for tx in transactions { // recover transaction - let tx = if let Some(tx) = tx.into_ecrecovered() { + let tx = if let Ok(tx) = tx.try_into_ecrecovered() { tx } else { has_bad_transactions = true; @@ -499,18 +514,18 @@ where // If we received the transactions as the response to our GetPooledTransactions // requests (based on received `NewPooledTransactionHashes`) then we already // recorded the hashes in [`Self::on_new_pooled_transaction_hashes`] - if source.is_broadcast() && !peer.transactions.insert(tx.hash()) { + if source.is_broadcast() && !peer.transactions.insert(*tx.hash()) { num_already_seen += 1; } - match self.transactions_by_peers.entry(tx.hash()) { + match self.transactions_by_peers.entry(*tx.hash()) { Entry::Occupied(mut entry) => { // transaction was already inserted entry.get_mut().push(peer_id); } Entry::Vacant(entry) => { // this is a new transaction 
that should be imported into the pool - let pool_transaction = <Pool::Transaction as FromRecoveredTransaction>::from_recovered_transaction(tx); + let pool_transaction = <Pool::Transaction as FromRecoveredPooledTransaction>::from_recovered_transaction(tx); let pool = self.pool.clone(); @@ -608,11 +623,7 @@ where { match result { Ok(Ok(txs)) => { - // convert all transactions to the inner transaction type, ignoring any - // sidecars - // TODO: remove this! this will be different when we introduce the blobpool - let transactions = txs.0.into_iter().map(|tx| tx.into_transaction()).collect(); - this.import_transactions(peer_id, transactions, TransactionSource::Response) + this.import_transactions(peer_id, txs.0, TransactionSource::Response) } Ok(Err(req_err)) => { this.on_request_error(peer_id, req_err); @@ -850,6 +861,8 @@ enum TransactionsCommand { #[allow(missing_docs)] pub enum NetworkTransactionEvent { /// Received list of transactions from the given peer. + /// + /// This represents transactions that were broadcasted to us from the peer. IncomingTransactions { peer_id: PeerId, msg: Transactions }, /// Received list of transactions hashes to the given peer. IncomingPooledTransactionHashes { peer_id: PeerId, msg: NewPooledTransactionHashes }, diff --git a/crates/net/network/tests/it/clique/clique.rs b/crates/net/network/tests/it/clique/geth.rs similarity index 100% rename from crates/net/network/tests/it/clique/clique.rs rename to crates/net/network/tests/it/clique/geth.rs diff --git a/crates/net/network/tests/it/clique/mod.rs b/crates/net/network/tests/it/clique/mod.rs index fd635c3cab2..a8b2b8894db 100644 --- a/crates/net/network/tests/it/clique/mod.rs +++ b/crates/net/network/tests/it/clique/mod.rs @@ -1,5 +1,5 @@ -pub mod clique; pub mod clique_middleware; +mod geth; -pub use clique::CliqueGethInstance; pub use clique_middleware::{CliqueError, CliqueMiddleware, CliqueMiddlewareError}; +pub use geth::CliqueGethInstance; diff --git a/crates/net/network/tests/it/geth.rs b/crates/net/network/tests/it/geth.rs index 3c9f8d41ff2..a21637ecf09 100644 --- a/crates/net/network/tests/it/geth.rs +++ b/crates/net/network/tests/it/geth.rs @@ -32,7 +32,7 @@ async fn can_peer_with_geth() { "setting up reth networking stack in keepalive test" ); - let config = NetworkConfig::>::builder(secret_key) + let config = NetworkConfig::builder(secret_key) .listener_addr(reth_p2p) .discovery_addr(reth_disc) .chain_spec(chainspec) diff --git a/crates/payload/basic/Cargo.toml b/crates/payload/basic/Cargo.toml index a61b379adee..59ee201bf9b 100644 --- a/crates/payload/basic/Cargo.toml +++ b/crates/payload/basic/Cargo.toml @@ -28,7 +28,7 @@ futures-util.workspace = true # metrics reth-metrics.workspace = true -metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation +metrics.workspace = true ## misc tracing.workspace = true diff --git a/crates/payload/builder/Cargo.toml b/crates/payload/builder/Cargo.toml index 893b868dc3b..d83331baa62 100644 --- a/crates/payload/builder/Cargo.toml +++ b/crates/payload/builder/Cargo.toml @@ -26,7 +26,7 @@ futures-util.workspace = true ## metrics reth-metrics.workspace = true -metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation +metrics.workspace = true ## misc thiserror.workspace = true diff --git a/crates/payload/builder/src/payload.rs b/crates/payload/builder/src/payload.rs index 7ddb109a5cd..f96aabda9fe 100644 --- a/crates/payload/builder/src/payload.rs +++ b/crates/payload/builder/src/payload.rs @@ -72,7 +72,11 @@ impl From<BuiltPayload> for ExecutionPayloadEnvelope { fn from(value: BuiltPayload) -> Self { let 
BuiltPayload { block, fees, .. } = value; - ExecutionPayloadEnvelope { block_value: fees, payload: block.into() } + ExecutionPayloadEnvelope { + block_value: fees, + payload: block.into(), + should_override_builder: None, + } } } diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index 55375f5b35d..d1d2fac91bd 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -7,7 +7,7 @@ use crate::{ header::Head, proofs::genesis_state_root, Address, BlockNumber, Chain, ForkFilter, ForkHash, ForkId, Genesis, Hardfork, Header, - SealedHeader, H160, H256, U256, + PruneBatchSizes, SealedHeader, H160, H256, U256, }; use hex_literal::hex; use once_cell::sync::Lazy; @@ -68,9 +68,10 @@ pub static MAINNET: Lazy> = Lazy::new(|| { 11052984, H256(hex!("649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5")), )), + base_fee_params: BaseFeeParams::ethereum(), + prune_batch_sizes: PruneBatchSizes::mainnet(), #[cfg(feature = "optimism")] optimism: false, - ..Default::default() } .into() }); @@ -111,9 +112,10 @@ pub static GOERLI: Lazy> = Lazy::new(|| { 4367322, H256(hex!("649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5")), )), + base_fee_params: BaseFeeParams::ethereum(), + prune_batch_sizes: PruneBatchSizes::testnet(), #[cfg(feature = "optimism")] optimism: false, - ..Default::default() } .into() }); @@ -158,9 +160,11 @@ pub static SEPOLIA: Lazy> = Lazy::new(|| { 1273020, H256(hex!("649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5")), )), + + base_fee_params: BaseFeeParams::ethereum(), + prune_batch_sizes: PruneBatchSizes::testnet(), #[cfg(feature = "optimism")] optimism: false, - ..Default::default() } .into() }); @@ -216,7 +220,7 @@ pub struct BaseFeeParams { } impl BaseFeeParams { - /// Get the base fee parameters for ethereum mainnet + /// Get the base fee parameters for Ethereum mainnet pub const fn ethereum() -> BaseFeeParams { BaseFeeParams { max_change_denominator: EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR, @@ -268,6 +272,7 @@ pub static OP_GOERLI: Lazy> = Lazy::new(|| { (Hardfork::Regolith, ForkCondition::Timestamp(1679079600)), ]), base_fee_params: BaseFeeParams::optimism(), + prune_batch_sizes: PruneBatchSizes::testnet(), optimism: true, ..Default::default() } @@ -308,6 +313,7 @@ pub static BASE_GOERLI: Lazy> = Lazy::new(|| { (Hardfork::Regolith, ForkCondition::Timestamp(1683219600)), ]), base_fee_params: BaseFeeParams::optimism(), + prune_batch_sizes: PruneBatchSizes::testnet(), optimism: true, ..Default::default() } @@ -348,6 +354,7 @@ pub static BASE_MAINNET: Lazy> = Lazy::new(|| { (Hardfork::Regolith, ForkCondition::Timestamp(0)), ]), base_fee_params: BaseFeeParams::optimism(), + prune_batch_sizes: PruneBatchSizes::mainnet(), optimism: true, ..Default::default() } @@ -389,13 +396,19 @@ pub struct ChainSpec { /// The active hard forks and their activation conditions pub hardforks: BTreeMap, - /// The deposit contract deployed for PoS. + /// The deposit contract deployed for PoS #[serde(skip, default)] pub deposit_contract: Option, /// The parameters that configure how a block's base fee is computed pub base_fee_params: BaseFeeParams, + /// The batch sizes for pruner, per block. In the actual pruner run it will be multiplied by + /// the amount of blocks between pruner runs to account for the difference in amount of new + /// data coming in. 
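The per-block batch sizes introduced here scale with how often the pruner actually runs. A minimal sketch of that rule (the names and numbers are illustrative, not code from this PR):

```rust
/// If the pruner only runs every `block_interval` blocks, it must be allowed
/// to delete `block_interval` blocks' worth of rows in a single run.
fn effective_limit(per_block: usize, block_interval: usize) -> usize {
    per_block * block_interval
}

fn main() {
    // With the mainnet receipts default of 250 rows per block, a pruner that
    // runs every 5 blocks may delete up to 1250 receipt rows per run.
    assert_eq!(effective_limit(250, 5), 1250);
}
```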
+ #[serde(default)] + pub prune_batch_sizes: PruneBatchSizes, + /// Optimism configuration #[cfg(feature = "optimism")] pub optimism: bool, @@ -412,6 +425,7 @@ impl Default for ChainSpec { hardforks: Default::default(), deposit_contract: Default::default(), base_fee_params: BaseFeeParams::ethereum(), + prune_batch_sizes: Default::default(), #[cfg(feature = "optimism")] optimism: Default::default(), } diff --git a/crates/primitives/src/constants/eip4844.rs b/crates/primitives/src/constants/eip4844.rs index 602229d2cc8..69f6cbce76b 100644 --- a/crates/primitives/src/constants/eip4844.rs +++ b/crates/primitives/src/constants/eip4844.rs @@ -28,15 +28,47 @@ pub const TARGET_BLOBS_PER_BLOCK: u64 = TARGET_DATA_GAS_PER_BLOCK / DATA_GAS_PER /// Used to determine the price for next data blob pub const BLOB_GASPRICE_UPDATE_FRACTION: u64 = 3_338_477u64; // 3338477 +/// Commitment version of a KZG commitment +pub const VERSIONED_HASH_VERSION_KZG: u8 = 0x01; + /// KZG Trusted setup raw -const TRUSTED_SETUP_RAW: &str = include_str!("../../res/eip4844/trusted_setup.txt"); +const TRUSTED_SETUP_RAW: &[u8] = include_bytes!("../../res/eip4844/trusted_setup.txt"); /// KZG trusted setup pub static KZG_TRUSTED_SETUP: Lazy> = Lazy::new(|| { - let mut file = tempfile::NamedTempFile::new().unwrap(); - file.write_all(TRUSTED_SETUP_RAW.as_bytes()).unwrap(); - Arc::new(KzgSettings::load_trusted_setup_file(file.path().into()).unwrap()) + Arc::new( + load_trusted_setup_from_bytes(TRUSTED_SETUP_RAW).expect("Failed to load trusted setup"), + ) }); -/// Commitment version of a KZG commitment -pub const VERSIONED_HASH_VERSION_KZG: u8 = 0x01; +/// Loads the trusted setup parameters from the given bytes and returns the [KzgSettings]. +/// +/// This creates a temp file to store the bytes and then loads the [KzgSettings] from the file via +/// [KzgSettings::load_trusted_setup_file]. +pub fn load_trusted_setup_from_bytes(bytes: &[u8]) -> Result { + let mut file = tempfile::NamedTempFile::new().map_err(LoadKzgSettingsError::TempFileErr)?; + file.write_all(bytes).map_err(LoadKzgSettingsError::TempFileErr)?; + KzgSettings::load_trusted_setup_file(file.path().into()).map_err(LoadKzgSettingsError::KzgError) +} + +/// Error type for loading the trusted setup. +#[derive(Debug, thiserror::Error)] +pub enum LoadKzgSettingsError { + /// Failed to create temp file to store bytes for loading [KzgSettings] via + /// [KzgSettings::load_trusted_setup_file]. 
+ #[error("Failed to setup temp file: {0:?}")] + TempFileErr(#[from] std::io::Error), + /// Kzg error + #[error("Kzg error: {0:?}")] + KzgError(c_kzg::Error), +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn ensure_load_kzg_settings() { + let _settings = Arc::clone(&KZG_TRUSTED_SETUP); + } +} diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 816b401ab50..5c6e0fd57a5 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -83,8 +83,8 @@ pub use net::{ }; pub use peer::{PeerId, WithPeerId}; pub use prune::{ - ContractLogsPruneConfig, PruneCheckpoint, PruneMode, PruneModes, PrunePart, PrunePartError, - MINIMUM_PRUNING_DISTANCE, + PruneBatchSizes, PruneCheckpoint, PruneMode, PruneModes, PrunePart, PrunePartError, + ReceiptsLogPruneConfig, MINIMUM_PRUNING_DISTANCE, }; pub use receipt::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef}; pub use revm_primitives::JumpMap; @@ -93,11 +93,12 @@ pub use storage::StorageEntry; pub use transaction::{ util::secp256k1::{public_key_to_address, recover_signer, sign_message}, AccessList, AccessListItem, AccessListWithGasUsed, BlobTransaction, BlobTransactionSidecar, - FromRecoveredTransaction, IntoRecoveredTransaction, InvalidTransactionError, - PooledTransactionsElement, PooledTransactionsElementEcRecovered, Signature, Transaction, - TransactionKind, TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, - TransactionSignedNoHash, TxEip1559, TxEip2930, TxEip4844, TxLegacy, TxType, EIP1559_TX_TYPE_ID, - EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, + BlobTransactionValidationError, FromRecoveredPooledTransaction, FromRecoveredTransaction, + IntoRecoveredTransaction, InvalidTransactionError, PooledTransactionsElement, + PooledTransactionsElementEcRecovered, Signature, Transaction, TransactionKind, TransactionMeta, + TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TxEip1559, TxEip2930, + TxEip4844, TxLegacy, TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, + LEGACY_TX_TYPE_ID, }; #[cfg(feature = "optimism")] pub use transaction::{TxDeposit, DEPOSIT_TX_TYPE_ID}; diff --git a/crates/primitives/src/prune/batch_sizes.rs b/crates/primitives/src/prune/batch_sizes.rs new file mode 100644 index 00000000000..9498ea627b4 --- /dev/null +++ b/crates/primitives/src/prune/batch_sizes.rs @@ -0,0 +1,83 @@ +use paste::paste; +use serde::{Deserialize, Serialize}; + +/// Batch sizes for configuring the pruner. +/// The batch size for each prune part should be both large enough to prune the data which was +/// generated with each new block, and small enough to not generate an excessive load on the +/// database due to deletion of too many rows at once. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +pub struct PruneBatchSizes { + /// Maximum number of receipts to prune, per block. + receipts: usize, + /// Maximum number of transaction lookup entries to prune, per block. + transaction_lookup: usize, + /// Maximum number of transaction senders to prune, per block. + transaction_senders: usize, + /// Maximum number of account history entries to prune, per block. + /// Measured in the number of `AccountChangeSet` table rows. + account_history: usize, + /// Maximum number of storage history entries to prune, per block. + /// Measured in the number of `StorageChangeSet` table rows. + storage_history: usize, +} + +macro_rules! impl_prune_batch_size_methods { + ($(($human_name:expr, $name:ident)),+) => { + paste! 
{ + impl PruneBatchSizes { + $( + #[doc = concat!("Maximum number of ", $human_name, " to prune, accounting for the block interval.")] + pub fn $name(&self, block_interval: usize) -> usize { + self.$name * block_interval + } + + #[doc = concat!("Set the maximum number of ", $human_name, " to prune per block.")] + pub fn [<with_ $name>](mut self, batch_size: usize) -> Self { + self.$name = batch_size; + self + } + )+ + } + } + }; +} + +impl_prune_batch_size_methods!( + ("receipts", receipts), + ("transaction lookup entries", transaction_lookup), + ("transaction senders", transaction_senders), + ("account history entries", account_history), + ("storage history entries", storage_history) +); + +impl PruneBatchSizes { + /// Default prune batch sizes for Ethereum mainnet. + /// These settings are sufficient to prune more data than generated with each new block. + pub const fn mainnet() -> Self { + Self { + receipts: 250, + transaction_lookup: 250, + transaction_senders: 250, + account_history: 1000, + storage_history: 1000, + } + } + + /// Default prune batch sizes for Ethereum testnets. + /// These settings are sufficient to prune more data than generated with each new block. + pub const fn testnet() -> Self { + Self { + receipts: 100, + transaction_lookup: 100, + transaction_senders: 100, + account_history: 500, + storage_history: 500, + } + } +} + +impl Default for PruneBatchSizes { + fn default() -> Self { + Self::mainnet() + } +} diff --git a/crates/primitives/src/prune/checkpoint.rs b/crates/primitives/src/prune/checkpoint.rs index 52e1cabd76c..8096d2067af 100644 --- a/crates/primitives/src/prune/checkpoint.rs +++ b/crates/primitives/src/prune/checkpoint.rs @@ -1,4 +1,4 @@ -use crate::{prune::PruneMode, BlockNumber}; +use crate::{prune::PruneMode, BlockNumber, TxNumber}; use reth_codecs::{main_codec, Compact}; /// Saves the pruning progress of a stage. @@ -7,7 +7,10 @@ use reth_codecs::{main_codec, Compact}; #[cfg_attr(test, derive(Default))] pub struct PruneCheckpoint { /// Highest pruned block number. - pub block_number: BlockNumber, + /// If it's [None], the pruning for block `0` is not finished yet. + pub block_number: Option<BlockNumber>, + /// Highest pruned transaction number, if applicable. + pub tx_number: Option<TxNumber>, /// Prune mode. pub prune_mode: PruneMode, } diff --git a/crates/primitives/src/prune/mod.rs b/crates/primitives/src/prune/mod.rs index 5359c3d9c72..a2249f1c5b2 100644 --- a/crates/primitives/src/prune/mod.rs +++ b/crates/primitives/src/prune/mod.rs @@ -1,9 +1,11 @@ +mod batch_sizes; mod checkpoint; mod mode; mod part; mod target; use crate::{Address, BlockNumber}; +pub use batch_sizes::PruneBatchSizes; pub use checkpoint::PruneCheckpoint; pub use mode::PruneMode; pub use part::{PrunePart, PrunePartError}; @@ -13,9 +15,9 @@ pub use target::{PruneModes, MINIMUM_PRUNING_DISTANCE}; /// Configuration for pruning receipts not associated with logs emitted by the specified contracts. #[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)] -pub struct ContractLogsPruneConfig(pub BTreeMap<Address, PruneMode>); +pub struct ReceiptsLogPruneConfig(pub BTreeMap<Address, PruneMode>); -impl ContractLogsPruneConfig { +impl ReceiptsLogPruneConfig { /// Checks if the configuration is empty pub fn is_empty(&self) -> bool { self.0.is_empty() @@ -49,10 +51,14 @@ impl ContractLogsPruneConfig { // the BTreeMap (block = 0), otherwise it will be excluded. // Reminder that this BTreeMap works as an inclusion list that excludes (prunes) all // other receipts. 
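For reference, a usage sketch of the builder-style setters that the `paste!` macro in `batch_sizes.rs` above expands to (`[<with_ $name>]` becomes `with_receipts`, `with_account_history`, and so on; this assumes the `PruneBatchSizes` from this diff is in scope and is illustrative only):

```rust
// Illustrative only: relies on the `PruneBatchSizes` type defined above.
fn configure() -> PruneBatchSizes {
    PruneBatchSizes::mainnet()
        .with_receipts(500)          // override the per-block receipts batch
        .with_account_history(2000)  // override the per-block history batch
}

#[test]
fn batch_sizes_scale_with_interval() {
    let sizes = configure();
    // The getters multiply the per-block value by the pruner's block interval.
    assert_eq!(sizes.receipts(5), 2500);
    assert_eq!(sizes.account_history(2), 4000);
}
```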
+ // + // Reminder that we increment because the [`BlockNumber`] key of the new map should be + // viewed as `PruneMode::Before(block)` let block = (pruned_block + 1).max( mode.prune_target_block(tip, MINIMUM_PRUNING_DISTANCE, PrunePart::ContractLogs)? .map(|(block, _)| block) - .unwrap_or_default(), + .unwrap_or_default() + + 1, ); map.entry(block).or_insert_with(Vec::new).push(address) diff --git a/crates/primitives/src/prune/target.rs b/crates/primitives/src/prune/target.rs index 9620569760e..8789b0cf8f5 100644 --- a/crates/primitives/src/prune/target.rs +++ b/crates/primitives/src/prune/target.rs @@ -1,6 +1,6 @@ use crate::{ prune::PrunePartError, serde_helper::deserialize_opt_prune_mode_with_min_blocks, BlockNumber, - ContractLogsPruneConfig, PruneMode, PrunePart, + PruneMode, PrunePart, ReceiptsLogPruneConfig, }; use paste::paste; use serde::{Deserialize, Serialize}; @@ -23,8 +23,8 @@ pub struct PruneModes { /// Transaction Lookup pruning configuration. #[serde(skip_serializing_if = "Option::is_none")] pub transaction_lookup: Option<PruneMode>, - /// Configuration for pruning of receipts. This setting overrides - /// `PruneModes::contract_logs_filter` and offers improved performance. + /// Receipts pruning configuration. This setting overrides `receipts_log_filter` + /// and offers improved performance. #[serde( skip_serializing_if = "Option::is_none", deserialize_with = "deserialize_opt_prune_mode_with_min_blocks::<64, _>" )] pub receipts: Option<PruneMode>, @@ -42,12 +42,12 @@ pub struct PruneModes { deserialize_with = "deserialize_opt_prune_mode_with_min_blocks::<64, _>" )] pub storage_history: Option<PruneMode>, - /// Retains only those receipts that contain logs emitted by the specified addresses, - /// discarding all others. Note that this setting is overridden by `PruneModes::receipts`. + /// Receipts pruning configuration by retaining only those receipts that contain logs emitted + /// by the specified addresses, discarding others. This setting is overridden by `receipts`. /// /// The [`BlockNumber`] represents the starting block from which point onwards the receipts are /// preserved. - pub contract_logs_filter: ContractLogsPruneConfig, + pub receipts_log_filter: ReceiptsLogPruneConfig, } macro_rules! impl_prune_parts { @@ -90,7 +90,7 @@ macro_rules! impl_prune_parts { $( $part: Some(PruneMode::Full), )+ - contract_logs_filter: Default::default() + receipts_log_filter: Default::default() } } diff --git a/crates/primitives/src/transaction/eip4844.rs b/crates/primitives/src/transaction/eip4844.rs index 3092f284f17..1278e75e3b8 100644 --- a/crates/primitives/src/transaction/eip4844.rs +++ b/crates/primitives/src/transaction/eip4844.rs @@ -105,6 +105,68 @@ impl TxEip4844 { } } + /// Verifies that the given blob data, commitments, and proofs are all valid for this + /// transaction. + /// + /// Takes as input the [KzgSettings], which should contain the parameters derived from the + /// KZG trusted setup. + /// + /// This ensures that the blob transaction payload has the same number of blob data elements, + /// commitments, and proofs. Each blob data element is verified against its commitment and + /// proof. + /// + /// Returns [BlobTransactionValidationError::InvalidProof] if any blob KZG proof in the response + /// fails to verify, or if the versioned hashes in the transaction do not match the actual + /// commitment versioned hashes. 
+ pub fn validate_blob( + &self, + sidecar: &BlobTransactionSidecar, + proof_settings: &KzgSettings, + ) -> Result<(), BlobTransactionValidationError> { + // Ensure the versioned hashes and commitments have the same length + if self.blob_versioned_hashes.len() != sidecar.commitments.len() { + return Err(kzg::Error::MismatchLength(format!( + "There are {} versioned commitment hashes and {} commitments", + self.blob_versioned_hashes.len(), + sidecar.commitments.len() + )) + .into()) + } + + // zip and iterate, calculating versioned hashes + for (versioned_hash, commitment) in + self.blob_versioned_hashes.iter().zip(sidecar.commitments.iter()) + { + // convert to KzgCommitment + let commitment = KzgCommitment::from(*commitment.deref()); + + // Calculate the versioned hash + // + // TODO: should this method distinguish the type of validation failure? For example + // whether a certain versioned hash does not match, or whether the blob proof + // validation failed? + let calculated_versioned_hash = kzg_to_versioned_hash(commitment); + if *versioned_hash != calculated_versioned_hash { + return Err(BlobTransactionValidationError::InvalidProof) + } + } + + // Verify as a batch + let res = KzgProof::verify_blob_kzg_proof_batch( + sidecar.blobs.as_slice(), + sidecar.commitments.as_slice(), + sidecar.proofs.as_slice(), + proof_settings, + ) + .map_err(BlobTransactionValidationError::KZGError)?; + + if res { + Ok(()) + } else { + Err(BlobTransactionValidationError::InvalidProof) + } + } + /// Returns the total gas for all blobs in this transaction. #[inline] pub fn blob_gas(&self) -> u64 { @@ -252,12 +314,17 @@ impl TxEip4844 { } /// An error that can occur when validating a [BlobTransaction]. -#[derive(Debug)] +#[derive(Debug, thiserror::Error)] pub enum BlobTransactionValidationError { + /// Proof validation failed. + #[error("invalid kzg proof")] + InvalidProof, /// An error returned by the [kzg] library + #[error("kzg error: {0:?}")] KZGError(kzg::Error), /// The inner transaction is not a blob transaction - NotBlobTransaction(TxType), + #[error("unable to verify proof for non blob transaction: {0}")] + NotBlobTransaction(u8), } impl From for BlobTransactionValidationError { @@ -288,59 +355,32 @@ pub struct BlobTransaction { } impl BlobTransaction { - /// Verifies that the transaction's blob data, commitments, and proofs are all valid. - /// - /// Takes as input the [KzgSettings], which should contain the the parameters derived from the - /// KZG trusted setup. + /// Constructs a new [BlobTransaction] from a [TransactionSigned] and a + /// [BlobTransactionSidecar]. /// - /// This ensures that the blob transaction payload has the same number of blob data elements, - /// commitments, and proofs. Each blob data element is verified against its commitment and - /// proof. + /// Returns an error if the signed transaction is not [TxEip4844] + pub fn try_from_signed( + tx: TransactionSigned, + sidecar: BlobTransactionSidecar, + ) -> Result { + let TransactionSigned { transaction, signature, hash } = tx; + match transaction { + Transaction::Eip4844(transaction) => Ok(Self { hash, transaction, signature, sidecar }), + transaction => { + let tx = TransactionSigned { transaction, signature, hash }; + Err((tx, sidecar)) + } + } + } + + /// Verifies that the transaction's blob data, commitments, and proofs are all valid. /// - /// Returns `false` if any blob KZG proof in the response fails to verify, or if the versioned - /// hashes in the transaction do not match the actual commitment versioned hashes. 
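The per-commitment check in `validate_blob` above hinges on EIP-4844's versioned-hash rule: take the SHA-256 of the KZG commitment and replace the first byte with `VERSIONED_HASH_VERSION_KZG` (`0x01`). A standalone sketch of that rule using the `sha2` crate (the 48-byte commitment here is a dummy stand-in, not a real KZG commitment):

```rust
use sha2::{Digest, Sha256};

/// EIP-4844 versioned hash: sha256(commitment) with byte 0 set to the KZG
/// version tag. Mirrors what `kzg_to_versioned_hash` computes.
fn versioned_hash(commitment: &[u8; 48]) -> [u8; 32] {
    let mut hash: [u8; 32] = Sha256::digest(commitment).into();
    hash[0] = 0x01; // VERSIONED_HASH_VERSION_KZG
    hash
}

fn main() {
    let commitment = [0u8; 48];
    let hash = versioned_hash(&commitment);
    assert_eq!(hash[0], 0x01);
}
```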
+ /// See also [TxEip4844::validate_blob] pub fn validate( &self, proof_settings: &KzgSettings, - ) -> Result { - let inner_tx = &self.transaction; - - // Ensure the versioned hashes and commitments have the same length - if inner_tx.blob_versioned_hashes.len() != self.sidecar.commitments.len() { - return Err(kzg::Error::MismatchLength(format!( - "There are {} versioned commitment hashes and {} commitments", - inner_tx.blob_versioned_hashes.len(), - self.sidecar.commitments.len() - )) - .into()) - } - - // zip and iterate, calculating versioned hashes - for (versioned_hash, commitment) in - inner_tx.blob_versioned_hashes.iter().zip(self.sidecar.commitments.iter()) - { - // convert to KzgCommitment - let commitment = KzgCommitment::from(*commitment.deref()); - - // Calculate the versioned hash - // - // TODO: should this method distinguish the type of validation failure? For example - // whether a certain versioned hash does not match, or whether the blob proof - // validation failed? - let calculated_versioned_hash = kzg_to_versioned_hash(commitment); - if *versioned_hash != calculated_versioned_hash { - return Ok(false) - } - } - - // Verify as a batch - KzgProof::verify_blob_kzg_proof_batch( - self.sidecar.blobs.as_slice(), - self.sidecar.commitments.as_slice(), - self.sidecar.proofs.as_slice(), - proof_settings, - ) - .map_err(Into::into) + ) -> Result<(), BlobTransactionValidationError> { + self.transaction.validate_blob(&self.sidecar, proof_settings) } /// Splits the [BlobTransaction] into its [TransactionSigned] and [BlobTransactionSidecar] diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 0fcd5ac69d3..e59ebb9d17f 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -21,7 +21,9 @@ pub use tx_type::{ pub use eip1559::TxEip1559; pub use eip2930::TxEip2930; -pub use eip4844::{BlobTransaction, BlobTransactionSidecar, TxEip4844}; +pub use eip4844::{ + BlobTransaction, BlobTransactionSidecar, BlobTransactionValidationError, TxEip4844, +}; pub use legacy::TxLegacy; pub use pooled::{PooledTransactionsElement, PooledTransactionsElementEcRecovered}; @@ -476,6 +478,30 @@ impl Transaction { Transaction::Deposit(tx) => tx.size(), } } + + /// Returns true if the transaction is a legacy transaction. + #[inline] + pub fn is_legacy(&self) -> bool { + matches!(self, Transaction::Legacy(_)) + } + + /// Returns true if the transaction is an EIP-2930 transaction. + #[inline] + pub fn is_eip2930(&self) -> bool { + matches!(self, Transaction::Eip2930(_)) + } + + /// Returns true if the transaction is an EIP-1559 transaction. + #[inline] + pub fn is_eip1559(&self) -> bool { + matches!(self, Transaction::Eip1559(_)) + } + + /// Returns true if the transaction is an EIP-4844 transaction. + #[inline] + pub fn is_eip4844(&self) -> bool { + matches!(self, Transaction::Eip4844(_)) + } } impl Compact for Transaction { @@ -1232,6 +1258,16 @@ impl FromRecoveredTransaction for TransactionSignedEcRecovered { } } +/// A transaction type that can be created from a [`PooledTransactionsElementEcRecovered`] +/// transaction. +/// +/// This is a conversion trait that'll ensure transactions received via P2P can be converted to the +/// transaction type that the transaction pool uses. +pub trait FromRecoveredPooledTransaction { + /// Converts to this type from the given [`PooledTransactionsElementEcRecovered`]. 
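`try_from_signed` above follows a useful fallible-constructor shape: on failure it hands the destructured inputs back so the caller keeps ownership. A minimal standalone sketch of that shape (all types here are illustrative stand-ins, not the ones from this diff):

```rust
#[derive(Debug, PartialEq)]
enum Signed {
    Blob(u64),
    Legacy(u64),
}

struct Sidecar;

struct Blob {
    value: u64,
    _sidecar: Sidecar,
}

fn try_from_signed(tx: Signed, sidecar: Sidecar) -> Result<Blob, (Signed, Sidecar)> {
    match tx {
        // Only the blob variant can absorb a sidecar.
        Signed::Blob(value) => Ok(Blob { value, _sidecar: sidecar }),
        // Return both inputs so the caller can reuse them.
        other => Err((other, sidecar)),
    }
}

fn main() {
    assert!(try_from_signed(Signed::Blob(1), Sidecar).is_ok());
    let (tx, _sidecar) = try_from_signed(Signed::Legacy(2), Sidecar).unwrap_err();
    assert_eq!(tx, Signed::Legacy(2));
}
```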
+ fn from_recovered_transaction(tx: PooledTransactionsElementEcRecovered) -> Self; +} + /// The inverse of [`FromRecoveredTransaction`] that ensure the transaction can be sent over the /// network pub trait IntoRecoveredTransaction { diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index a1169b2c710..e29a39f95b5 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -59,6 +59,17 @@ pub enum PooledTransactionsElement { } impl PooledTransactionsElement { + /// Tries to convert a [TransactionSigned] into a [PooledTransactionsElement]. + /// + /// [BlobTransaction] is disallowed from being propagated, hence this returns an error if + /// `tx` is [Transaction::Eip4844] + pub fn try_from_broadcast(tx: TransactionSigned) -> Result<Self, TransactionSigned> { + if tx.is_eip4844() { + return Err(tx) + } + Ok(tx.into()) + } + /// Heavy operation that return signature hash over rlp encoded transaction. /// It is only for signature signing or signer recovery. pub fn signature_hash(&self) -> H256 { @@ -72,6 +83,18 @@ impl PooledTransactionsElement { } } + /// Reference to the transaction hash. Used to identify the transaction. + pub fn hash(&self) -> &TxHash { + match self { + PooledTransactionsElement::Legacy { hash, .. } => hash, + PooledTransactionsElement::Eip2930 { hash, .. } => hash, + PooledTransactionsElement::Eip1559 { hash, .. } => hash, + PooledTransactionsElement::BlobTransaction(tx) => &tx.hash, + #[cfg(feature = "optimism")] + PooledTransactionsElement::Deposit { hash, .. } => hash, + } + } + /// Returns the signature of the transaction. pub fn signature(&self) -> &Signature { match self { @@ -423,6 +446,12 @@ impl PooledTransactionsElementEcRecovered { self.transaction } + /// Transform back to [`TransactionSignedEcRecovered`] + pub fn into_ecrecovered_transaction(self) -> TransactionSignedEcRecovered { + let (tx, signer) = self.into_components(); + tx.into_ecrecovered_transaction(signer) + } + /// Desolve Self to its component pub fn into_components(self) -> (PooledTransactionsElement, Address) { (self.transaction, self.signer) diff --git a/crates/prune/Cargo.toml b/crates/prune/Cargo.toml index 79bc0aa9546..be5346d025a 100644 --- a/crates/prune/Cargo.toml +++ b/crates/prune/Cargo.toml @@ -19,7 +19,7 @@ reth-interfaces.workspace = true # metrics reth-metrics.workspace = true -metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation +metrics.workspace = true # misc tracing.workspace = true diff --git a/crates/prune/src/lib.rs b/crates/prune/src/lib.rs index 56999c50c13..9c08cfff461 100644 --- a/crates/prune/src/lib.rs +++ b/crates/prune/src/lib.rs @@ -4,4 +4,4 @@ mod pruner; use crate::metrics::Metrics; pub use error::PrunerError; -pub use pruner::{BatchSizes, Pruner, PrunerResult, PrunerWithResult}; +pub use pruner::{Pruner, PrunerResult, PrunerWithResult}; diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs index 5ae773c12a3..df2e8ce633d 100644 --- a/crates/prune/src/pruner.rs +++ b/crates/prune/src/pruner.rs @@ -12,54 +12,38 @@ use reth_db::{ BlockNumberList, }; use reth_primitives::{ - BlockNumber, ChainSpec, PruneCheckpoint, PruneMode, PruneModes, PrunePart, TxNumber, - MINIMUM_PRUNING_DISTANCE, + BlockNumber, ChainSpec, PruneBatchSizes, PruneCheckpoint, PruneMode, PruneModes, PrunePart, + TxNumber, MINIMUM_PRUNING_DISTANCE, }; use reth_provider::{ BlockReader, DatabaseProviderRW, ProviderFactory, PruneCheckpointReader, PruneCheckpointWriter, 
TransactionsProvider, }; use std::{ops::RangeInclusive, sync::Arc, time::Instant}; -use tracing::{debug, instrument, trace}; +use tracing::{debug, error, instrument, trace}; -/// Result of [Pruner::run] execution -pub type PrunerResult = Result<(), PrunerError>; +/// Result of [Pruner::run] execution. +/// +/// Returns `true` if pruning has been completed up to the target block, +/// and `false` if there's more data to prune in further runs. +pub type PrunerResult = Result; -/// The pipeline type itself with the result of [Pruner::run] +/// The pruner type itself with the result of [Pruner::run] pub type PrunerWithResult = (Pruner, PrunerResult); -pub struct BatchSizes { - receipts: usize, - transaction_lookup: usize, - transaction_senders: usize, - account_history: usize, - storage_history: usize, -} - -impl Default for BatchSizes { - fn default() -> Self { - Self { - receipts: 10000, - transaction_lookup: 10000, - transaction_senders: 10000, - account_history: 10000, - storage_history: 10000, - } - } -} - /// Pruning routine. Main pruning logic happens in [Pruner::run]. pub struct Pruner { metrics: Metrics, provider_factory: ProviderFactory, /// Minimum pruning interval measured in blocks. All prune parts are checked and, if needed, /// pruned, when the chain advances by the specified number of blocks. - min_block_interval: u64, + min_block_interval: usize, /// Last pruned block number. Used in conjunction with `min_block_interval` to determine /// when the pruning needs to be initiated. last_pruned_block_number: Option, modes: PruneModes, - batch_sizes: BatchSizes, + /// Maximum entries to prune per block, per prune part. + batch_sizes: PruneBatchSizes, } impl Pruner { @@ -67,9 +51,9 @@ impl Pruner { pub fn new( db: DB, chain_spec: Arc, - min_block_interval: u64, + min_block_interval: usize, modes: PruneModes, - batch_sizes: BatchSizes, + batch_sizes: PruneBatchSizes, ) -> Self { Self { metrics: Metrics::default(), @@ -83,77 +67,156 @@ impl Pruner { /// Run the pruner pub fn run(&mut self, tip_block_number: BlockNumber) -> PrunerResult { - trace!( - target: "pruner", - %tip_block_number, - "Pruner started" - ); + if tip_block_number == 0 { + self.last_pruned_block_number = Some(tip_block_number); + + trace!(target: "pruner", %tip_block_number, "Nothing to prune yet"); + return Ok(true) + } + + trace!(target: "pruner", %tip_block_number, "Pruner started"); let start = Instant::now(); let provider = self.provider_factory.provider_rw()?; + let mut done = true; + if let Some((to_block, prune_mode)) = self.modes.prune_target_block_receipts(tip_block_number)? 
{ + trace!( + target: "pruner", + prune_part = ?PrunePart::Receipts, + %to_block, + ?prune_mode, + "Got target block to prune" + ); + let part_start = Instant::now(); - self.prune_receipts(&provider, to_block, prune_mode)?; + let part_done = self.prune_receipts(&provider, to_block, prune_mode)?; + done = done && part_done; self.metrics .get_prune_part_metrics(PrunePart::Receipts) .duration_seconds .record(part_start.elapsed()) + } else { + trace!(target: "pruner", prune_part = ?PrunePart::Receipts, "No target block to prune"); } - if !self.modes.contract_logs_filter.is_empty() { + if !self.modes.receipts_log_filter.is_empty() { let part_start = Instant::now(); - self.prune_receipts_by_logs(&provider, tip_block_number)?; + let part_done = self.prune_receipts_by_logs(&provider, tip_block_number)?; + done = done && part_done; self.metrics .get_prune_part_metrics(PrunePart::ContractLogs) .duration_seconds .record(part_start.elapsed()) + } else { + trace!(target: "pruner", prune_part = ?PrunePart::ContractLogs, "No filter to prune"); } if let Some((to_block, prune_mode)) = self.modes.prune_target_block_transaction_lookup(tip_block_number)? { + trace!( + target: "pruner", + prune_part = ?PrunePart::TransactionLookup, + %to_block, + ?prune_mode, + "Got target block to prune" + ); + let part_start = Instant::now(); - self.prune_transaction_lookup(&provider, to_block, prune_mode)?; + let part_done = self.prune_transaction_lookup(&provider, to_block, prune_mode)?; + done = done && part_done; self.metrics .get_prune_part_metrics(PrunePart::TransactionLookup) .duration_seconds .record(part_start.elapsed()) + } else { + trace!( + target: "pruner", + prune_part = ?PrunePart::TransactionLookup, + "No target block to prune" + ); } if let Some((to_block, prune_mode)) = self.modes.prune_target_block_sender_recovery(tip_block_number)? { + trace!( + target: "pruner", + prune_part = ?PrunePart::SenderRecovery, + %to_block, + ?prune_mode, + "Got target block to prune" + ); + let part_start = Instant::now(); - self.prune_transaction_senders(&provider, to_block, prune_mode)?; + let part_done = self.prune_transaction_senders(&provider, to_block, prune_mode)?; + done = done && part_done; self.metrics .get_prune_part_metrics(PrunePart::SenderRecovery) .duration_seconds .record(part_start.elapsed()) + } else { + trace!( + target: "pruner", + prune_part = ?PrunePart::SenderRecovery, + "No target block to prune" + ); } if let Some((to_block, prune_mode)) = self.modes.prune_target_block_account_history(tip_block_number)? { + trace!( + target: "pruner", + prune_part = ?PrunePart::AccountHistory, + %to_block, + ?prune_mode, + "Got target block to prune" + ); + let part_start = Instant::now(); - self.prune_account_history(&provider, to_block, prune_mode)?; + let part_done = self.prune_account_history(&provider, to_block, prune_mode)?; + done = done && part_done; self.metrics .get_prune_part_metrics(PrunePart::AccountHistory) .duration_seconds .record(part_start.elapsed()) + } else { + trace!( + target: "pruner", + prune_part = ?PrunePart::AccountHistory, + "No target block to prune" + ); } if let Some((to_block, prune_mode)) = self.modes.prune_target_block_storage_history(tip_block_number)? 
{ + trace!( + target: "pruner", + prune_part = ?PrunePart::StorageHistory, + %to_block, + ?prune_mode, + "Got target block to prune" + ); + let part_start = Instant::now(); - self.prune_storage_history(&provider, to_block, prune_mode)?; + let part_done = self.prune_storage_history(&provider, to_block, prune_mode)?; + done = done && part_done; self.metrics .get_prune_part_metrics(PrunePart::StorageHistory) .duration_seconds .record(part_start.elapsed()) + } else { + trace!( + target: "pruner", + prune_part = ?PrunePart::StorageHistory, + "No target block to prune" + ); } provider.commit()?; @@ -162,13 +225,8 @@ impl Pruner { let elapsed = start.elapsed(); self.metrics.duration_seconds.record(elapsed); - trace!( - target: "pruner", - %tip_block_number, - ?elapsed, - "Pruner finished" - ); - Ok(()) + trace!(target: "pruner", %tip_block_number, ?elapsed, "Pruner finished"); + Ok(done) } /// Returns `true` if the pruning is needed at the provided tip block number. @@ -178,7 +236,8 @@ impl Pruner { // Saturating subtraction is needed for the case when the chain was reverted, meaning // current block number might be less than the previously pruned block number. If // that's the case, no pruning is needed as outdated data is also reverted. - tip_block_number.saturating_sub(last_pruned_block_number) >= self.min_block_interval + tip_block_number.saturating_sub(last_pruned_block_number) >= + self.min_block_interval as u64 }) { debug!( target: "pruner", @@ -192,6 +251,36 @@ impl Pruner { } } + /// Get next inclusive block range to prune according to the checkpoint, `to_block` block + /// number and `limit`. + /// + /// To get the range start (`from_block`): + /// 1. If checkpoint exists, use next block. + /// 2. If checkpoint doesn't exist, use block 0. + /// + /// To get the range end: use block `to_block`. + fn get_next_block_range_from_checkpoint( + &self, + provider: &DatabaseProviderRW<'_, DB>, + prune_part: PrunePart, + to_block: BlockNumber, + ) -> reth_interfaces::Result>> { + let from_block = provider + .get_prune_checkpoint(prune_part)? + .and_then(|checkpoint| checkpoint.block_number) + // Checkpoint exists, prune from the next block after the highest pruned one + .map(|block_number| block_number + 1) + // No checkpoint exists, prune from genesis + .unwrap_or(0); + + let range = from_block..=to_block; + if range.is_empty() { + return Ok(None) + } + + Ok(Some(range)) + } + /// Get next inclusive tx number range to prune according to the checkpoint and `to_block` block /// number. /// @@ -206,30 +295,34 @@ impl Pruner { prune_part: PrunePart, to_block: BlockNumber, ) -> reth_interfaces::Result>> { - let from_block_number = provider + let from_tx_number = provider .get_prune_checkpoint(prune_part)? - // Checkpoint exists, prune from the next block after the highest pruned one - .map(|checkpoint| checkpoint.block_number + 1) + // Checkpoint exists, prune from the next transaction after the highest pruned one + .and_then(|checkpoint| match checkpoint.tx_number { + Some(tx_number) => Some(tx_number + 1), + _ => { + error!(target: "pruner", %prune_part, ?checkpoint, "Expected transaction number in prune checkpoint, found None"); + None + }, + }) // No checkpoint exists, prune from genesis .unwrap_or(0); - // Get first transaction - let from_tx_num = - provider.block_body_indices(from_block_number)?.map(|body| body.first_tx_num); - // If no block body index is found, the DB is either corrupted or we've already pruned up to - // the latest block, so there's no thing to prune now. 
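The checkpoint-to-range translation in `get_next_block_range_from_checkpoint` above is easy to get subtly wrong (off-by-one on resume, empty ranges). A standalone sketch of the same logic with the cases spelled out (names are illustrative):

```rust
use std::ops::RangeInclusive;

/// Resume from the block after the checkpoint (or genesis when there is no
/// checkpoint), and report `None` when the inclusive range would be empty.
fn next_block_range(checkpoint: Option<u64>, to_block: u64) -> Option<RangeInclusive<u64>> {
    let from_block = checkpoint.map(|block| block + 1).unwrap_or(0);
    let range = from_block..=to_block;
    if range.is_empty() {
        return None
    }
    Some(range)
}

fn main() {
    assert_eq!(next_block_range(None, 10), Some(0..=10)); // no checkpoint: from genesis
    assert_eq!(next_block_range(Some(4), 10), Some(5..=10)); // resume after checkpoint
    assert_eq!(next_block_range(Some(10), 10), None); // already pruned to target
}
```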
- let Some(from_tx_num) = from_tx_num else { return Ok(None) }; - - let to_tx_num = match provider.block_body_indices(to_block)? { + let to_tx_number = match provider.block_body_indices(to_block)? { Some(body) => body, None => return Ok(None), } .last_tx_num(); - Ok(Some(from_tx_num..=to_tx_num)) + let range = from_tx_number..=to_tx_number; + if range.is_empty() { + return Ok(None) + } + + Ok(Some(range)) } - /// Prune receipts up to the provided block, inclusive. + /// Prune receipts up to the provided block, inclusive, respecting the batch size. #[instrument(level = "trace", skip(self, provider), target = "pruner")] fn prune_receipts( &self, @@ -237,7 +330,7 @@ impl Pruner { to_block: BlockNumber, prune_mode: PruneMode, ) -> PrunerResult { - let range = match self.get_next_tx_num_range_from_checkpoint( + let tx_range = match self.get_next_tx_num_range_from_checkpoint( provider, PrunePart::Receipts, to_block, @@ -245,42 +338,44 @@ impl Pruner { Some(range) => range, None => { trace!(target: "pruner", "No receipts to prune"); - return Ok(()) + return Ok(true) } }; - let total = range.clone().count(); + let tx_range_end = *tx_range.end(); - provider.prune_table_with_iterator_in_batches::( - range, - self.batch_sizes.receipts, - |rows| { - trace!( - target: "pruner", - %rows, - progress = format!("{:.1}%", 100.0 * rows as f64 / total as f64), - "Pruned receipts" - ); - }, + let mut last_pruned_transaction = tx_range_end; + let (deleted, done) = provider.prune_table_with_range::( + tx_range, + self.batch_sizes.receipts(self.min_block_interval), |_| false, + |row| last_pruned_transaction = row.0, )?; + trace!(target: "pruner", %deleted, %done, "Pruned receipts"); + + let last_pruned_block = provider + .transaction_block(last_pruned_transaction)? + .ok_or(PrunerError::InconsistentData("Block for transaction is not found"))? + // If there's more receipts to prune, set the checkpoint block number to previous, + // so we could finish pruning its receipts on the next run. + .checked_sub(if done { 0 } else { 1 }); + + let prune_checkpoint = PruneCheckpoint { + block_number: last_pruned_block, + tx_number: Some(last_pruned_transaction), + prune_mode, + }; - provider.save_prune_checkpoint( - PrunePart::Receipts, - PruneCheckpoint { block_number: to_block, prune_mode }, - )?; + provider.save_prune_checkpoint(PrunePart::Receipts, prune_checkpoint)?; // `PrunePart::Receipts` overrides `PrunePart::ContractLogs`, so we can preemptively // limit their pruning start point. - provider.save_prune_checkpoint( - PrunePart::ContractLogs, - PruneCheckpoint { block_number: to_block, prune_mode }, - )?; + provider.save_prune_checkpoint(PrunePart::ContractLogs, prune_checkpoint)?; - Ok(()) + Ok(done) } - /// Prune receipts up to the provided block by filtering logs. Works as in inclusion list, and - /// removes every receipt not belonging to it. + /// Prune receipts up to the provided block, inclusive, by filtering logs. Works as in inclusion + /// list, and removes every receipt not belonging to it. Respects the batch size. #[instrument(level = "trace", skip(self, provider), target = "pruner")] fn prune_receipts_by_logs( &self, @@ -298,14 +393,25 @@ impl Pruner { .map(|(bn, _)| bn) .unwrap_or_default(); - // Figure out what receipts have already been pruned, so we can have an accurate - // `address_filter` - let pruned = provider + // Get status checkpoint from latest run + let mut last_pruned_block = provider .get_prune_checkpoint(PrunePart::ContractLogs)? 
- .map(|checkpoint| checkpoint.block_number); + .and_then(|checkpoint| checkpoint.block_number); + + let initial_last_pruned_block = last_pruned_block; + let mut from_tx_number = match initial_last_pruned_block { + Some(block) => provider + .block_body_indices(block)? + .map(|block| block.last_tx_num() + 1) + .unwrap_or(0), + None => 0, + }; + + // Figure out what receipts have already been pruned, so we can have an accurate + // `address_filter` let address_filter = - self.modes.contract_logs_filter.group_by_block(tip_block_number, pruned)?; + self.modes.receipts_log_filter.group_by_block(tip_block_number, last_pruned_block)?; // Splits all transactions in different block ranges. Each block range will have its own // filter address list and will check it while going through the table @@ -334,9 +440,13 @@ impl Pruner { while let Some((start_block, addresses)) = blocks_iter.next() { filtered_addresses.extend_from_slice(addresses); - // This will clear all receipts before the first appearance of a contract log + // This will clear all receipts before the first appearance of a contract log or since + // the block after the last pruned one. if block_ranges.is_empty() { - block_ranges.push((0, *start_block - 1, 0)); + let init = last_pruned_block.map(|b| b + 1).unwrap_or_default(); + if init < *start_block { + block_ranges.push((init, *start_block - 1, 0)); + } } let end_block = @@ -347,86 +457,107 @@ impl Pruner { block_ranges.push((*start_block, end_block, filtered_addresses.len())); } + trace!( + target: "pruner", + ?block_ranges, + ?filtered_addresses, + "Calculated block ranges and filtered addresses", + ); + + let mut limit = self.batch_sizes.receipts(self.min_block_interval); + let mut done = true; + let mut last_pruned_transaction = None; for (start_block, end_block, num_addresses) in block_ranges { - let range = match self.get_next_tx_num_range_from_checkpoint( - provider, - PrunePart::ContractLogs, - end_block, - )? { - Some(range) => range, + let block_range = start_block..=end_block; + + // Calculate the transaction range from this block range + let tx_range_end = match provider.block_body_indices(end_block)? { + Some(body) => body.last_tx_num(), None => { trace!( - target: "pruner", - block_range = format!("{start_block}..={end_block}"), - "No receipts to prune." + target: "pruner", + ?block_range, + "No receipts to prune." ); continue } }; - - let total = range.clone().count(); - let mut processed = 0; - - provider.prune_table_with_iterator_in_batches::( - range, - self.batch_sizes.receipts, - |rows| { - processed += rows; - trace!( - target: "pruner", - %rows, - block_range = format!("{start_block}..={end_block}"), - progress = format!("{:.1}%", 100.0 * processed as f64 / total as f64), - "Pruned receipts" - ); - }, - |receipt| { - num_addresses > 0 && + let tx_range = from_tx_number..=tx_range_end; + + // Delete receipts, except the ones in the inclusion list + let mut last_skipped_transaction = 0; + let deleted; + (deleted, done) = provider.prune_table_with_range::( + tx_range, + limit, + |(tx_num, receipt)| { + let skip = num_addresses > 0 && receipt.logs.iter().any(|log| { filtered_addresses[..num_addresses].contains(&&log.address) - }) + }); + + if skip { + last_skipped_transaction = *tx_num; + } + skip }, + |row| last_pruned_transaction = Some(row.0), )?; + trace!(target: "pruner", %deleted, %done, ?block_range, "Pruned receipts"); + + limit = limit.saturating_sub(deleted); + + // For accurate checkpoints we need to know that we have checked every transaction. 
+ // Example: we reached the end of the range, and the last receipt is supposed to skip + // its deletion. + last_pruned_transaction = + Some(last_pruned_transaction.unwrap_or_default().max(last_skipped_transaction)); + last_pruned_block = Some( + provider + .transaction_block(last_pruned_transaction.expect("qed"))? + .ok_or(PrunerError::InconsistentData("Block for transaction is not found"))? + // If there's more receipts to prune, set the checkpoint block number to + // previous, so we could finish pruning its receipts on the + // next run. + .saturating_sub(if done { 0 } else { 1 }), + ); - // If this is the last block range, avoid writing an unused checkpoint if end_block != to_block { - // This allows us to query for the transactions in the next block range with - // [`get_next_tx_num_range_from_checkpoint`]. It's just a temporary intermediate - // checkpoint, which should be adjusted in the end. - provider.save_prune_checkpoint( - PrunePart::ContractLogs, - PruneCheckpoint { - block_number: end_block, - prune_mode: PruneMode::Before(end_block + 1), - }, - )?; + if limit == 0 { + done &= end_block == to_block; + break } + + from_tx_number = last_pruned_transaction.expect("qed") + 1; } // If there are contracts using `PruneMode::Distance(_)` there will be receipts before - // `to_block` that become eligible to be pruned in future runs. Therefore, our - // checkpoint is not actually `to_block`, but the `lowest_block_with_distance` from any - // contract. This ensures that in future pruner runs we can - // prune all these receipts between the previous `lowest_block_with_distance` and the new - // one using `get_next_tx_num_range_from_checkpoint`. - let checkpoint_block = self + // `to_block` that become eligible to be pruned in future runs. Therefore, our checkpoint is + // not actually `to_block`, but the `lowest_block_with_distance` from any contract. + // This ensures that in future pruner runs we can prune all these receipts between the + // previous `lowest_block_with_distance` and the new one using + // `get_next_tx_num_range_from_checkpoint`. + // + // Only applies if we were able to prune everything intended for this run, otherwise the + // checkpoint is the `last_pruned_block`. + let prune_mode_block = self .modes - .contract_logs_filter - .lowest_block_with_distance(tip_block_number, pruned)? + .receipts_log_filter + .lowest_block_with_distance(tip_block_number, initial_last_pruned_block)? .unwrap_or(to_block); provider.save_prune_checkpoint( PrunePart::ContractLogs, PruneCheckpoint { - block_number: checkpoint_block - 1, - prune_mode: PruneMode::Before(checkpoint_block), + block_number: Some(prune_mode_block.min(last_pruned_block.unwrap_or(u64::MAX))), + tx_number: last_pruned_transaction, + prune_mode: PruneMode::Before(prune_mode_block), }, )?; - - Ok(()) + Ok(done) } - /// Prune transaction lookup entries up to the provided block, inclusive. + /// Prune transaction lookup entries up to the provided block, inclusive, respecting the batch + /// size. 
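The transaction-lookup pruning below recomputes each transaction's hash for the batch being deleted, and the diff parallelizes that work with rayon. A toy sketch of that parallel-map pattern (the hash function is a stand-in for the real keccak-based transaction hash):

```rust
use rayon::prelude::*;

// Stand-in for the real transaction hash; only the parallel-map shape matters.
fn toy_hash(tx: &[u8]) -> u64 {
    tx.iter().fold(0u64, |acc, byte| acc.wrapping_mul(31).wrapping_add(*byte as u64))
}

fn main() {
    let transactions: Vec<Vec<u8>> = (0u8..32).map(|i| vec![i; 8]).collect();
    // rayon splits the batch across threads; the order of results is preserved.
    let hashes: Vec<u64> = transactions.par_iter().map(|tx| toy_hash(tx)).collect();
    assert_eq!(hashes.len(), transactions.len());
}
```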
#[instrument(level = "trace", skip(self, provider), target = "pruner")] fn prune_transaction_lookup( &self, @@ -434,7 +565,7 @@ impl Pruner { to_block: BlockNumber, prune_mode: PruneMode, ) -> PrunerResult { - let range = match self.get_next_tx_num_range_from_checkpoint( + let (start, end) = match self.get_next_tx_num_range_from_checkpoint( provider, PrunePart::TransactionLookup, to_block, @@ -442,52 +573,57 @@ impl Pruner { Some(range) => range, None => { trace!(target: "pruner", "No transaction lookup entries to prune"); - return Ok(()) - } - }; - let last_tx_num = *range.end(); - let total = range.clone().count(); - let mut processed = 0; - - for i in range.step_by(self.batch_sizes.transaction_lookup) { - // The `min` ensures that the transaction range doesn't exceed the last transaction - // number. `last_tx_num + 1` is used to include the last transaction in the range. - let tx_range = i..(i + self.batch_sizes.transaction_lookup as u64).min(last_tx_num + 1); - - // Retrieve transactions in the range and calculate their hashes in parallel - let mut hashes = provider - .transactions_by_tx_range(tx_range.clone())? - .into_par_iter() - .map(|transaction| transaction.hash()) - .collect::>(); - - // Number of transactions retrieved from the database should match the tx range count - let tx_count = tx_range.clone().count(); - if hashes.len() != tx_count { - return Err(PrunerError::InconsistentData( - "Unexpected number of transaction hashes retrieved by transaction number range", - )) + return Ok(true) } + } + .into_inner(); + let tx_range = start..= + (end.min( + start + self.batch_sizes.transaction_lookup(self.min_block_interval) as u64 - 1, + )); + let tx_range_end = *tx_range.end(); + + // Retrieve transactions in the range and calculate their hashes in parallel + let hashes = provider + .transactions_by_tx_range(tx_range.clone())? + .into_par_iter() + .map(|transaction| transaction.hash()) + .collect::>(); + + // Number of transactions retrieved from the database should match the tx range count + let tx_count = tx_range.clone().count(); + if hashes.len() != tx_count { + return Err(PrunerError::InconsistentData( + "Unexpected number of transaction hashes retrieved by transaction number range", + )) + } - // Pre-sort hashes to prune them in order - hashes.sort_unstable(); + let mut last_pruned_transaction = tx_range_end; + let (deleted, done) = provider.prune_table_with_iterator::( + hashes, + self.batch_sizes.transaction_lookup(self.min_block_interval), + |row| last_pruned_transaction = row.1, + )?; + trace!(target: "pruner", %deleted, %done, "Pruned transaction lookup"); - let rows = provider.prune_table_with_iterator::(hashes)?; - processed += rows; - trace!( - target: "pruner", - %rows, - progress = format!("{:.1}%", 100.0 * processed as f64 / total as f64), - "Pruned transaction lookup" - ); - } + let last_pruned_block = provider + .transaction_block(last_pruned_transaction)? + .ok_or(PrunerError::InconsistentData("Block for transaction is not found"))? + // If there's more transaction lookup entries to prune, set the checkpoint block number + // to previous, so we could finish pruning its transaction lookup entries on the next + // run. 
+ .checked_sub(if done { 0 } else { 1 }); provider.save_prune_checkpoint( PrunePart::TransactionLookup, - PruneCheckpoint { block_number: to_block, prune_mode }, + PruneCheckpoint { + block_number: last_pruned_block, + tx_number: Some(last_pruned_transaction), + prune_mode, + }, )?; - Ok(()) + Ok(done) } /// Prune transaction senders up to the provided block, inclusive. @@ -498,7 +634,7 @@ impl Pruner { to_block: BlockNumber, prune_mode: PruneMode, ) -> PrunerResult { - let range = match self.get_next_tx_num_range_from_checkpoint( + let tx_range = match self.get_next_tx_num_range_from_checkpoint( provider, PrunePart::SenderRecovery, to_block, @@ -506,30 +642,37 @@ impl Pruner { Some(range) => range, None => { trace!(target: "pruner", "No transaction senders to prune"); - return Ok(()) + return Ok(true) } }; - let total = range.clone().count(); + let tx_range_end = *tx_range.end(); - provider.prune_table_with_range_in_batches::( - range, - self.batch_sizes.transaction_senders, - |rows, _| { - trace!( - target: "pruner", - %rows, - progress = format!("{:.1}%", 100.0 * rows as f64 / total as f64), - "Pruned transaction senders" - ); - }, + let mut last_pruned_transaction = tx_range_end; + let (deleted, done) = provider.prune_table_with_range::( + tx_range, + self.batch_sizes.transaction_senders(self.min_block_interval), + |_| false, + |row| last_pruned_transaction = row.0, )?; + trace!(target: "pruner", %deleted, %done, "Pruned transaction senders"); + + let last_pruned_block = provider + .transaction_block(last_pruned_transaction)? + .ok_or(PrunerError::InconsistentData("Block for transaction is not found"))? + // If there's more transaction senders to prune, set the checkpoint block number to + // previous, so we could finish pruning its transaction senders on the next run. + .checked_sub(if done { 0 } else { 1 }); provider.save_prune_checkpoint( PrunePart::SenderRecovery, - PruneCheckpoint { block_number: to_block, prune_mode }, + PruneCheckpoint { + block_number: last_pruned_block, + tx_number: Some(last_pruned_transaction), + prune_mode, + }, )?; - Ok(()) + Ok(done) } /// Prune account history up to the provided block, inclusive. @@ -540,48 +683,52 @@ impl Pruner { to_block: BlockNumber, prune_mode: PruneMode, ) -> PrunerResult { - let from_block = provider - .get_prune_checkpoint(PrunePart::AccountHistory)? - .map(|checkpoint| checkpoint.block_number + 1) - .unwrap_or_default(); - let range = from_block..=to_block; - let total = range.clone().count(); + let range = match self.get_next_block_range_from_checkpoint( + provider, + PrunePart::AccountHistory, + to_block, + )? 
{ + Some(range) => range, + None => { + trace!(target: "pruner", "No account history to prune"); + return Ok(true) + } + }; + let range_end = *range.end(); - provider.prune_table_with_range_in_batches::( + let mut last_changeset_pruned_block = None; + let (rows, done) = provider.prune_table_with_range::( range, - self.batch_sizes.account_history, - |keys, rows| { - trace!( - target: "pruner", - %keys, - %rows, - progress = format!("{:.1}%", 100.0 * keys as f64 / total as f64), - "Pruned account history (changesets)" - ); - }, + self.batch_sizes.account_history(self.min_block_interval), + |_| false, + |row| last_changeset_pruned_block = Some(row.0), )?; + trace!(target: "pruner", %rows, %done, "Pruned account history (changesets)"); + + let last_changeset_pruned_block = last_changeset_pruned_block + // If there's more account account changesets to prune, set the checkpoint block number + // to previous, so we could finish pruning its account changesets on the next run. + .map(|block_number| if done { block_number } else { block_number.saturating_sub(1) }) + .unwrap_or(range_end); - self.prune_history_indices::( + let (processed, deleted) = self.prune_history_indices::( provider, - to_block, + last_changeset_pruned_block, |a, b| a.key == b.key, |key| ShardedKey::last(key.key), - self.batch_sizes.account_history, - |rows| { - trace!( - target: "pruner", - rows, - "Pruned account history (indices)" - ); - }, )?; + trace!(target: "pruner", %processed, %deleted, %done, "Pruned account history (history)" ); provider.save_prune_checkpoint( PrunePart::AccountHistory, - PruneCheckpoint { block_number: to_block, prune_mode }, + PruneCheckpoint { + block_number: Some(last_changeset_pruned_block), + tx_number: None, + prune_mode, + }, )?; - Ok(()) + Ok(done) } /// Prune storage history up to the provided block, inclusive. @@ -592,64 +739,70 @@ impl Pruner { to_block: BlockNumber, prune_mode: PruneMode, ) -> PrunerResult { - let from_block = provider - .get_prune_checkpoint(PrunePart::StorageHistory)? - .map(|checkpoint| checkpoint.block_number + 1) - .unwrap_or_default(); - let block_range = from_block..=to_block; - let range = BlockNumberAddress::range(block_range); + let range = match self.get_next_block_range_from_checkpoint( + provider, + PrunePart::StorageHistory, + to_block, + )? { + Some(range) => range, + None => { + trace!(target: "pruner", "No storage history to prune"); + return Ok(true) + } + }; + let range_end = *range.end(); - provider.prune_table_with_range_in_batches::( - range, - self.batch_sizes.storage_history, - |keys, rows| { - trace!( - target: "pruner", - %keys, - %rows, - "Pruned storage history (changesets)" - ); - }, + let mut last_changeset_pruned_block = None; + let (rows, done) = provider.prune_table_with_range::( + BlockNumberAddress::range(range), + self.batch_sizes.storage_history(self.min_block_interval), + |_| false, + |row| last_changeset_pruned_block = Some(row.0.block_number()), )?; + trace!(target: "pruner", %rows, %done, "Pruned storage history (changesets)"); - self.prune_history_indices::( + let last_changeset_pruned_block = last_changeset_pruned_block + // If there's more account storage changesets to prune, set the checkpoint block number + // to previous, so we could finish pruning its storage changesets on the next run. 
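Both history pruners share this checkpoint selection: step back one block when unfinished, and fall back to the range end when the batch deleted nothing. A minimal sketch under those semantics (`history_checkpoint` is an illustrative name):

```rust
fn history_checkpoint(last_pruned: Option<u64>, done: bool, range_end: u64) -> u64 {
    last_pruned
        // Step one block back when unfinished so that block is revisited.
        .map(|block| if done { block } else { block.saturating_sub(1) })
        // Nothing deleted means the whole range was already pruned.
        .unwrap_or(range_end)
}

fn main() {
    assert_eq!(history_checkpoint(Some(1500), false, 2000), 1499);
    assert_eq!(history_checkpoint(Some(2000), true, 2000), 2000);
    assert_eq!(history_checkpoint(None, true, 2000), 2000);
}
```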
+ .map(|block_number| if done { block_number } else { block_number.saturating_sub(1) }) + .unwrap_or(range_end); + + let (processed, deleted) = self.prune_history_indices::( provider, - to_block, + last_changeset_pruned_block, |a, b| a.address == b.address && a.sharded_key.key == b.sharded_key.key, |key| StorageShardedKey::last(key.address, key.sharded_key.key), - self.batch_sizes.storage_history, - |rows| { - trace!( - target: "pruner", - rows, - "Pruned storage history (indices)" - ); - }, )?; + trace!(target: "pruner", %processed, %deleted, %done, "Pruned storage history (history)" ); provider.save_prune_checkpoint( PrunePart::StorageHistory, - PruneCheckpoint { block_number: to_block, prune_mode }, + PruneCheckpoint { + block_number: Some(last_changeset_pruned_block), + tx_number: None, + prune_mode, + }, )?; - Ok(()) + Ok(done) } /// Prune history indices up to the provided block, inclusive. + /// + /// Returns total number of processed (walked) and deleted entities. fn prune_history_indices( &self, provider: &DatabaseProviderRW<'_, DB>, to_block: BlockNumber, key_matches: impl Fn(&T::Key, &T::Key) -> bool, last_key: impl Fn(&T::Key) -> T::Key, - batch_size: usize, - batch_callback: impl Fn(usize), - ) -> PrunerResult + ) -> Result<(usize, usize), PrunerError> where T: Table, T::Key: AsRef>, { let mut processed = 0; + let mut deleted = 0; let mut cursor = provider.tx_ref().cursor_write::()?; // Prune history table: @@ -661,96 +814,107 @@ impl Pruner { while let Some(result) = cursor.next()? { let (key, blocks): (T::Key, BlockNumberList) = result; + // If shard consists only of block numbers less than the target one, delete shard + // completely. if key.as_ref().highest_block_number <= to_block { - // If shard consists only of block numbers less than the target one, delete shard - // completely. cursor.delete_current()?; + deleted += 1; if key.as_ref().highest_block_number == to_block { - // Shard contains only block numbers up to the target one, so we can skip to the - // next sharded key. It is guaranteed that further shards for this sharded key - // will not contain the target block number, as it's in this shard. + // Shard contains only block numbers up to the target one, so we can skip to + // the last shard for this key. It is guaranteed that further shards for this + // sharded key will not contain the target block number, as it's in this shard. cursor.seek_exact(last_key(&key))?; } - } else { - // Shard contains block numbers that are higher than the target one, so we need to - // filter it. It is guaranteed that further shards for this sharded key will not - // contain the target block number, as it's in this shard. + } + // Shard contains block numbers that are higher than the target one, so we need to + // filter it. It is guaranteed that further shards for this sharded key will not + // contain the target block number, as it's in this shard. + else { let new_blocks = blocks .iter(0) .skip_while(|block| *block <= to_block as usize) .collect::>(); + // If there were blocks less than or equal to the target one + // (so the shard has changed), update the shard. if blocks.len() != new_blocks.len() { - // If there were blocks less than or equal to the target one - // (so the shard has changed), update the shard. + // If there are no more blocks in this shard, we need to remove it, as empty + // shards are not allowed. if new_blocks.is_empty() { - // If there are no more blocks in this shard, we need to remove it, as empty - // shards are not allowed. 
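A small model of the shard-filtering rule, assuming the pre-sorted block lists that `BlockNumberList` guarantees (`filter_shard` is illustrative, not the crate's API):

```rust
fn filter_shard(blocks: &[u64], to_block: u64) -> Option<Vec<u64>> {
    // Shards are pre-sorted, so skip_while behaves like a filter here.
    let kept: Vec<u64> = blocks.iter().copied().skip_while(|b| *b <= to_block).collect();
    // An emptied shard must be deleted rather than stored empty.
    (!kept.is_empty()).then_some(kept)
}

fn main() {
    assert_eq!(filter_shard(&[1, 5, 9], 4), Some(vec![5, 9]));
    assert_eq!(filter_shard(&[1, 2, 3], 5), None); // delete this shard
}
```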
if key.as_ref().highest_block_number == u64::MAX { - if let Some(prev_value) = cursor - .prev()? - .filter(|(prev_key, _)| key_matches(prev_key, &key)) - .map(|(_, prev_value)| prev_value) - { - // If current shard is the last shard for the sharded key that has - // previous shards, replace it with the previous shard. - cursor.delete_current()?; - // Upsert will replace the last shard for this sharded key with the - // previous value. - cursor.upsert(key.clone(), prev_value)?; - } else { + let prev_row = cursor.prev()?; + match prev_row { + // If current shard is the last shard for the sharded key that + // has previous shards, replace it with the previous shard. + Some((prev_key, prev_value)) if key_matches(&prev_key, &key) => { + cursor.delete_current()?; + deleted += 1; + // Upsert will replace the last shard for this sharded key with + // the previous value. + cursor.upsert(key.clone(), prev_value)?; + } // If there's no previous shard for this sharded key, // just delete last shard completely. - - // Jump back to the original last shard. - cursor.next()?; - // Delete shard. - cursor.delete_current()?; + _ => { + // If we successfully moved the cursor to a previous row, + // jump to the original last shard. + if prev_row.is_some() { + cursor.next()?; + } + // Delete shard. + cursor.delete_current()?; + deleted += 1; + } } - } else { - // If current shard is not the last shard for this sharded key, - // just delete it. + } + // If current shard is not the last shard for this sharded key, + // just delete it. + else { cursor.delete_current()?; + deleted += 1; } } else { cursor.upsert(key.clone(), BlockNumberList::new_pre_sorted(new_blocks))?; } } - // Jump to the next address. - cursor.seek_exact(last_key(&key))?; + // Jump to the last shard for this key, if current key isn't already the last shard. 
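A cursor-free sketch of the "empty last shard" rule handled above, using a plain `BTreeMap` in place of the MDBX cursor (all names illustrative): the shard keyed by `u64::MAX` must remain present for a key that still has shards, so when it is emptied it inherits the previous shard's blocks.

```rust
use std::collections::BTreeMap;

fn remove_empty_last_shard(shards: &mut BTreeMap<u64, Vec<u64>>) {
    // Drop the emptied last shard, keyed by u64::MAX...
    shards.remove(&u64::MAX);
    // ...and promote the previous (now highest) shard into the u64::MAX slot,
    // so the key keeps a well-formed last shard if any shard remains.
    let prev_key = shards.keys().next_back().copied();
    if let Some(prev_key) = prev_key {
        let blocks = shards.remove(&prev_key).unwrap();
        shards.insert(u64::MAX, blocks);
    }
}

fn main() {
    let mut shards = BTreeMap::from([(10u64, vec![1, 2]), (u64::MAX, vec![])]);
    remove_empty_last_shard(&mut shards);
    assert_eq!(shards, BTreeMap::from([(u64::MAX, vec![1, 2])]));
}
```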
+ if key.as_ref().highest_block_number != u64::MAX { + cursor.seek_exact(last_key(&key))?; + } } processed += 1; - - if processed % batch_size == 0 { - batch_callback(batch_size); - } } - if processed % batch_size != 0 { - batch_callback(processed % batch_size); - } - - Ok(()) + Ok((processed, deleted)) } } #[cfg(test)] mod tests { - use crate::{pruner::BatchSizes, Pruner}; + use crate::Pruner; use assert_matches::assert_matches; - use reth_db::{tables, test_utils::create_test_rw_db, BlockNumberList}; + use itertools::{ + FoldWhile::{Continue, Done}, + Itertools, + }; + use reth_db::{ + cursor::DbCursorRO, tables, test_utils::create_test_rw_db, transaction::DbTx, + BlockNumberList, + }; use reth_interfaces::test_utils::{ generators, generators::{ - random_block_range, random_changeset_range, random_eoa_account_range, random_receipt, + random_block_range, random_changeset_range, random_eoa_account, + random_eoa_account_range, random_log, random_receipt, }, }; use reth_primitives::{ - BlockNumber, PruneCheckpoint, PruneMode, PruneModes, PrunePart, H256, MAINNET, + BlockNumber, PruneBatchSizes, PruneCheckpoint, PruneMode, PruneModes, PrunePart, + ReceiptsLogPruneConfig, TxNumber, H256, MAINNET, }; - use reth_provider::PruneCheckpointReader; + use reth_provider::{PruneCheckpointReader, TransactionsProvider}; use reth_stages::test_utils::TestTransaction; use std::{collections::BTreeMap, ops::AddAssign}; @@ -758,14 +922,14 @@ mod tests { fn is_pruning_needed() { let db = create_test_rw_db(); let pruner = - Pruner::new(db, MAINNET.clone(), 5, PruneModes::default(), BatchSizes::default()); + Pruner::new(db, MAINNET.clone(), 5, PruneModes::default(), PruneBatchSizes::default()); // No last pruned block number was set before let first_block_number = 1; assert!(pruner.is_pruning_needed(first_block_number)); // Delta is not less than min block interval - let second_block_number = first_block_number + pruner.min_block_interval; + let second_block_number = first_block_number + pruner.min_block_interval as u64; assert!(pruner.is_pruning_needed(second_block_number)); // Delta is less than min block interval @@ -804,33 +968,67 @@ mod tests { let pruner = Pruner::new( tx.inner_raw(), MAINNET.clone(), - 5, + 1, PruneModes { receipts: Some(prune_mode), ..Default::default() }, - BatchSizes { - // Less than total amount of blocks to prune to test the batching logic - receipts: 10, - ..Default::default() - }, + // Less than total amount of blocks to prune to test the batching logic + PruneBatchSizes::default().with_receipts(10), ); + let next_tx_number_to_prune = tx + .inner() + .get_prune_checkpoint(PrunePart::Receipts) + .unwrap() + .and_then(|checkpoint| checkpoint.tx_number) + .map(|tx_number| tx_number + 1) + .unwrap_or_default(); + + let last_pruned_tx_number = + blocks.iter().map(|block| block.body.len()).sum::().min( + next_tx_number_to_prune as usize + + pruner.batch_sizes.receipts(pruner.min_block_interval) - + 1, + ); + + let last_pruned_block_number = blocks + .iter() + .fold_while((0, 0), |(_, mut tx_count), block| { + tx_count += block.body.len(); + + if tx_count > last_pruned_tx_number { + Done((block.number, tx_count)) + } else { + Continue((block.number, tx_count)) + } + }) + .into_inner() + .0; + let provider = tx.inner_rw(); - assert_matches!(pruner.prune_receipts(&provider, to_block, prune_mode), Ok(())); + let result = pruner.prune_receipts(&provider, to_block, prune_mode); + assert_matches!(result, Ok(_)); + let done = result.unwrap(); provider.commit().expect("commit"); + let 
last_pruned_block_number = + last_pruned_block_number.checked_sub(if done { 0 } else { 1 }); + assert_eq!( tx.table::().unwrap().len(), - blocks[to_block as usize + 1..].iter().map(|block| block.body.len()).sum::() + blocks.iter().map(|block| block.body.len()).sum::() - + (last_pruned_tx_number + 1) ); assert_eq!( tx.inner().get_prune_checkpoint(PrunePart::Receipts).unwrap(), - Some(PruneCheckpoint { block_number: to_block, prune_mode }) + Some(PruneCheckpoint { + block_number: last_pruned_block_number, + tx_number: Some(last_pruned_tx_number as TxNumber), + prune_mode + }) ); }; - // Pruning first time ever, no previous checkpoint is present - test_prune(10); - // Prune second time, previous checkpoint is present, should continue pruning from where - // ended last time + test_prune(15); + test_prune(15); test_prune(20); } @@ -864,36 +1062,67 @@ mod tests { let pruner = Pruner::new( tx.inner_raw(), MAINNET.clone(), - 5, + 1, PruneModes { transaction_lookup: Some(prune_mode), ..Default::default() }, - BatchSizes { - // Less than total amount of blocks to prune to test the batching logic - transaction_lookup: 10, - ..Default::default() - }, + // Less than total amount of blocks to prune to test the batching logic + PruneBatchSizes::default().with_transaction_lookup(10), ); + let next_tx_number_to_prune = tx + .inner() + .get_prune_checkpoint(PrunePart::TransactionLookup) + .unwrap() + .and_then(|checkpoint| checkpoint.tx_number) + .map(|tx_number| tx_number + 1) + .unwrap_or_default(); + + let last_pruned_tx_number = + blocks.iter().map(|block| block.body.len()).sum::().min( + next_tx_number_to_prune as usize + + pruner.batch_sizes.transaction_lookup(pruner.min_block_interval) - + 1, + ); + + let last_pruned_block_number = blocks + .iter() + .fold_while((0, 0), |(_, mut tx_count), block| { + tx_count += block.body.len(); + + if tx_count > last_pruned_tx_number { + Done((block.number, tx_count)) + } else { + Continue((block.number, tx_count)) + } + }) + .into_inner() + .0; + let provider = tx.inner_rw(); - assert_matches!( - pruner.prune_transaction_lookup(&provider, to_block, prune_mode), - Ok(()) - ); + let result = pruner.prune_transaction_lookup(&provider, to_block, prune_mode); + assert_matches!(result, Ok(_)); + let done = result.unwrap(); provider.commit().expect("commit"); + let last_pruned_block_number = + last_pruned_block_number.checked_sub(if done { 0 } else { 1 }); + assert_eq!( tx.table::().unwrap().len(), - blocks[to_block as usize + 1..].iter().map(|block| block.body.len()).sum::() + blocks.iter().map(|block| block.body.len()).sum::() - + (last_pruned_tx_number + 1) ); assert_eq!( tx.inner().get_prune_checkpoint(PrunePart::TransactionLookup).unwrap(), - Some(PruneCheckpoint { block_number: to_block, prune_mode }) + Some(PruneCheckpoint { + block_number: last_pruned_block_number, + tx_number: Some(last_pruned_tx_number as TxNumber), + prune_mode + }) ); }; - // Pruning first time ever, no previous checkpoint is present - test_prune(10); - // Prune second time, previous checkpoint is present, should continue pruning from where - // ended last time + test_prune(15); + test_prune(15); test_prune(20); } @@ -930,36 +1159,67 @@ mod tests { let pruner = Pruner::new( tx.inner_raw(), MAINNET.clone(), - 5, + 1, PruneModes { sender_recovery: Some(prune_mode), ..Default::default() }, - BatchSizes { - // Less than total amount of blocks to prune to test the batching logic - transaction_senders: 10, - ..Default::default() - }, + // Less than total amount of blocks to prune to test the 
batching logic + PruneBatchSizes::default().with_transaction_senders(10), ); + let next_tx_number_to_prune = tx + .inner() + .get_prune_checkpoint(PrunePart::SenderRecovery) + .unwrap() + .and_then(|checkpoint| checkpoint.tx_number) + .map(|tx_number| tx_number + 1) + .unwrap_or_default(); + + let last_pruned_tx_number = + blocks.iter().map(|block| block.body.len()).sum::().min( + next_tx_number_to_prune as usize + + pruner.batch_sizes.transaction_senders(pruner.min_block_interval) - + 1, + ); + + let last_pruned_block_number = blocks + .iter() + .fold_while((0, 0), |(_, mut tx_count), block| { + tx_count += block.body.len(); + + if tx_count > last_pruned_tx_number { + Done((block.number, tx_count)) + } else { + Continue((block.number, tx_count)) + } + }) + .into_inner() + .0; + let provider = tx.inner_rw(); - assert_matches!( - pruner.prune_transaction_senders(&provider, to_block, prune_mode), - Ok(()) - ); + let result = pruner.prune_transaction_senders(&provider, to_block, prune_mode); + assert_matches!(result, Ok(_)); + let done = result.unwrap(); provider.commit().expect("commit"); + let last_pruned_block_number = + last_pruned_block_number.checked_sub(if done { 0 } else { 1 }); + assert_eq!( tx.table::().unwrap().len(), - blocks[to_block as usize + 1..].iter().map(|block| block.body.len()).sum::() + blocks.iter().map(|block| block.body.len()).sum::() - + (last_pruned_tx_number + 1) ); assert_eq!( tx.inner().get_prune_checkpoint(PrunePart::SenderRecovery).unwrap(), - Some(PruneCheckpoint { block_number: to_block, prune_mode }) + Some(PruneCheckpoint { + block_number: last_pruned_block_number, + tx_number: Some(last_pruned_tx_number as TxNumber), + prune_mode + }) ); }; - // Pruning first time ever, no previous checkpoint is present - test_prune(10); - // Prune second time, previous checkpoint is present, should continue pruning from where - // ended last time + test_prune(15); + test_prune(15); test_prune(20); } @@ -968,8 +1228,7 @@ mod tests { let tx = TestTransaction::default(); let mut rng = generators::rng(); - let block_num = 7000; - let blocks = random_block_range(&mut rng, 0..=block_num, H256::zero(), 0..1); + let blocks = random_block_range(&mut rng, 0..=7000, H256::zero(), 0..1); tx.insert_blocks(blocks.iter(), None).expect("insert blocks"); let accounts = @@ -1001,38 +1260,74 @@ mod tests { let original_shards = tx.table::().unwrap(); - let test_prune = |to_block: BlockNumber| { + let test_prune = |to_block: BlockNumber, run: usize, expect_done: bool| { let prune_mode = PruneMode::Before(to_block); let pruner = Pruner::new( tx.inner_raw(), MAINNET.clone(), - 5, + 1, PruneModes { account_history: Some(prune_mode), ..Default::default() }, - BatchSizes { - // Less than total amount of blocks to prune to test the batching logic - account_history: 10, - ..Default::default() - }, + // Less than total amount of blocks to prune to test the batching logic + PruneBatchSizes::default().with_account_history(2000), ); let provider = tx.inner_rw(); - assert_matches!(pruner.prune_account_history(&provider, to_block, prune_mode), Ok(())); + let result = pruner.prune_account_history(&provider, to_block, prune_mode); + assert_matches!(result, Ok(_)); + let done = result.unwrap(); + assert_eq!(done, expect_done); provider.commit().expect("commit"); + let changesets = changesets + .iter() + .enumerate() + .flat_map(|(block_number, changeset)| { + changeset.into_iter().map(move |change| (block_number, change)) + }) + .collect::>(); + + let pruned = changesets + .iter() + .enumerate() + 
.skip_while(|(i, (block_number, _))| { + *i < pruner.batch_sizes.account_history(pruner.min_block_interval) * run && + *block_number <= to_block as usize + }) + .next() + .map(|(i, _)| i) + .unwrap_or_default(); + + let mut pruned_changesets = changesets + .iter() + // Skip what we've pruned so far, subtracting one to get last pruned block number + // further down + .skip(pruned.saturating_sub(1)); + + let last_pruned_block_number = pruned_changesets + .next() + .map(|(block_number, _)| if done { *block_number } else { block_number.saturating_sub(1) } as BlockNumber) + .unwrap_or(to_block); + + let pruned_changesets = + pruned_changesets.fold(BTreeMap::new(), |mut acc, (block_number, change)| { + acc.entry(block_number).or_insert_with(Vec::new).push(change); + acc + }); + assert_eq!( tx.table::().unwrap().len(), - changesets[to_block as usize + 1..].iter().flatten().count() + pruned_changesets.values().flatten().count() ); let actual_shards = tx.table::().unwrap(); let expected_shards = original_shards .iter() - .filter(|(key, _)| key.highest_block_number > to_block) + .filter(|(key, _)| key.highest_block_number > last_pruned_block_number) .map(|(key, blocks)| { let new_blocks = blocks .iter(0) - .skip_while(|block| *block <= to_block as usize) + .skip_while(|block| *block <= last_pruned_block_number as usize) .collect::>(); (key.clone(), BlockNumberList::new_pre_sorted(new_blocks)) }) @@ -1042,15 +1337,17 @@ mod tests { assert_eq!( tx.inner().get_prune_checkpoint(PrunePart::AccountHistory).unwrap(), - Some(PruneCheckpoint { block_number: to_block, prune_mode }) + Some(PruneCheckpoint { + block_number: Some(last_pruned_block_number), + tx_number: None, + prune_mode + }) ); }; - // Prune first time: no previous checkpoint is present - test_prune(3000); - // Prune second time: previous checkpoint is present, should continue pruning from where - // ended last time - test_prune(4500); + test_prune(1700, 1, false); + test_prune(1700, 2, true); + test_prune(2000, 3, true); } #[test] @@ -1058,8 +1355,7 @@ mod tests { let tx = TestTransaction::default(); let mut rng = generators::rng(); - let block_num = 7000; - let blocks = random_block_range(&mut rng, 0..=block_num, H256::zero(), 0..1); + let blocks = random_block_range(&mut rng, 0..=7000, H256::zero(), 0..1); tx.insert_blocks(blocks.iter(), None).expect("insert blocks"); let accounts = @@ -1091,42 +1387,78 @@ mod tests { let original_shards = tx.table::().unwrap(); - let test_prune = |to_block: BlockNumber| { + let test_prune = |to_block: BlockNumber, run: usize, expect_done: bool| { let prune_mode = PruneMode::Before(to_block); let pruner = Pruner::new( tx.inner_raw(), MAINNET.clone(), - 5, + 1, PruneModes { storage_history: Some(prune_mode), ..Default::default() }, - BatchSizes { - // Less than total amount of blocks to prune to test the batching logic - storage_history: 10, - ..Default::default() - }, + // Less than total amount of blocks to prune to test the batching logic + PruneBatchSizes::default().with_storage_history(2000), ); let provider = tx.inner_rw(); - assert_matches!(pruner.prune_storage_history(&provider, to_block, prune_mode), Ok(())); + let result = pruner.prune_storage_history(&provider, to_block, prune_mode); + assert_matches!(result, Ok(_)); + let done = result.unwrap(); + assert_eq!(done, expect_done); provider.commit().expect("commit"); + let changesets = changesets + .iter() + .enumerate() + .flat_map(|(block_number, changeset)| { + changeset.into_iter().flat_map(move |(address, _, entries)| { + 
entries.into_iter().map(move |entry| (block_number, address, entry)) + }) + }) + .collect::>(); + + let pruned = changesets + .iter() + .enumerate() + .skip_while(|(i, (block_number, _, _))| { + *i < pruner.batch_sizes.storage_history(pruner.min_block_interval) * run && + *block_number <= to_block as usize + }) + .next() + .map(|(i, _)| i) + .unwrap_or_default(); + + let mut pruned_changesets = changesets + .iter() + // Skip what we've pruned so far, subtracting one to get last pruned block number + // further down + .skip(pruned.saturating_sub(1)); + + let last_pruned_block_number = pruned_changesets + .next() + .map(|(block_number, _, _)| if done { *block_number } else { block_number.saturating_sub(1) } as BlockNumber) + .unwrap_or(to_block); + + let pruned_changesets = pruned_changesets.fold( + BTreeMap::new(), + |mut acc, (block_number, address, entry)| { + acc.entry((block_number, address)).or_insert_with(Vec::new).push(entry); + acc + }, + ); + assert_eq!( tx.table::().unwrap().len(), - changesets[to_block as usize + 1..] - .iter() - .flatten() - .flat_map(|(_, _, entries)| entries) - .count() + pruned_changesets.values().flatten().count() ); let actual_shards = tx.table::().unwrap(); let expected_shards = original_shards .iter() - .filter(|(key, _)| key.sharded_key.highest_block_number > to_block) + .filter(|(key, _)| key.sharded_key.highest_block_number > last_pruned_block_number) .map(|(key, blocks)| { let new_blocks = blocks .iter(0) - .skip_while(|block| *block <= to_block as usize) + .skip_while(|block| *block <= last_pruned_block_number as usize) .collect::>(); (key.clone(), BlockNumberList::new_pre_sorted(new_blocks)) }) @@ -1136,14 +1468,113 @@ mod tests { assert_eq!( tx.inner().get_prune_checkpoint(PrunePart::StorageHistory).unwrap(), - Some(PruneCheckpoint { block_number: to_block, prune_mode }) + Some(PruneCheckpoint { + block_number: Some(last_pruned_block_number), + tx_number: None, + prune_mode + }) + ); + }; + + test_prune(2300, 1, false); + test_prune(2300, 2, true); + test_prune(3000, 3, true); + } + + #[test] + fn prune_receipts_by_logs() { + let tx = TestTransaction::default(); + let mut rng = generators::rng(); + + let tip = 300; + let blocks = random_block_range(&mut rng, 0..=tip, H256::zero(), 1..5); + tx.insert_blocks(blocks.iter(), None).expect("insert blocks"); + + let mut receipts = Vec::new(); + + let (deposit_contract_addr, _) = random_eoa_account(&mut rng); + for block in &blocks { + assert!(!block.body.is_empty()); + for (txi, transaction) in block.body.iter().enumerate() { + let mut receipt = random_receipt(&mut rng, transaction, Some(1)); + receipt.logs.push(random_log( + &mut rng, + if txi == (block.body.len() - 1) { Some(deposit_contract_addr) } else { None }, + Some(1), + )); + receipts.push((receipts.len() as u64, receipt)); + } + } + tx.insert_receipts(receipts).expect("insert receipts"); + + assert_eq!( + tx.table::().unwrap().len(), + blocks.iter().map(|block| block.body.len()).sum::() + ); + assert_eq!( + tx.table::().unwrap().len(), + tx.table::().unwrap().len() + ); + + let run_prune = || { + let provider = tx.inner_rw(); + + let prune_before_block: usize = 20; + let prune_mode = PruneMode::Before(prune_before_block as u64); + let receipts_log_filter = + ReceiptsLogPruneConfig(BTreeMap::from([(deposit_contract_addr, prune_mode)])); + let pruner = Pruner::new( + tx.inner_raw(), + MAINNET.clone(), + 5, + PruneModes { + receipts_log_filter: receipts_log_filter.clone(), + ..Default::default() + }, + // Less than total amount of blocks to prune 
to test the batching logic + PruneBatchSizes::default().with_storage_history(10), ); + + let result = pruner.prune_receipts_by_logs(&provider, tip); + assert_matches!(result, Ok(_)); + let done = result.unwrap(); + provider.commit().expect("commit"); + + let (pruned_block, pruned_tx) = tx + .inner() + .get_prune_checkpoint(PrunePart::ContractLogs) + .unwrap() + .and_then(|checkpoint| { + Some((checkpoint.block_number.unwrap(), checkpoint.tx_number.unwrap())) + }) + .unwrap_or_default(); + + // All receipts are in the end of the block + let unprunable = pruned_block.saturating_sub(prune_before_block as u64 - 1); + + assert_eq!( + tx.table::().unwrap().len(), + blocks.iter().map(|block| block.body.len()).sum::() - + ((pruned_tx + 1) - unprunable) as usize + ); + + return done }; - // Prune first time: no previous checkpoint is present - test_prune(3000); - // Prune second time: previous checkpoint is present, should continue pruning from where - // ended last time - test_prune(4500); + while !run_prune() {} + + let provider = tx.inner(); + let mut cursor = provider.tx_ref().cursor_read::().unwrap(); + let walker = cursor.walk(None).unwrap(); + for receipt in walker { + let (tx_num, receipt) = receipt.unwrap(); + + // Either we only find our contract, or the receipt is part of the unprunable receipts + // set by tip - 128 + assert!( + receipt.logs.iter().any(|l| l.address == deposit_contract_addr) || + provider.transaction_block(tx_num).unwrap().unwrap() > tip - 128, + ); + } } } diff --git a/crates/revm/revm-inspectors/src/tracing/builder/geth.rs b/crates/revm/revm-inspectors/src/tracing/builder/geth.rs index 255cfdba96b..f2bbdeda756 100644 --- a/crates/revm/revm-inspectors/src/tracing/builder/geth.rs +++ b/crates/revm/revm-inspectors/src/tracing/builder/geth.rs @@ -110,7 +110,10 @@ impl GethTraceBuilder { /// Generate a geth-style traces for the call tracer. /// /// This decodes all call frames from the recorded traces. - pub fn geth_call_traces(&self, opts: CallConfig) -> CallFrame { + /// + /// This expects the gas used and return value for the + /// [ExecutionResult](revm::primitives::ExecutionResult) of the executed transaction. 
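A hypothetical call site for the new two-argument signature (a fragment, not a standalone program; `inspect`, `db`, `env`, `inspector`, and `call_config` are assumed from the surrounding tracing context, mirroring the `debug.rs` changes later in this diff):

```rust
// The gas actually consumed comes from the inspected ExecutionResult and is
// passed to the builder so the root frame's `gasUsed` is populated.
let (res, _) = inspect(db, env, &mut inspector)?;
let frame = inspector
    .into_geth_builder()
    .geth_call_traces(call_config, res.result.gas_used());
```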
+ pub fn geth_call_traces(&self, opts: CallConfig, gas_used: u64) -> CallFrame { if self.nodes.is_empty() { return Default::default() } @@ -119,6 +122,7 @@ impl GethTraceBuilder { // first fill up the root let main_trace_node = &self.nodes[0]; let mut root_call_frame = main_trace_node.geth_empty_call_frame(include_logs); + root_call_frame.gas_used = U256::from(gas_used); // selfdestructs are not recorded as individual call traces but are derived from // the call trace and are added as additional `CallFrame` objects to the parent call diff --git a/crates/revm/revm-inspectors/src/tracing/mod.rs b/crates/revm/revm-inspectors/src/tracing/mod.rs index 48f772ebde8..c01d19c8e94 100644 --- a/crates/revm/revm-inspectors/src/tracing/mod.rs +++ b/crates/revm/revm-inspectors/src/tracing/mod.rs @@ -175,6 +175,8 @@ impl TracingInspector { if self.trace_stack.is_empty() { // this is the root call which should get the original gas limit of the transaction, // because initialization costs are already subtracted from gas_limit + // For the root call this value should use the transaction's gas limit + // See and gas_limit = data.env.tx.gas_limit; // we set the spec id here because we only need to do this once and this condition is diff --git a/crates/revm/revm-inspectors/src/tracing/types.rs b/crates/revm/revm-inspectors/src/tracing/types.rs index 6c877830541..6a64738854d 100644 --- a/crates/revm/revm-inspectors/src/tracing/types.rs +++ b/crates/revm/revm-inspectors/src/tracing/types.rs @@ -310,31 +310,51 @@ impl CallTraceNode { // iterate over all storage diffs for change in self.trace.steps.iter().filter_map(|s| s.storage_change) { let StorageChange { key, value, had_value } = change; - let value = H256::from(value); + let h256_value = H256::from(value); match acc.storage.entry(key.into()) { Entry::Vacant(entry) => { if let Some(had_value) = had_value { - entry.insert(Delta::Changed(ChangedType { - from: had_value.into(), - to: value, - })); + if value != had_value { + entry.insert(Delta::Changed(ChangedType { + from: had_value.into(), + to: h256_value, + })); + } } else { - entry.insert(Delta::Added(value)); + entry.insert(Delta::Added(h256_value)); } } Entry::Occupied(mut entry) => { let value = match entry.get() { - Delta::Unchanged => Delta::Added(value), + Delta::Unchanged => { + if let Some(had_value) = had_value { + if value != had_value { + Delta::Changed(ChangedType { + from: had_value.into(), + to: h256_value, + }) + } else { + Delta::Unchanged + } + } else { + Delta::Added(h256_value) + } + } Delta::Added(added) => { - if added == &value { + if added == &h256_value { Delta::Added(*added) } else { - Delta::Changed(ChangedType { from: *added, to: value }) + Delta::Changed(ChangedType { from: *added, to: h256_value }) } } - Delta::Removed(_) => Delta::Added(value), + Delta::Removed(_) => Delta::Added(h256_value), Delta::Changed(c) => { - Delta::Changed(ChangedType { from: c.from, to: value }) + if c.from == h256_value { + // remains unchanged if the value is the same + Delta::Unchanged + } else { + Delta::Changed(ChangedType { from: c.from, to: h256_value }) + } } }; entry.insert(value); diff --git a/crates/revm/src/executor.rs b/crates/revm/src/executor.rs index 54962595627..faca8eaa01c 100644 --- a/crates/revm/src/executor.rs +++ b/crates/revm/src/executor.rs @@ -348,6 +348,13 @@ where // append gas used cumulative_gas_used += result.gas_used(); + tracing::trace!( + target: "revm::executor", + hash = ?transaction.hash, + gas_used = result.gas_used(), + "transaction executed" + ); + // Push 
transaction changeset and calculate header bloom filter for receipt. post_state.add_receipt( block.number, diff --git a/crates/rpc/rpc-api/src/debug.rs b/crates/rpc/rpc-api/src/debug.rs index 91377017425..8422a175350 100644 --- a/crates/rpc/rpc-api/src/debug.rs +++ b/crates/rpc/rpc-api/src/debug.rs @@ -1,7 +1,6 @@ use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::{BlockId, BlockNumberOrTag, Bytes, H256}; +use reth_primitives::{BlockId, BlockNumberOrTag, Bytes, H160, H256}; use reth_rpc_types::{ - state::StateOverride, trace::geth::{ BlockTraceResult, GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, TraceResult, @@ -120,8 +119,7 @@ pub trait DebugApi { &self, bundles: Vec, state_context: Option, - opts: Option, - state_override: Option, + opts: Option, ) -> RpcResult>; /// Sets the logging backtrace location. When a backtrace location is set and a log message is @@ -129,4 +127,242 @@ pub trait DebugApi { /// be printed to stderr. #[method(name = "backtraceAt")] async fn debug_backtrace_at(&self, location: &str) -> RpcResult<()>; + + /// Enumerates all accounts at a given block with paging capability. `maxResults` are returned + /// in the page and the items have keys that come after the `start` key (hashed address). + /// + /// If incompletes is false, then accounts for which the key preimage (i.e: the address) doesn't + /// exist in db are skipped. NB: geth by default does not store preimages. + #[method(name = "accountRange")] + async fn debug_account_range( + &self, + block_number: BlockNumberOrTag, + start: Bytes, + max_results: u64, + nocode: bool, + nostorage: bool, + incompletes: bool, + ) -> RpcResult<()>; + + /// Turns on block profiling for the given duration and writes profile data to disk. It uses a + /// profile rate of 1 for most accurate information. If a different rate is desired, set the + /// rate and write the profile manually using `debug_writeBlockProfile`. + #[method(name = "blockProfile")] + async fn debug_block_profile(&self, file: String, seconds: u64) -> RpcResult<()>; + + /// Flattens the entire key-value database into a single level, removing all unused slots and + /// merging all keys. + #[method(name = "chaindbCompact")] + async fn debug_chaindb_compact(&self) -> RpcResult<()>; + + /// Returns leveldb properties of the key-value database. + #[method(name = "chaindbProperty")] + async fn debug_chaindb_property(&self, property: String) -> RpcResult<()>; + + /// Turns on CPU profiling for the given duration and writes profile data to disk. + #[method(name = "cpuProfile")] + async fn debug_cpu_profile(&self, file: String, seconds: u64) -> RpcResult<()>; + + /// Retrieves an ancient binary blob from the freezer. The freezer is a collection of + /// append-only immutable files. The first argument `kind` specifies which table to look up data + /// from. The list of all table kinds are as follows: + #[method(name = "dbAncient")] + async fn debug_db_ancient(&self, kind: String, number: u64) -> RpcResult<()>; + + /// Returns the number of ancient items in the ancient store. + #[method(name = "dbAncients")] + async fn debug_db_ancients(&self) -> RpcResult<()>; + + /// Returns the raw value of a key stored in the database. + #[method(name = "dbGet")] + async fn debug_db_get(&self, key: String) -> RpcResult<()>; + + /// Retrieves the state that corresponds to the block number and returns a list of accounts + /// (including storage and code). 
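A simplified, in-memory model of the paging contract described for `debug_accountRange` above, with `u64` standing in for the hashed-address keys (sketch only):

```rust
use std::{collections::BTreeMap, ops::Bound};

fn account_page(accounts: &BTreeMap<u64, &str>, start: u64, max_results: usize) -> Vec<u64> {
    accounts
        // Keys strictly after `start`, capped at `max_results` per page.
        .range((Bound::Excluded(start), Bound::Unbounded))
        .take(max_results)
        .map(|(key, _)| *key)
        .collect()
}

fn main() {
    let accounts = BTreeMap::from([(1u64, "a"), (2, "b"), (3, "c"), (4, "d")]);
    assert_eq!(account_page(&accounts, 0, 2), vec![1, 2]);
    // Resume the next page strictly after the last key returned.
    assert_eq!(account_page(&accounts, 2, 2), vec![3, 4]);
}
```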
+ #[method(name = "dumpBlock")] + async fn debug_dump_block(&self, number: BlockId) -> RpcResult<()>; + + /// Forces garbage collection. + #[method(name = "freeOSMemory")] + async fn debug_free_os_memory(&self) -> RpcResult<()>; + + /// Forces a temporary client freeze, normally when the server is overloaded. + #[method(name = "freezeClient")] + async fn debug_freeze_client(&self, node: String) -> RpcResult<()>; + + /// Returns garbage collection statistics. + #[method(name = "gcStats")] + async fn debug_gc_stats(&self) -> RpcResult<()>; + + /// Returns the first number where the node has accessible state on disk. This is the + /// post-state of that block and the pre-state of the next block. The (from, to) parameters + /// are the sequence of blocks to search, which can go either forwards or backwards. + /// + /// Note: to get the last state pass in the range of blocks in reverse, i.e. (last, first). + #[method(name = "getAccessibleState")] + async fn debug_get_accessible_state( + &self, + from: BlockNumberOrTag, + to: BlockNumberOrTag, + ) -> RpcResult<()>; + + /// Returns all accounts that have changed between the two blocks specified. A change is defined + /// as a difference in nonce, balance, code hash, or storage hash. With one parameter, returns + /// the list of accounts modified in the specified block. + #[method(name = "getModifiedAccountsByHash")] + async fn debug_get_modified_accounts_by_hash( + &self, + start_hash: H256, + end_hash: H256, + ) -> RpcResult<()>; + + /// Returns all accounts that have changed between the two blocks specified. A change is defined + /// as a difference in nonce, balance, code hash or storage hash. + #[method(name = "getModifiedAccountsByNumber")] + async fn debug_get_modified_accounts_by_number( + &self, + start_number: u64, + end_number: u64, + ) -> RpcResult<()>; + + /// Turns on Go runtime tracing for the given duration and writes trace data to disk. + #[method(name = "goTrace")] + async fn debug_go_trace(&self, file: String, seconds: u64) -> RpcResult<()>; + + /// Executes a block (bad- or canon- or side-), and returns a list of intermediate roots: the + /// stateroot after each transaction. + #[method(name = "intermediateRoots")] + async fn debug_intermediate_roots( + &self, + block_hash: H256, + opts: Option, + ) -> RpcResult<()>; + + /// Returns detailed runtime memory statistics. + #[method(name = "memStats")] + async fn debug_mem_stats(&self) -> RpcResult<()>; + + /// Turns on mutex profiling for `nsec` seconds and writes profile data to file. It uses a + /// profile rate of 1 for most accurate information. If a different rate is desired, set the + /// rate and write the profile manually. + #[method(name = "mutexProfile")] + async fn debug_mutex_profile(&self, file: String, nsec: u64) -> RpcResult<()>; + + /// Returns the preimage for a sha3 hash, if known. + #[method(name = "preimage")] + async fn debug_preimage(&self, hash: H256) -> RpcResult<()>; + + /// Retrieves a block and returns its pretty printed form. + #[method(name = "printBlock")] + async fn debug_print_block(&self, number: u64) -> RpcResult<()>; + + /// Fetches and retrieves the seed hash of the block by number. + #[method(name = "seedHash")] + async fn debug_seed_hash(&self, number: u64) -> RpcResult; + + /// Sets the rate (in samples/sec) of goroutine block profile data collection. A non-zero rate + /// enables block profiling, setting it to zero stops the profile. Collected profile data can be + /// written using `debug_writeBlockProfile`. 
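A hypothetical client-side workflow for the block-profile endpoints described above (method names per this diff; in reth they are currently no-op stubs, so this demonstrates the wire protocol only; assumes the `jsonrpsee` HTTP client and `tokio`):

```rust
use jsonrpsee::{core::client::ClientT, http_client::HttpClientBuilder, rpc_params};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = HttpClientBuilder::default().build("http://127.0.0.1:8545")?;
    // Enable collection at a rate of 1 for the most accurate data...
    let _: () = client.request("debug_setBlockProfileRate", rpc_params![1u64]).await?;
    // ...and later flush the collected samples to disk.
    let _: () = client.request("debug_writeBlockProfile", rpc_params!["block.prof"]).await?;
    Ok(())
}
```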
+ #[method(name = "setBlockProfileRate")] + async fn debug_set_block_profile_rate(&self, rate: u64) -> RpcResult<()>; + + /// Sets the garbage collection target percentage. A negative value disables garbage collection. + #[method(name = "setGCPercent")] + async fn debug_set_gc_percent(&self, v: i32) -> RpcResult<()>; + + /// Sets the current head of the local chain by block number. Note, this is a destructive action + /// and may severely damage your chain. Use with extreme caution. + #[method(name = "setHead")] + async fn debug_set_head(&self, number: u64) -> RpcResult<()>; + + /// Sets the rate of mutex profiling. + #[method(name = "setMutexProfileFraction")] + async fn debug_set_mutex_profile_fraction(&self, rate: i32) -> RpcResult<()>; + + /// Configures how often in-memory state tries are persisted to disk. The interval needs to be + /// in a format parsable by a time.Duration. Note that the interval is not wall-clock time. + /// Rather it is accumulated block processing time after which the state should be flushed. + #[method(name = "setTrieFlushInterval")] + async fn debug_set_trie_flush_interval(&self, interval: String) -> RpcResult<()>; + + /// Returns a printed representation of the stacks of all goroutines. + #[method(name = "stacks")] + async fn debug_stacks(&self) -> RpcResult<()>; + + /// Used to obtain info about a block. + #[method(name = "standardTraceBadBlockToFile")] + async fn debug_standard_trace_bad_block_to_file( + &self, + block: BlockNumberOrTag, + opts: Option, + ) -> RpcResult<()>; + + /// This method is similar to `debug_standardTraceBlockToFile`, but can be used to obtain info + /// about a block which has been rejected as invalid (for some reason). + #[method(name = "standardTraceBlockToFile")] + async fn debug_standard_trace_block_to_file( + &self, + block: BlockNumberOrTag, + opts: Option, + ) -> RpcResult<()>; + + /// Turns on CPU profiling indefinitely, writing to the given file. + #[method(name = "startCPUProfile")] + async fn debug_start_cpu_profile(&self, file: String) -> RpcResult<()>; + + /// Starts writing a Go runtime trace to the given file. + #[method(name = "startGoTrace")] + async fn debug_start_go_trace(&self, file: String) -> RpcResult<()>; + + /// Stops an ongoing CPU profile. + #[method(name = "stopCPUProfile")] + async fn debug_stop_cpu_profile(&self) -> RpcResult<()>; + + /// Stops writing the Go runtime trace. + #[method(name = "stopGoTrace")] + async fn debug_stop_go_trace(&self) -> RpcResult<()>; + + /// Returns the storage at the given block height and transaction index. The result can be + /// paged by providing a `maxResult` to cap the number of storage slots returned as well as + /// specifying the offset via `keyStart` (hash of storage key). + #[method(name = "storageRangeAt")] + async fn debug_storage_range_at( + &self, + block_hash: H256, + tx_idx: usize, + contract_address: H160, + key_start: H256, + max_result: u64, + ) -> RpcResult<()>; + + /// Returns the structured logs created during the execution of EVM against a block pulled + /// from the pool of bad ones and returns them as a JSON object. For the second parameter see + /// TraceConfig reference. + #[method(name = "traceBadBlock")] + async fn debug_trace_bad_block( + &self, + block_hash: H256, + opts: Option, + ) -> RpcResult<()>; + + /// Sets the logging verbosity ceiling. Log messages with level up to and including the given + /// level will be printed. 
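If `debug_verbosity` were ever backed by `tracing`, a plausible mapping from geth's integer scale to level filters might look like this (pure assumption; reth currently stubs the endpoint):

```rust
use tracing::level_filters::LevelFilter;

fn verbosity_to_filter(level: usize) -> LevelFilter {
    // geth uses 0 (silent) through 5 (most verbose); higher values are clamped.
    match level {
        0 => LevelFilter::OFF,
        1 => LevelFilter::ERROR,
        2 => LevelFilter::WARN,
        3 => LevelFilter::INFO,
        4 => LevelFilter::DEBUG,
        _ => LevelFilter::TRACE,
    }
}

fn main() {
    assert_eq!(verbosity_to_filter(3), LevelFilter::INFO);
}
```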
+ #[method(name = "verbosity")] + async fn debug_verbosity(&self, level: usize) -> RpcResult<()>; + + /// Sets the logging verbosity pattern. + #[method(name = "vmodule")] + async fn debug_vmodule(&self, pattern: String) -> RpcResult<()>; + + /// Writes a goroutine blocking profile to the given file. + #[method(name = "writeBlockProfile")] + async fn debug_write_block_profile(&self, file: String) -> RpcResult<()>; + + /// Writes an allocation profile to the given file. + #[method(name = "writeMemProfile")] + async fn debug_write_mem_profile(&self, file: String) -> RpcResult<()>; + + /// Writes a goroutine blocking profile to the given file. + #[method(name = "writeMutexProfile")] + async fn debug_write_mutex_profile(&self, file: String) -> RpcResult<()>; } diff --git a/crates/rpc/rpc-api/src/eth_filter.rs b/crates/rpc/rpc-api/src/eth_filter.rs index 9e313c75f70..484157898e1 100644 --- a/crates/rpc/rpc-api/src/eth_filter.rs +++ b/crates/rpc/rpc-api/src/eth_filter.rs @@ -23,7 +23,7 @@ pub trait EthFilterApi { /// Returns all logs matching given filter (in a range 'from' - 'to'). #[method(name = "getFilterLogs")] - async fn filter_logs(&self, id: FilterId) -> RpcResult>; + async fn filter_logs(&self, id: FilterId) -> RpcResult; /// Uninstalls filter. #[method(name = "uninstallFilter")] diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index 072cdbb3a29..c9246376a53 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -30,7 +30,7 @@ hyper = "0.14" # metrics reth-metrics = { workspace = true, features = ["common"] } -metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation +metrics.workspace = true # misc strum = { workspace = true, features = ["derive"] } diff --git a/crates/rpc/rpc-types/src/eth/engine/payload.rs b/crates/rpc/rpc-types/src/eth/engine/payload.rs index 6b8996be4c7..e7c02e9fa1a 100644 --- a/crates/rpc/rpc-types/src/eth/engine/payload.rs +++ b/crates/rpc/rpc-types/src/eth/engine/payload.rs @@ -32,9 +32,12 @@ impl std::fmt::Display for PayloadId { } } -/// This structure maps for the return value of `engine_getPayloadV2` of the beacon chain spec. +/// This structure maps for the return value of `engine_getPayload` of the beacon chain spec, for +/// both V2 and V3. /// -/// See also: +/// See also: +/// +/// #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct ExecutionPayloadEnvelope { /// Execution payload, which could be either V1 or V2 @@ -53,6 +56,10 @@ pub struct ExecutionPayloadEnvelope { // // TODO(mattsse): for V3 // #[serde(rename = "blobsBundle", skip_serializing_if = "Option::is_none")] // pub blobs_bundle: Option, + /// Introduced in V3, this represents a suggestion from the execution layer if the payload + /// should be used instead of an externally provided one. 
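A self-contained demonstration of the `skip_serializing_if` pattern that the serde attribute below relies on: `None` fields vanish from the JSON entirely, so pre-V3 payload envelopes keep round-tripping byte-for-byte (the `Envelope` struct is illustrative; requires `serde` and `serde_json`):

```rust
use serde::Serialize;

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct Envelope {
    // Omitted from the output when `None`, exactly like the real field.
    #[serde(skip_serializing_if = "Option::is_none")]
    should_override_builder: Option<bool>,
}

fn main() {
    let v2 = Envelope { should_override_builder: None };
    assert_eq!(serde_json::to_string(&v2).unwrap(), "{}");
    let v3 = Envelope { should_override_builder: Some(true) };
    assert_eq!(serde_json::to_string(&v3).unwrap(), r#"{"shouldOverrideBuilder":true}"#);
}
```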
+ #[serde(rename = "shouldOverrideBuilder", skip_serializing_if = "Option::is_none")] + pub should_override_builder: Option, } impl ExecutionPayloadEnvelope { @@ -82,14 +89,20 @@ pub struct ExecutionPayload { pub timestamp: U64, pub extra_data: Bytes, pub base_fee_per_gas: U256, - pub blob_gas_used: Option, - pub excess_blob_gas: Option, pub block_hash: H256, pub transactions: Vec, /// Array of [`Withdrawal`] enabled with V2 /// See #[serde(default, skip_serializing_if = "Option::is_none")] pub withdrawals: Option>, + /// Array of [`U64`] representing blob gas used, enabled with V3 + /// See + #[serde(default, skip_serializing_if = "Option::is_none")] + pub blob_gas_used: Option, + /// Array of [`U64`] representing excess blob gas, enabled with V3 + /// See + #[serde(default, skip_serializing_if = "Option::is_none")] + pub excess_blob_gas: Option, } impl From for ExecutionPayload { @@ -550,16 +563,16 @@ mod tests { #[test] fn serde_roundtrip_legacy_txs_payload() { - // pulled from hive tests - let s = r#"{"parentHash":"0x67ead97eb79b47a1638659942384143f36ed44275d4182799875ab5a87324055","feeRecipient":"0x0000000000000000000000000000000000000000","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x4e3c608a9f2e129fccb91a1dae7472e78013b8e654bccc8d224ce3d63ae17006","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prevRandao":"0x44bb4b98c59dbb726f96ffceb5ee028dcbe35b9bba4f9ffd56aeebf8d1e4db62","blockNumber":"0x1","gasLimit":"0x2fefd8","gasUsed":"0xa860","timestamp":"0x1235","extraData":"0x8b726574682f76302e312e30","baseFeePerGas":"0x342770c0","blobGasUsed":null,"excessBlobGas":null,"blockHash":"0x5655011482546f16b2312ef18e9fad03d6a52b1be95401aea884b222477f9e64","transactions":["0xf865808506fc23ac00830124f8940000000000000000000000000000000000000316018032a044b25a8b9b247d01586b3d59c71728ff49c9b84928d9e7fa3377ead3b5570b5da03ceac696601ff7ee6f5fe8864e2998db9babdf5eeba1a0cd5b4d44b3fcbd181b"]}"#; + // pulled from hive tests - modified with 4844 fields + let s = 
r#"{"parentHash":"0x67ead97eb79b47a1638659942384143f36ed44275d4182799875ab5a87324055","feeRecipient":"0x0000000000000000000000000000000000000000","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x4e3c608a9f2e129fccb91a1dae7472e78013b8e654bccc8d224ce3d63ae17006","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prevRandao":"0x44bb4b98c59dbb726f96ffceb5ee028dcbe35b9bba4f9ffd56aeebf8d1e4db62","blockNumber":"0x1","gasLimit":"0x2fefd8","gasUsed":"0xa860","timestamp":"0x1235","extraData":"0x8b726574682f76302e312e30","baseFeePerGas":"0x342770c0","blockHash":"0x5655011482546f16b2312ef18e9fad03d6a52b1be95401aea884b222477f9e64","transactions":["0xf865808506fc23ac00830124f8940000000000000000000000000000000000000316018032a044b25a8b9b247d01586b3d59c71728ff49c9b84928d9e7fa3377ead3b5570b5da03ceac696601ff7ee6f5fe8864e2998db9babdf5eeba1a0cd5b4d44b3fcbd181b"],"blobGasUsed":"0xb10b","excessBlobGas":"0xb10b"}"#; let payload: ExecutionPayload = serde_json::from_str(s).unwrap(); assert_eq!(serde_json::to_string(&payload).unwrap(), s); } #[test] fn serde_roundtrip_enveloped_txs_payload() { - // pulled from hive tests - let s = r#"{"parentHash":"0x67ead97eb79b47a1638659942384143f36ed44275d4182799875ab5a87324055","feeRecipient":"0x0000000000000000000000000000000000000000","stateRoot":"0x76a03cbcb7adce07fd284c61e4fa31e5e786175cefac54a29e46ec8efa28ea41","receiptsRoot":"0x4e3c608a9f2e129fccb91a1dae7472e78013b8e654bccc8d224ce3d63ae17006","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prevRandao":"0x028111cb7d25918386a69656b3d17b2febe95fd0f11572c1a55c14f99fdfe3df","blockNumber":"0x1","gasLimit":"0x2fefd8","gasUsed":"0xa860","timestamp":"0x1235","extraData":"0x8b726574682f76302e312e30","baseFeePerGas":"0x342770c0","blobGasUsed":null,"excessBlobGas":null,"blockHash":"0xa6f40ed042e61e88e76125dede8fff8026751ea14454b68fb534cea99f2b2a77","transactions":["0xf865808506fc23ac00830124f8940000000000000000000000000000000000000316018032a044b25a8b9b247d01586b3d59c71728ff49c9b84928d9e7fa3377ead3b5570b5da03ceac696601ff7ee6f5fe8864e2998db9babdf5eeba1a0cd5b4d44b3fcbd181b"]}"#; + // pulled from hive tests - modified with 4844 fields + let s = 
r#"{"parentHash":"0x67ead97eb79b47a1638659942384143f36ed44275d4182799875ab5a87324055","feeRecipient":"0x0000000000000000000000000000000000000000","stateRoot":"0x76a03cbcb7adce07fd284c61e4fa31e5e786175cefac54a29e46ec8efa28ea41","receiptsRoot":"0x4e3c608a9f2e129fccb91a1dae7472e78013b8e654bccc8d224ce3d63ae17006","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prevRandao":"0x028111cb7d25918386a69656b3d17b2febe95fd0f11572c1a55c14f99fdfe3df","blockNumber":"0x1","gasLimit":"0x2fefd8","gasUsed":"0xa860","timestamp":"0x1235","extraData":"0x8b726574682f76302e312e30","baseFeePerGas":"0x342770c0","blockHash":"0xa6f40ed042e61e88e76125dede8fff8026751ea14454b68fb534cea99f2b2a77","transactions":["0xf865808506fc23ac00830124f8940000000000000000000000000000000000000316018032a044b25a8b9b247d01586b3d59c71728ff49c9b84928d9e7fa3377ead3b5570b5da03ceac696601ff7ee6f5fe8864e2998db9babdf5eeba1a0cd5b4d44b3fcbd181b"],"blobGasUsed":"0xb10b","excessBlobGas":"0xb10b"}"#; let payload: ExecutionPayload = serde_json::from_str(s).unwrap(); assert_eq!(serde_json::to_string(&payload).unwrap(), s); } diff --git a/crates/rpc/rpc-types/src/eth/state.rs b/crates/rpc/rpc-types/src/eth/state.rs index 38e971083a5..b9e1221850d 100644 --- a/crates/rpc/rpc-types/src/eth/state.rs +++ b/crates/rpc/rpc-types/src/eth/state.rs @@ -24,11 +24,11 @@ pub struct AccountOverride { /// Fake key-value mapping to override all slots in the account storage before executing the /// call. #[serde(default, skip_serializing_if = "Option::is_none")] - pub state: Option>, + pub state: Option>, /// Fake key-value mapping to override individual slots in the account storage before executing /// the call. 
#[serde(default, skip_serializing_if = "Option::is_none")] - pub state_diff: Option>, + pub state_diff: Option>, } #[cfg(test)] @@ -48,4 +48,23 @@ mod tests { .unwrap(); assert!(acc.code.is_some()); } + #[test] + fn test_state_override_state_diff() { + let s = r#"{ + "0x1b5212AF6b76113afD94cD2B5a78a73B7d7A8222": { + "balance": "0x39726378b58c400000", + "stateDiff": {} + }, + "0xdAC17F958D2ee523a2206206994597C13D831ec7": { + "stateDiff": { + "0xede27e4e7f3676edbf125879f17a896d6507958df3d57bda6219f1880cae8a41": "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + } + } + }"#; + let state_override: StateOverride = serde_json::from_str(s).unwrap(); + let acc = state_override + .get(&"0x1b5212AF6b76113afD94cD2B5a78a73B7d7A8222".parse().unwrap()) + .unwrap(); + assert!(acc.state_diff.is_some()); + } } diff --git a/crates/rpc/rpc-types/src/eth/trace/parity.rs b/crates/rpc/rpc-types/src/eth/trace/parity.rs index 025c922d96c..ecb73c232c5 100644 --- a/crates/rpc/rpc-types/src/eth/trace/parity.rs +++ b/crates/rpc/rpc-types/src/eth/trace/parity.rs @@ -71,8 +71,8 @@ pub enum Delta { #[serde(rename_all = "camelCase")] pub struct AccountDiff { pub balance: Delta, - pub nonce: Delta, pub code: Delta, + pub nonce: Delta, pub storage: BTreeMap>, } diff --git a/crates/rpc/rpc-types/src/eth/transaction/receipt.rs b/crates/rpc/rpc-types/src/eth/transaction/receipt.rs index 8b37b556d71..5e43607be2b 100644 --- a/crates/rpc/rpc-types/src/eth/transaction/receipt.rs +++ b/crates/rpc/rpc-types/src/eth/transaction/receipt.rs @@ -14,32 +14,32 @@ pub struct TransactionReceipt { pub block_hash: Option, /// Number of the block this transaction was included within. pub block_number: Option, - /// Address of the sender - pub from: Address, - /// Address of the receiver. null when its a contract creation transaction. - pub to: Option
<Address>, /// Cumulative gas used within the block after this was executed. pub cumulative_gas_used: U256, /// Gas used by this transaction alone. pub gas_used: Option<U256>, + /// The price paid post-execution by the transaction (i.e. base fee + priority fee). Both + /// fields in 1559-style transactions are maximums (max fee + max priority fee); the amount + /// actually paid by users can only be determined post-execution. + pub effective_gas_price: U128, + /// Address of the sender + pub from: Address, + /// Address of the receiver. `None` when it is a contract creation transaction. + pub to: Option<Address>, /// Contract address created, or `None` if not a deployment. pub contract_address: Option<Address>
, /// Logs emitted by this transaction. pub logs: Vec, + /// Logs bloom + pub logs_bloom: Bloom, /// The post-transaction stateroot (pre Byzantium) /// /// EIP98 makes this optional field, if it's missing then skip serializing it #[serde(skip_serializing_if = "Option::is_none", rename = "root")] pub state_root: Option, - /// Logs bloom - pub logs_bloom: Bloom, /// Status: either 1 (success) or 0 (failure). Only present after activation of EIP-658 #[serde(skip_serializing_if = "Option::is_none", rename = "status")] pub status_code: Option, - /// The price paid post-execution by the transaction (i.e. base fee + priority fee). Both - /// fields in 1559-style transactions are maximums (max fee + max priority fee), the amount - /// that's actually paid by users can only be determined post-execution - pub effective_gas_price: U128, /// EIP-2718 Transaction type, Some(1) for AccessList transaction, None for Legacy #[serde(rename = "type")] pub transaction_type: U8, diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index cbceadc43d7..22b99eb3000 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -55,7 +55,7 @@ rayon.workspace = true # metrics reth-metrics.workspace = true -metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation +metrics.workspace = true # misc bytes.workspace = true diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index aac4f176efd..a18a5cf6ff0 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -12,7 +12,9 @@ use crate::{ }; use async_trait::async_trait; use jsonrpsee::core::RpcResult; -use reth_primitives::{Account, Block, BlockId, BlockNumberOrTag, Bytes, TransactionSigned, H256}; +use reth_primitives::{ + Account, Block, BlockId, BlockNumberOrTag, Bytes, TransactionSigned, H160, H256, +}; use reth_provider::{BlockReaderIdExt, HeaderProvider, StateProviderBox}; use reth_revm::{ database::{State, SubState}, @@ -25,7 +27,6 @@ use reth_revm::{ use reth_rlp::{Decodable, Encodable}; use reth_rpc_api::DebugApiServer; use reth_rpc_types::{ - state::StateOverride, trace::geth::{ BlockTraceResult, FourByteFrame, GethDebugBuiltInTracerType, GethDebugTracerType, GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, NoopFrame, TraceResult, @@ -248,9 +249,10 @@ where .inner .eth_api .spawn_with_call_at(call, at, overrides, move |db, env| { - inspect(db, env, &mut inspector)?; - let frame = - inspector.into_geth_builder().geth_call_traces(call_config); + let (res, _) = inspect(db, env, &mut inspector)?; + let frame = inspector + .into_geth_builder() + .geth_call_traces(call_config, res.result.gas_used()); Ok(frame.into()) }) .await?; @@ -340,8 +342,7 @@ where &self, bundles: Vec, state_context: Option, - opts: Option, - state_override: Option, + opts: Option, ) -> EthResult> { if bundles.is_empty() { return Err(EthApiError::InvalidParams(String::from("bundles are empty."))) @@ -356,8 +357,9 @@ where self.inner.eth_api.block_by_id(target_block), )?; + let opts = opts.unwrap_or_default(); let block = block.ok_or_else(|| EthApiError::UnknownBlockNumber)?; - let tracing_options = opts.unwrap_or_default(); + let GethDebugTracingCallOptions { tracing_options, mut state_overrides, .. 
} = opts; let gas_limit = self.inner.eth_api.call_gas_limit(); // we're essentially replaying the transactions in the block here, hence we need the state @@ -399,18 +401,22 @@ where while let Some(bundle) = bundles.next() { //let mut result = Vec::with_capacity(bundle.len()); let Bundle { transactions, block_override } = bundle; - let overrides = - EvmOverrides::new(state_override.clone(), block_override.map(Box::new)); + + let block_overrides = block_override.map(Box::new); let mut transactions = transactions.into_iter().peekable(); while let Some(tx) = transactions.next() { + // apply state overrides only once, before the first transaction + let state_overrides = state_overrides.take(); + let overrides = EvmOverrides::new(state_overrides, block_overrides.clone()); + let env = prepare_call_env( cfg.clone(), block_env.clone(), tx, gas_limit, &mut db, - overrides.clone(), + overrides, )?; let (trace, state) = this.trace_transaction( @@ -469,7 +475,9 @@ where let (res, _) = inspect(db, env, &mut inspector)?; - let frame = inspector.into_geth_builder().geth_call_traces(call_config); + let frame = inspector + .into_geth_builder() + .geth_call_traces(call_config, res.result.gas_used()); return Ok((frame.into(), res.state)) } @@ -647,6 +655,213 @@ where Ok(()) } + async fn debug_account_range( + &self, + _block_number: BlockNumberOrTag, + _start: Bytes, + _max_results: u64, + _nocode: bool, + _nostorage: bool, + _incompletes: bool, + ) -> RpcResult<()> { + Ok(()) + } + + async fn debug_block_profile(&self, _file: String, _seconds: u64) -> RpcResult<()> { + Ok(()) + } + + async fn debug_chaindb_compact(&self) -> RpcResult<()> { + Ok(()) + } + + async fn debug_chaindb_property(&self, _property: String) -> RpcResult<()> { + Ok(()) + } + + async fn debug_cpu_profile(&self, _file: String, _seconds: u64) -> RpcResult<()> { + Ok(()) + } + + async fn debug_db_ancient(&self, _kind: String, _number: u64) -> RpcResult<()> { + Ok(()) + } + + async fn debug_db_ancients(&self) -> RpcResult<()> { + Ok(()) + } + + async fn debug_db_get(&self, _key: String) -> RpcResult<()> { + Ok(()) + } + + async fn debug_dump_block(&self, _number: BlockId) -> RpcResult<()> { + Ok(()) + } + + async fn debug_free_os_memory(&self) -> RpcResult<()> { + Ok(()) + } + + async fn debug_freeze_client(&self, _node: String) -> RpcResult<()> { + Ok(()) + } + + async fn debug_gc_stats(&self) -> RpcResult<()> { + Ok(()) + } + + async fn debug_get_accessible_state( + &self, + _from: BlockNumberOrTag, + _to: BlockNumberOrTag, + ) -> RpcResult<()> { + Ok(()) + } + + async fn debug_get_modified_accounts_by_hash( + &self, + _start_hash: H256, + _end_hash: H256, + ) -> RpcResult<()> { + Ok(()) + } + + async fn debug_get_modified_accounts_by_number( + &self, + _start_number: u64, + _end_number: u64, + ) -> RpcResult<()> { + Ok(()) + } + + async fn debug_go_trace(&self, _file: String, _seconds: u64) -> RpcResult<()> { + Ok(()) + } + + async fn debug_intermediate_roots( + &self, + _block_hash: H256, + _opts: Option, + ) -> RpcResult<()> { + Ok(()) + } + + async fn debug_mem_stats(&self) -> RpcResult<()> { + Ok(()) + } + + async fn debug_mutex_profile(&self, _file: String, _nsec: u64) -> RpcResult<()> { + Ok(()) + } + + async fn debug_preimage(&self, _hash: H256) -> RpcResult<()> { + Ok(()) + } + + async fn debug_print_block(&self, _number: u64) -> RpcResult<()> { + Ok(()) + } + + async fn debug_seed_hash(&self, _number: u64) -> RpcResult { + Ok(Default::default()) + } + + async fn debug_set_block_profile_rate(&self, _rate: u64) -> RpcResult<()> { + 
Ok(()) + } + + async fn debug_set_gc_percent(&self, _v: i32) -> RpcResult<()> { + Ok(()) + } + + async fn debug_set_head(&self, _number: u64) -> RpcResult<()> { + Ok(()) + } + + async fn debug_set_mutex_profile_fraction(&self, _rate: i32) -> RpcResult<()> { + Ok(()) + } + + async fn debug_set_trie_flush_interval(&self, _interval: String) -> RpcResult<()> { + Ok(()) + } + + async fn debug_stacks(&self) -> RpcResult<()> { + Ok(()) + } + + async fn debug_standard_trace_bad_block_to_file( + &self, + _block: BlockNumberOrTag, + _opts: Option, + ) -> RpcResult<()> { + Ok(()) + } + + async fn debug_standard_trace_block_to_file( + &self, + _block: BlockNumberOrTag, + _opts: Option, + ) -> RpcResult<()> { + Ok(()) + } + + async fn debug_start_cpu_profile(&self, _file: String) -> RpcResult<()> { + Ok(()) + } + + async fn debug_start_go_trace(&self, _file: String) -> RpcResult<()> { + Ok(()) + } + + async fn debug_stop_cpu_profile(&self) -> RpcResult<()> { + Ok(()) + } + + async fn debug_stop_go_trace(&self) -> RpcResult<()> { + Ok(()) + } + + async fn debug_storage_range_at( + &self, + _block_hash: H256, + _tx_idx: usize, + _contract_address: H160, + _key_start: H256, + _max_result: u64, + ) -> RpcResult<()> { + Ok(()) + } + + async fn debug_trace_bad_block( + &self, + _block_hash: H256, + _opts: Option, + ) -> RpcResult<()> { + Ok(()) + } + + async fn debug_verbosity(&self, _level: usize) -> RpcResult<()> { + Ok(()) + } + + async fn debug_vmodule(&self, _pattern: String) -> RpcResult<()> { + Ok(()) + } + + async fn debug_write_block_profile(&self, _file: String) -> RpcResult<()> { + Ok(()) + } + + async fn debug_write_mem_profile(&self, _file: String) -> RpcResult<()> { + Ok(()) + } + + async fn debug_write_mutex_profile(&self, _file: String) -> RpcResult<()> { + Ok(()) + } + /// Handler for `debug_getRawBlock` async fn raw_block(&self, block_id: BlockId) -> RpcResult<Bytes> { let block = self.inner.provider.block_by_id(block_id).to_rpc_result()?; @@ -762,12 +977,10 @@ where &self, bundles: Vec<Bundle>, state_context: Option<StateContext>, - opts: Option<GethDebugTracingOptions>, - state_override: Option<StateOverride>, + opts: Option<GethDebugTracingCallOptions>, ) -> RpcResult<Vec<Vec<GethTrace>>> { let _permit = self.acquire_trace_permit().await; - Ok(DebugApi::debug_trace_call_many(self, bundles, state_context, opts, state_override) .await?) + Ok(DebugApi::debug_trace_call_many(self, bundles, state_context, opts).await?)
} } diff --git a/crates/rpc/rpc/src/eth/api/call.rs b/crates/rpc/rpc/src/eth/api/call.rs index e47555429ca..6e40b76b6ad 100644 --- a/crates/rpc/rpc/src/eth/api/call.rs +++ b/crates/rpc/rpc/src/eth/api/call.rs @@ -75,7 +75,7 @@ where &self, bundle: Bundle, state_context: Option<StateContext>, - state_override: Option<StateOverride>, + mut state_override: Option<StateOverride>, ) -> EthResult<Vec<EthCallResponse>> { let Bundle { transactions, block_override } = bundle; if transactions.is_empty() { @@ -123,17 +123,21 @@ where } } - let overrides = EvmOverrides::new(state_override.clone(), block_override.map(Box::new)); + let block_overrides = block_override.map(Box::new); let mut transactions = transactions.into_iter().peekable(); while let Some(tx) = transactions.next() { + // apply state overrides only once, before the first transaction + let state_overrides = state_override.take(); + let overrides = EvmOverrides::new(state_overrides, block_overrides.clone()); + let env = prepare_call_env( cfg.clone(), block_env.clone(), tx, gas_limit, &mut db, - overrides.clone(), + overrides, )?; let (res, _) = transact(&mut db, env)?; diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs index 4d48aa60598..1481325f6e7 100644 --- a/crates/rpc/rpc/src/eth/error.rs +++ b/crates/rpc/rpc/src/eth/error.rs @@ -480,6 +480,9 @@ pub enum RpcPoolError { /// Custom pool error #[error("{0:?}")] PoolTransactionError(Box<dyn PoolTransactionError>), + /// Unable to find the blob for an EIP4844 transaction + #[error("blob not found for EIP4844 transaction")] + MissingEip4844Blob, #[error(transparent)] Other(Box), } @@ -518,6 +521,7 @@ impl From<InvalidPoolTransactionError> for RpcPoolError { InvalidPoolTransactionError::OversizedData(_, _) => RpcPoolError::OversizedData, InvalidPoolTransactionError::Underpriced => RpcPoolError::Underpriced, InvalidPoolTransactionError::Other(err) => RpcPoolError::PoolTransactionError(err), + InvalidPoolTransactionError::MissingEip4844Blob => RpcPoolError::MissingEip4844Blob, } } } diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index 5332de726b0..d7ab17c3adc 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -135,7 +135,7 @@ where /// Returns an error if no matching log filter exists. /// /// Handler for `eth_getFilterLogs` - pub async fn filter_logs(&self, id: FilterId) -> Result<Vec<Log>, FilterError> { + pub async fn filter_logs(&self, id: FilterId) -> Result<FilterChanges, FilterError> { let filter = { let filters = self.inner.active_filters.inner.lock().await; if let FilterKind::Log(ref filter) = @@ -148,7 +148,8 @@ } }; - self.inner.logs_for_filter(filter).await + let logs = self.inner.logs_for_filter(filter).await?; + Ok(FilterChanges::Logs(logs)) } } @@ -187,7 +188,7 @@ where /// Returns an error if no matching log filter exists. /// /// Handler for `eth_getFilterLogs` - async fn filter_logs(&self, id: FilterId) -> RpcResult<Vec<Log>> { + async fn filter_logs(&self, id: FilterId) -> RpcResult<FilterChanges> { trace!(target: "rpc::eth", "Serving eth_getFilterLogs"); Ok(EthFilter::filter_logs(self, id).await?)
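// (editor's note, illustrative only -- not part of this diff) `filter_logs`
// now returns the same envelope type as `eth_getFilterChanges` instead of a
// bare `Vec<Log>`; the handler wraps its result as `FilterChanges::Logs(logs)`.
// A hedged sketch of that envelope (variant set assumed, not confirmed by this diff):
//
//     enum FilterChanges {
//         Logs(Vec<Log>),
//         Hashes(Vec<H256>),
//         Empty,
//     }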
} diff --git a/crates/rpc/rpc/src/eth/revm_utils.rs b/crates/rpc/rpc/src/eth/revm_utils.rs index d83b3d53e10..1dc65db8e2b 100644 --- a/crates/rpc/rpc/src/eth/revm_utils.rs +++ b/crates/rpc/rpc/src/eth/revm_utils.rs @@ -488,19 +488,13 @@ where account, new_account_state .into_iter() - .map(|(slot, value)| { - (U256::from_be_bytes(slot.0), U256::from_be_bytes(value.0)) - }) + .map(|(slot, value)| (U256::from_be_bytes(slot.0), value)) .collect(), )?; } (None, Some(account_state_diff)) => { for (slot, value) in account_state_diff { - db.insert_account_storage( - account, - U256::from_be_bytes(slot.0), - U256::from_be_bytes(value.0), - )?; + db.insert_account_storage(account, U256::from_be_bytes(slot.0), value)?; } } }; diff --git a/crates/stages/Cargo.toml b/crates/stages/Cargo.toml index 98846248b37..7ae6f5fca87 100644 --- a/crates/stages/Cargo.toml +++ b/crates/stages/Cargo.toml @@ -38,7 +38,7 @@ serde.workspace = true # metrics reth-metrics.workspace = true -metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation +metrics.workspace = true # misc thiserror.workspace = true diff --git a/crates/stages/src/stages/sender_recovery.rs b/crates/stages/src/stages/sender_recovery.rs index 9f72c69d6e3..002f0b6708a 100644 --- a/crates/stages/src/stages/sender_recovery.rs +++ b/crates/stages/src/stages/sender_recovery.rs @@ -212,11 +212,7 @@ fn stage_checkpoint( ) -> Result { let pruned_entries = provider .get_prune_checkpoint(PrunePart::SenderRecovery)? - .map(|checkpoint| provider.block_body_indices(checkpoint.block_number)) - .transpose()? - .flatten() - // +1 is needed because TxNumber is 0-indexed - .map(|body| body.last_tx_num() + 1) + .and_then(|checkpoint| checkpoint.tx_number) .unwrap_or_default(); Ok(EntitiesCheckpoint { // If `TxSenders` table was pruned, we will have a number of entries in it not matching @@ -409,7 +405,13 @@ mod tests { .save_prune_checkpoint( PrunePart::SenderRecovery, PruneCheckpoint { - block_number: max_pruned_block as BlockNumber, + block_number: Some(max_pruned_block), + tx_number: Some( + blocks[..=max_pruned_block as usize] + .iter() + .map(|block| block.body.len() as u64) + .sum::<u64>(), + ), prune_mode: PruneMode::Full, }, ) diff --git a/crates/stages/src/stages/tx_lookup.rs b/crates/stages/src/stages/tx_lookup.rs index 211266d45d8..65f5772b74e 100644 --- a/crates/stages/src/stages/tx_lookup.rs +++ b/crates/stages/src/stages/tx_lookup.rs @@ -13,7 +13,7 @@ use reth_primitives::{ stage::{EntitiesCheckpoint, StageCheckpoint, StageId}, PrunePart, TransactionSignedNoHash, TxNumber, H256, }; -use reth_provider::{BlockReader, DatabaseProviderRW, PruneCheckpointReader}; +use reth_provider::{DatabaseProviderRW, PruneCheckpointReader}; use tokio::sync::mpsc; use tracing::*; @@ -186,11 +186,7 @@ fn stage_checkpoint( ) -> Result { let pruned_entries = provider .get_prune_checkpoint(PrunePart::TransactionLookup)? - .map(|checkpoint| provider.block_body_indices(checkpoint.block_number)) - .transpose()?
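// (editor's note, illustrative only -- not part of this diff) Both stage
// checkpoints above stop deriving the pruned-entry count from block-body
// indices and read `checkpoint.tx_number` directly. Since `TxNumber` is
// 0-indexed, the number of pruned entries for blocks 0..=N is just the total
// transaction count of those blocks, which is exactly what the updated tests
// compute:
//
//     blocks[..=max_pruned_block as usize]
//         .iter()
//         .map(|block| block.body.len() as u64)
//         .sum::<u64>()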
- .flatten() - // +1 is needed because TxNumber is 0-indexed - .map(|body| body.last_tx_num() + 1) + .and_then(|checkpoint| checkpoint.tx_number) .unwrap_or_default(); Ok(EntitiesCheckpoint { // If `TxHashNumber` table was pruned, we will have a number of entries in it not matching @@ -365,7 +361,13 @@ mod tests { .save_prune_checkpoint( PrunePart::TransactionLookup, PruneCheckpoint { - block_number: max_pruned_block as BlockNumber, + block_number: Some(max_pruned_block), + tx_number: Some( + blocks[..=max_pruned_block as usize] + .iter() + .map(|block| block.body.len() as u64) + .sum::<u64>(), + ), prune_mode: PruneMode::Full, }, ) diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index 433a0ccee9a..f4bcd553aa7 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -32,7 +32,7 @@ modular-bitfield = "0.11.2" # metrics reth-metrics.workspace = true -metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation +metrics.workspace = true # misc bytes.workspace = true diff --git a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/ChangeLog.md b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/ChangeLog.md deleted file mode 100644 index a1b8321dfac..00000000000 --- a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/ChangeLog.md +++ /dev/null @@ -1,1577 +0,0 @@ -ChangeLog --------- - -English version [by Google](https://gitflic-ru.translate.goog/project/erthink/libmdbx/blob?file=ChangeLog.md&_x_tr_sl=ru&_x_tr_tl=en) -and [by Yandex](https://translated.turbopages.org/proxy_u/ru-en.en/https/gitflic.ru/project/erthink/libmdbx/blob?file=ChangeLog.md). - - -## v0.12.6 "ЦСКА" at 2023-04-29 - -A stabilizing release with fixes for discovered bugs and elimination of shortcomings, on the day of the 100th anniversary of the [CSKA](https://ru.wikipedia.org/wiki/Центральный_спортивный_клуб_Армии) sports club. - -``` -14 files changed, 117 insertions(+), 83 deletions(-) -Signed-off-by: Леонид Юрьев (Leonid Yuriev) -``` - -Minors: - - - Updated the patch for old buildroot versions. - - Switched to clang-format-16. - - Used `enum` types instead of `int` to eliminate GCC 13 warnings that could break the build on Fedora 38. - - --------------------------------------------------------------------------------- - - -## v0.12.5 "Динамо" at 2023-04-18 - -A stabilizing release with fixes for discovered bugs and elimination of shortcomings, on the day of the 100th anniversary of the [Dynamo](https://ru.wikipedia.org/wiki/Динамо_(спортивное_общество)) sports society. - -``` -16 files changed, 686 insertions(+), 247 deletions(-) -Signed-off-by: Леонид Юрьев (Leonid Yuriev) -``` - -Acknowledgments: - - - Max for reporting the issue with exporting deprecated API functions from DSO/DLL. - - [`@calvin3721`](https://t.me/calvin3721) for reporting the issue with `MainDB` operating with non-default flags. - -Fixes: - - - Fixed export from DSO/DLL of deprecated functions that were replaced by inline ones in the current API. - - Eliminated use of a wrong comparator when creating or re-creating `MainDB` with flags/options that imply a specific (non-default) comparator. - -Minors: - - - Removed duplicate diagnostics inside `node_read_bigdata()`. - - Fixed links in the description of `mdbx_env_set_geometry()`. - - Added a separate `extra/upsert_alldups` test for the specific scenario of replacing/overwriting all multi-values of a key with a single value, i.e. replacing all "duplicates" with one value.
- - Added `buffer::key_from()` variants to the C++ API with explicit naming by data type. - - Added a separate `extra/maindb_ordinal` test for the specific scenario of creating `MainDB` with flags that require a non-default comparator. - - Refactored the meta-page "coherency" check. - - Adjusted `osal_vasprintf()` to eliminate static-analyzer warnings. - - --------------------------------------------------------------------------------- - - -## v0.12.4 "Арта-333" at 2023-03-03 - -A stabilizing release with fixes for discovered bugs and elimination of shortcomings and technical debt. The 0.12 branch is considered ready for production use, receives stable status, and will henceforth receive only bug fixes. Development will continue in the 0.13 branch, while the 0.11 branch becomes archival. - -``` -63 files changed, 1161 insertions(+), 569 deletions(-) -Signed-off-by: Леонид Юрьев (Leonid Yuriev) -``` - -Acknowledgments: - - - Max for reporting the ERROR_SHARING_VIOLATION issue in MDBX_EXCLUSIVE mode on Windows. - - Alisher Ashyrov for reporting the assertion-check issue and for assistance in debugging. - - Masatoshi Fukunaga for reporting the `put(MDBX_UPSERT+MDBX_ALLDUPS)` issue for the case of replacing all values in a subDb. - -Fixes: - - - Eliminated a regression after commit 474391c83c5f81def6fdf3b0b6f5716a87b78fbf that led to ERROR_SHARING_VIOLATION being returned on Windows when opening a DB in MDBX_EXCLUSIVE read-write mode. - - Added a mapping-size limit for a short read-only file, to prevent the ERROR_NOT_ENOUGH_MEMORY error on Windows, which arises in this case and is not at all informative for the user. - - Refactored `dxb_resize()`, in particular to eliminate triggering of the `size_bytes == env->me_dxb_mmap.current` assertion in specific multi-threaded usage scenarios. The assertion fired only in debug builds, upon a specific overlap in time of a reading and a writing transaction in different threads, simultaneously with a DB resize. Apart from the assertion firing, no other consequences arose. - - Eliminated an issue in `put(MDBX_UPSERT+MDBX_ALLDUPS)` for the case of replacing all values of the single key in a subDb. During this operation the subDb becomes completely empty, without any pages, and exactly this situation was not accounted for in the code, leading to DB corruption on commit of such a transaction. - - Removed an excessive assertion inside `override_meta()`, which in debug builds could lead to false-positive assertion failures during DB recovery, including during automatic rollback of weak meta-pages. - - Adjusted the `__cold`/`__hot` macros, in particular to eliminate the `error: inlining failed in call to ‘always_inline FOO(...)’: target specific option mismatch` issue when building with GCC >10.x for SH4. - -Technical debt elimination and minors: - - - Fixed numerous typos in the documentation. - - Improved the test for full stochastic checking of `MDBX_EKEYMISMATCH` in `MDBX_APPEND` mode. - - Extended the `mdbx_chk` run scenarios in the CMake tests to check both the normal and the exclusive read-write modes. - - Refined the `const` and `noexcept` specifiers for several methods in the C++ API. - - Eliminated use of the stack for buffers in `wchar` path conversion. - - Added the `mdbx_env_get_path()` function on Windows for obtaining the DB path in multi-byte character format.
- - Added doxygen descriptions for the wide-character API. - - Eliminated MSVC static-analyzer warnings; all of them were insignificant or false. - - Eliminated a false GCC warning when building for SH4. - - Added ASAN (Address Sanitizer) support when building with MSVC. - - Extended the set of iterated modes in the `test/long_stochastic.sh` script; added the `--extra` option. - - Added support for the extended runtime options `mdbx::extra_runtime_option` to the C++ API, analogous to `enum MDBX_option_t` from the C API. - - Output of all page-operations counters in `mdbx_stat`. - - --------------------------------------------------------------------------------- - - -## v0.12.3 "Акула" at 2023-01-07 - -A release with significant improvements and new functionality, in memory of the shut-down open-source [Akula project](https://erigon.substack.com/p/winding-down-support-for-akula-project). - -Added prefault writing, reworked the control of unified page/buffer cache "incoherence", changed the page-merging tactics, etc. It became even faster, in some scenarios twice as fast. - -``` -20 files changed, 4508 insertions(+), 2928 deletions(-) -Signed-off-by: Леонид Юрьев (Leonid Yuriev) -``` - -Acknowledgments: - - - [Alex Sharov](https://t.me/AskAlexSharov) and the [Erigon](https://github.com/ledgerwatch/erigon) team for testing. - - [Simon Leier](https://t.me/leisim) for reporting failures and testing. - -New: - - - Use of the address [https://libmdbx.dqdkfa.ru/dead-github](https://libmdbx.dqdkfa.ru/dead-github) to refer to the copies, preserved on web.archive.org, of resources destroyed by the Github administration. - - Implemented prefault writing when allocating pages for read-write mappings. This yields a multifold reduction of system overhead and a substantial performance increase in the corresponding usage scenarios, when: - the DB size and data volume are substantially larger than RAM; - `MDBX_WRITEMAP` mode is used; - transactions are not small (many hundreds or thousands of pages are allocated over the course of a transaction). - In `MDBX_WRITEMAP` mode, allocating/reusing pages leads to a page fault and a read of the page from disk, even if the page content is not needed (it will be overwritten). This is a consequence of how the virtual memory subsystem works, while the standard remedy via `MADV_REMOVE` does not work on all filesystems and is usually more expensive than the obtained savings. - Now libmdbx uses "anticipatory writing" of such pages, which on systems with a [unified page cache](https://www.opennet.ru/base/dev/ubc.txt.html) "pushes" the data in, eliminating the need to read from disk when such a memory page is accessed. - The new functionality works in concert with the automatic read-ahead management and the cache of page-residency status in RAM, via [mincore()](https://man7.org/linux/man-pages/man2/mincore.2.html). - - Added the `MDBX_opt_prefault_write_enable` option for forcibly enabling/disabling prefault writing. - - Implemented a dynamic choice between write-through to disk and ordinary writing followed by [fdatasync()](https://man7.org/linux/man-pages/man3/fdatasync.3p.html), controlled by the `MDBX_opt_writethrough_threshold` option. - In durable modes, data can be flushed to disk in two ways: - by write-through via a file descriptor opened with `O_DSYNC`; - by ordinary writing followed by a call to `fdatasync()`.
- - The first way is more advantageous when writing a small number of pages and/or if the channel to the disk/medium has near-zero latency. The second way is more advantageous if many pages need to be written and/or the I/O channel has significant latency (data centers, clouds). The added `MDBX_opt_writethrough_threshold` option allows setting, at run time, a threshold for dynamically choosing the write method depending on the volume and the specific usage conditions (see the illustrative sketch below). - - Automatic setting of `MDBX_opt_rp_augment_limit` depending on the DB size. - - Prohibited differing `MDBX_WRITEMAP` modes between processes in the deferred/lazy-write modes, since in that case it is impossible to guarantee data flushing to disk in all cases on all supported platforms. - - Added the `MDBX_MMAP_USE_MS_ASYNC` build option, which allows disabling use of the `msync(MS_ASYNC)` system call, which is unnecessary on the vast majority of current OSes. By default `MDBX_MMAP_USE_MS_ASYNC=0` (disabled) on Linux and other systems with a unified page cache. Such behavior (without `msync(MS_ASYNC)`) corresponds to LMDB's immutable (hardcoded) logic. As a result, in simple/naive benchmarks libmdbx outpaces LMDB roughly the same as in real-world use. - Just in case, it is worth noting/reminding once again that on Windows libmdbx will presumably lag behind LMDB in scenarios with a multitude of small transactions, since libmdbx deliberately uses file locks on Windows, which are slow (poorly implemented in the OS kernel) but insure users against a host of incorrect actions leading to DB corruption. - - Support for non-printable subDb names. - - Added explicit selection of `tls_model("local-dynamic")` to work around the `relocation R_X86_64_TPOFF32 against FOO cannot be used with -shared` issue caused by a CLANG bug resulting in use of the wrong `ls_model` mode. - - Changed the page-merging tactics on deletion. Merging is now done preferentially with an already modified/dirty page. If the pages to the right and to the left both have the same status, then the least-filled one is used, as before. In massive-deletion scenarios this allows a performance increase of up to 50%. - - Added a check for the absence of LCK files with alternative naming. - -Fixes (without adjustments of the new functionality): - - - Resize the mapping if required to flush data to disk when `mdbx_env_sync()` is called from a parallel thread outside a running transaction. - - Fixed a regression after commit db72763de049d6e4546f838277fe83b9081ad1de of 2022-10-08 in the logic of returning dirty pages in `MDBX_WRITEMAP` mode, due to which freed pages were not used immediately but landed in the retired list of the transaction in progress, causing unjustified growth of the transaction size. - - Eliminated SIGSEGV or an erroneous `free()` call in situations of reopening the environment via `mdbx_env_open()`. - - Eliminated a bug introduced in commit fe20de136c22ed3bc4c6d3f673e79c106e824f60 of 2022-09-18, as a result of which `msync()` was never called on Linux in `MDBX_WRITEMAP` mode. The problem exists only in release 0.12.2. - - Added dirty-page accounting in `MDBX_WRITEMAP` to provide, via `mdbx_txn_info()`, up-to-date information about the volume of changes during read-write transactions.
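Aside (editor's illustration, not part of the original changelog): a minimal Rust sketch of the two durable-flush strategies that `MDBX_opt_writethrough_threshold` chooses between, assuming a Unix target and the `libc` crate for the `O_DSYNC` constant; the file names and page counts are made up.

```
use std::fs::OpenOptions;
use std::io::Write;
use std::os::unix::fs::OpenOptionsExt;

fn main() -> std::io::Result<()> {
    let page = [0u8; 4096];

    // Strategy 1: write-through. Opening with O_DSYNC makes every write()
    // reach stable storage before returning; cheapest when few pages are
    // written and/or the device latency is near zero.
    let mut writethrough = OpenOptions::new()
        .write(true)
        .create(true)
        .custom_flags(libc::O_DSYNC)
        .open("/tmp/writethrough.bin")?;
    writethrough.write_all(&page)?;

    // Strategy 2: ordinary buffered writes followed by a single fdatasync();
    // cheaper when many pages are written and/or the I/O channel has
    // significant latency (data centers, clouds).
    let mut buffered = OpenOptions::new().write(true).create(true).open("/tmp/buffered.bin")?;
    for _ in 0..1024 {
        buffered.write_all(&page)?;
    }
    buffered.sync_data()?; // maps to fdatasync(2)
    Ok(())
}
```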
- - Fixed an insignificant typo in the `#if` conditions determining the byte order. - - Fixed the build for the `MDBX_PNL_ASCENDING=1` case. - -Technical debt elimination and minors: - - - Improved support for auto-merging of GC records inside `page_alloc_slowpath()`. - - Eliminated insignificant Coverity warnings. - - Use of a single cursor for searching in the GC. - - Reworked the internal flags related to allocating pages from the GC. - - Improved the reserve preparation before a GC update with BigFoot enabled. - - Optimized `pnl_merge()` for the cases of non-overlapping merged lists. - - Optimized maintenance of the sorted page list in `dpl_append()`. - - Sped up `mdbx_chk` when processing user records in `@MAIN`. - - Reworked the LRU marks for spilling. - - Reworked the unified page cache "incoherence" control to reduce overhead. - - Refactoring and micro-optimization. - - --------------------------------------------------------------------------------- - - -## v0.12.2 "Иван Ярыгин" at 2022-11-11 - -A release with significant improvements and new functionality, in memory of the Russian wrestler [Ivan Sergeyevich Yarygin](https://ru.wikipedia.org/wiki/Ярыгин,_Иван_Сергеевич). - -At the 1972 Olympic Games in Munich, Ivan Yarygin pinned all of his opponents, spending less than 9 minutes in total. This record remains unbeaten to this day. - -``` -64 files changed, 5573 insertions(+), 2510 deletions(-) -Signed-off-by: Леонид Юрьев (Leonid Yuriev) -``` - -New: - - - Support for all major options when building with CMake. - - CMake requirements lowered to version 3.0.2 to enable building for legacy platforms. - - Added the ability to profile GC operation in complex and/or loaded scenarios (for example Ethereum/Erigon). The corresponding code is disabled by default; to activate it, the `MDBX_ENABLE_PROFGC=1` build option must be specified. - - Added the `mdbx_env_warmup()` function for "warming up" the DB with the option of pinning pages in memory. The `-u` and `-U` options were added to the `mdbx_chk`, `mdbx_copy` and `mdbx_dump` utilities to activate the corresponding functionality. - - Disabled dirty-page accounting in the modes that do not require it (`MDBX_WRITEMAP` with `MDBX_AVOID_MSYNC=0`). This improvement reduces overhead; it was planned long ago but was postponed since it required other changes. - - Spilling of dirty pages now takes the size of large/overflow pages into account. This improvement makes it possible to correctly honor the policy set by the `MDBX_opt_txn_dp_limit`, `MDBX_opt_spill_max_denominator` and `MDBX_opt_spill_min_denominator` options; it was planned long ago but was postponed since it required other changes. - - Added UNICODE-dependent definitions of the `MDBX_DATANAME`, `MDBX_LOCKNAME` and `MDBX_LOCK_SUFFIX` macros to the API for Windows. - - Switched to predominant use of the `size_t` type to reduce overhead on the Elbrus platform. - - Added the `mdbx_limits_valsize4page_max()` and `mdbx_env_get_valsize4page_max()` functions to the API, returning the maximum size in bytes of a value that can be placed in a single large/overflow page rather than in a sequence of two or more such pages. For tables with duplicates support, moving values out to large/overflow pages is not supported, so the result coincides with `mdbx_limits_valsize_max()`.
- - Added the `mdbx_limits_pairsize4page_max()` and `mdbx_env_get_pairsize4page_max()` functions to the API, returning in bytes the maximum total size of a key-value pair for placement on a single leaf page, without moving the value out to a separate large/overflow page. For tables with duplicates support, moving values out to large/overflow pages is not supported, so the result defines the maximum/allowed total size of a key-value pair. - - Implemented use of asynchronous (overlapped) writing on Windows, including use of unbuffered I/O and `WriteGather()`. This reduces overhead and partially works around Windows' problems with low I/O performance, including the large delays of `FlushFileBuffers()`. The new code also consolidates the written regions on all platforms, while on Windows the use of events is reduced to a minimum, together with automatic use of `WriteGather()`. A substantial reduction of OS-interaction overhead is therefore expected, and on Windows this speedup may, in some scenarios, be multifold in comparison with LMDB. - - Added the `MDBX_AVOID_MSYNC` build option, which defines libmdbx's behavior in `MDBX_WRITE_MAP` mode (when data is modified directly in the DB pages mapped into RAM): - * If `MDBX_AVOID_MSYNC=0` (the default on all systems except Windows), then (as before) data is persisted via `msync()`, or `FlushViewOfFile()` on Windows. On platforms with a full-fledged virtual memory subsystem and adequate file I/O this ensures minimal overhead (a single system call) and maximum performance. On Windows, however, it leads to significant degradation, in part because after `FlushViewOfFile()` a call to `FlushFileBuffers()` is also required, with a host of problems and bustle inside the OS kernel. - * If `MDBX_AVOID_MSYNC=1` (the default only on Windows), then data is persisted by explicitly writing each modified DB page to the file. This requires extra overhead, both for tracking the modified pages (maintaining lists of dirty pages) and for the system calls to write them. In addition, from the viewpoint of the OS kernel's virtual memory subsystem, DB pages modified in RAM and explicitly written to the file may either remain dirty and be written again by the OS kernel later, or require extra overhead for tracking PTEs (Page Table Entries), modifying them and additionally copying the data. Nevertheless, according to the available information, on Windows this way of writing data provides higher performance overall. - - Improved the heuristics for enabling auto-merging of GC records. - - Changed the LCK format and the semantics of some internal fields. libmdbx versions using different formats will not be able to work with the same DB simultaneously, only in turn (the LCK file is rewritten on opening by the first process to open the DB). - - Added transaction-commit methods that return latency information to the `C++` API. - - Added the `MDBX_HAVE_BUILTIN_CPU_SUPPORTS` build option to control use of GCC's `__builtin_cpu_supports()` function, which could be unavailable on fake OSes (macos, ios, android, etc). - -Fixes (without adjustments of the new functionality listed above): - - - Eliminated a number of warnings when building with MinGW.
- - Eliminated false-positive Valgrind reports about use of uninitialized data caused by alignment gaps in `struct troika`. - - Fixed the unexpected `MDBX_BUSY` error returned from the `mdbx_env_set_option()`, `mdbx_env_set_syncbytes()` and `mdbx_env_set_syncperiod()` functions. - - Small fixes for compatibility with CMake 3.8. - - More control and caution (paranoia) to insure against `mremap()` defects. - - A workaround to fix the build with old versions of `stdatomic.h` from GNU Lib C, where the `ATOMIC_*_LOCK_FREE` macros are mistakenly redefined via functions. - - Use of `fcntl64(F_GETLK64/F_SETLK64/F_SETLKW64)` when available. This solves the problem of an assertion firing when building for platforms where the `off_t` type is wider than the corresponding fields of `struct flock` used for file locking. - - Improved the collection of latency information during transaction commit: * Eliminated distortion of the GC-update duration measurements when the internal debug audit is enabled; * Protection against zero-underflow of only the total latency in the metrics, to rule out situations where the sum of the individual stages is greater than the total duration. - - A number of fixes to eliminate assertion failures in debug builds. - - More careful conversion to the `mdbx_tid_t` type to eliminate warnings. - - Fixed an extra flush of data to disk in `MDBX_SAFE_NOSYNC` mode when updating the GC. - - Fixed an extra check for `MDBX_APPENDDUP` inside `mdbx_cursor_put()` which could result in returning `MDBX_EKEYMISMATCH` for valid cases. - - Fixed nasty `clz()` bug (by using `_BitScanReverse()`, only MSVC builds affected). - -Minors: - - - Historical links related to the project deleted on ~~github~~ are redirected to [web.archive.org](https://web.archive.org/web/https://github.com/erthink/libmdbx). - - Synchronized the CMake constructs between the projects. - - Added a warning about the insecurity of RISC-V. - - Added descriptions of the `MDBX_debug_func` and `MDBX_debug_func` parameters. - - Added a workaround to minimize false-positive conflicts when using file locks on Windows. - - Checking the atomicity of C11 operations on 32/64-bit data. - - Reduced the default value of `me_options.dp_limit` by a factor of 42 in debug builds. - - Added the `gcc-riscv64-linux-gnu` platform to the list for the `cross-gcc` target. - - Small edits to the `long_stochastic.sh` script for operation on Windows. - - Removed an unneeded `LockFileEx()` call inside `mdbx_env_copy()`. - - Added a description of file-descriptor usage in the various modes. - - Added use of `_CrtDbgReport()` in debug builds. - - Fixed an extra ensure/assertion check of `oldest_reader` inside `txn_end()`. - - Removed the description of the deprecated usage of `MDBX_NODUPDATA`. - - Fixed a regression in ASAN/Valgrind-enabled builds. - - Fixed a minor MinGW warning. - - --------------------------------------------------------------------------------- - - -## v0.12.1 "Positive Proxima" at 2022-08-24 - -The planned frontward release with new superior features on the day of the 20th anniversary of [Positive Technologies](https://ptsecurty.com). - -``` -37 files changed, 7604 insertions(+), 7417 deletions(-) -Signed-off-by: Леонид Юрьев (Leonid Yuriev) -``` - -New: - - - The `Big Foot` feature which significantly reduces GC overhead for processing large lists of retired pages from huge transactions.
- Now _libmdbx_ avoids creating large chunks of PNLs (page number lists) which would require long sequences of free pages, aka large/overflow pages, thus avoiding searching, allocating and storing such sequences inside the GC. - - Improved hot/online validation and checking of database pages, both for more robustness and for performance. - - New solid and fast method to latch meta-pages called `Troika`. The minimum of memory barriers, reads, comparisons and conditional transitions are used. - - New `MDBX_VALIDATION` environment option for extra validation of the DB structure and page content, for careful/safe handling of a damaged or untrusted DB. - - Search of page sequences accelerated ×16/×8/×4 by AVX512/AVX2/SSE2/Neon implementations. - - Added the `gcrtime_seconds16dot16` counter to the "Page Operation Statistics" that accumulates time spent for GC searching and reclaiming. - - Copy-with-compactification now clears/zeroes unused gaps inside database pages. - - The `C` and `C++` APIs have been extended and/or refined to simplify using `wchar_t` pathnames. On Windows the `mdbx_env_openW()`, `mdbx_env_get_pathW()`, `mdbx_env_copyW()`, `mdbx_env_open_for_recoveryW()` are available for now, but the `mdbx_env_get_path()` has been replaced in favor of `mdbx_env_get_pathW()`. - - Added explicit error message for Buildroot's Microblaze toolchain maintainers. - - Added the `MDBX_MANAGE_BUILD_FLAGS` build option for CMake. - - Sped up the internal `bsearch`/`lower_bound` implementation using a branchless tactic, including a workaround for a CLANG x86 optimiser bug. - - A lot of internal refinements and micro-optimisations. - - Internally counted volume of dirty pages (unused for now but for coming features). - -Fixes: - - - Never use modern `__cxa_thread_atexit()` on Apple's OSes. - - Don't check owner for finished transactions. - - Fixed typo in `MDBX_EINVAL` which breaks MinGW builds with CLANG. - - -## v0.12.0 at 2022-06-19 - -Not a release but preparation for changing the feature set and API. - - -******************************************************************************** - - -## v0.11.14 "Sergey Kapitsa" at 2023-02-14 - -The stable bugfix release in memory of [Sergey Kapitsa](https://en.wikipedia.org/wiki/Sergey_Kapitsa) on his 95th birthday. - -``` -22 files changed, 250 insertions(+), 174 deletions(-) -Signed-off-by: Леонид Юрьев (Leonid Yuriev) -``` - -Fixes: - - backport: Fixed an insignificant typo of `||` inside the `#if` byte-order condition. - - backport: Fixed `SIGSEGV` or an erroneous call to `free()` in situations where errors occur when reopening a previously used environment by `mdbx_env_open()`. - - backport: Fixed `cursor_put_nochecklen()` internals for the case when a dupsort'ed named subDb contains a single key with multiple values (aka duplicates), which are replaced with a single value by a put-operation with the `MDBX_UPSERT+MDBX_ALLDUPS` flags. In this case, the database becomes completely empty, without any pages. However, exactly this condition was not considered and thus wasn't handled correctly. See [issue#8](https://gitflic.ru/project/erthink/libmdbx/issue/8) for more information. - - backport: Fixed an extra assertion inside `override_meta()`, which could lead to false-positive failing of the assertion in debug builds during DB recovery and auto-rollback. - - backport: Refined the `__cold`/`__hot` macros to avoid the `error: inlining failed in call to ‘always_inline FOO(...)’: target specific option mismatch` issue during builds using GCC >10.x for the SH4 arch.
- -Minors: - - backport: Using the https://libmdbx.dqdkfa.ru/dead-github for resources deleted by the Github administration. - - backport: Fixed English typos. - - backport: Fixed the proto of `__asan_default_options()`. - - backport: Fixed the doxygen description of the C++ API, especially of the C++20 concepts. - - backport: Refined `const` and `noexcept` for a few C++ API methods. - - backport: Fixed a copy&paste typo in "Getting started". - - backport: Updated the MithrilDB status. - - backport: Resolved a false-positive `used uninitialized` warning from GCC >10.x while building for the SH4 arch. - - --------------------------------------------------------------------------------- - - -## v0.11.13 "Swashplate" at 2022-11-10 - -The stable bugfix release in memory of [Boris Yuryev](https://ru.wikipedia.org/wiki/Юрьев,_Борис_Николаевич) on his 133rd birthday. - -``` -30 files changed, 405 insertions(+), 136 deletions(-) -Signed-off-by: Леонид Юрьев (Leonid Yuriev) -``` - -Fixes: - - - Fixed builds with older libc versions after using `fcntl64()` (backport). - - Fixed builds with older `stdatomic.h` versions, where the `ATOMIC_*_LOCK_FREE` macros were mistakenly redefined using functions (backport). - - Added a workaround for a `mremap()` defect to avoid an assertion failure (backport). - - Workaround for `encryptfs` bug(s) in the `copy_file_range` implementation (backport). - - Fixed unexpected `MDBX_BUSY` from `mdbx_env_set_option()`, `mdbx_env_set_syncbytes()` and `mdbx_env_set_syncperiod()` (backport). - - CMake requirements lowered to version 3.0.2 (backport). - -Minors: - - - Minor clarification of the `--help` output of `mdbx_test` (backport). - - Added an admonition about the insecurity of RISC-V (backport). - - Stochastic scripts and CMake files synchronized with the `devel` branch. - - Use `--dont-check-ram-size` for the small-tests make-targets (backport). - - --------------------------------------------------------------------------------- - - -## v0.11.12 "Эребуни" at 2022-10-12 - -The stable bugfix release. - -``` -11 files changed, 96 insertions(+), 49 deletions(-) -Signed-off-by: Леонид Юрьев (Leonid Yuriev) -``` - -Fixes: - - - Fixed a static assertion failure on platforms where the `off_t` type is wider than the corresponding fields of `struct flock` used for file locking (backport). Now _libmdbx_ will use `fcntl64(F_GETLK64/F_SETLK64/F_SETLKW64)` if available. - - Fixed an assertion check inside `page_retire_ex()` (backport). - -Minors: - - - Fixed `-Wint-to-pointer-cast` warnings while casting to `mdbx_tid_t` (backport). - - Removed a needless `LockFileEx()` inside `mdbx_env_copy()` (backport). - - --------------------------------------------------------------------------------- - - -## v0.11.11 "Тендра-1790" at 2022-09-11 - -The stable bugfix release. - -``` -10 files changed, 38 insertions(+), 21 deletions(-) -Signed-off-by: Леонид Юрьев (Leonid Yuriev) -``` - -Fixes: - - - Fixed an extra check for `MDBX_APPENDDUP` inside `mdbx_cursor_put()` which could result in returning `MDBX_EKEYMISMATCH` for valid cases. - - Fixed an extra ensure/assertion check of `oldest_reader` inside `mdbx_txn_end()`. - - Fixed derived C++ builds by removing `MDBX_INTERNAL_FUNC` for `mdbx_w2mb()` and `mdbx_mb2w()`. - - --------------------------------------------------------------------------------- - - -## v0.11.10 "the TriColor" at 2022-08-22 - -The stable bugfix release.
- -``` -14 files changed, 263 insertions(+), 252 deletions(-) -Signed-off-by: Леонид Юрьев (Leonid Yuriev) -``` - -New: - - - The C++ API has been refined to simplify support for `wchar_t` in path names. - - Added an explicit error message for Buildroot's Microblaze toolchain maintainers. - -Fixes: - - - Never use modern `__cxa_thread_atexit()` on Apple's OSes. - - Use `MultiByteToWideChar(CP_THREAD_ACP)` instead of `mbstowcs()`. - - Don't check owner for finished transactions. - - Fixed typo in `MDBX_EINVAL` which breaks MinGW builds with CLANG. - -Minors: - - - Fixed a variable-name typo. - - Using `ldd` to check the used dso. - - Added the `MDBX_WEAK_IMPORT_ATTRIBUTE` macro. - - Use the current transaction geometry for untouched parameters when `env_set_geometry()` is called within a write transaction. - - Minor clarification of the `iov_page()` failure case. - - --------------------------------------------------------------------------------- - - -## v0.11.9 "Чирчик-1992" at 2022-08-02 - -The stable bugfix release. - -``` -18 files changed, 318 insertions(+), 178 deletions(-) -Signed-off-by: Леонид Юрьев (Leonid Yuriev) -``` - -Acknowledgments: - - - [Alex Sharov](https://github.com/AskAlexSharov) and the Erigon team for reporting and testing. - - [Andrew Ashikhmin](https://gitflic.ru/user/yperbasis) for contributing. - -New: - - - Ability to customise `MDBX_LOCK_SUFFIX`, `MDBX_DATANAME`, `MDBX_LOCKNAME` just by predefining them during build. - - Added to [`mdbx::env_managed`](https://libmdbx.dqdkfa.ru/group__cxx__api.html#classmdbx_1_1env__managed)'s methods a few overloads with a `const char* pathname` parameter (C++ API). - -Fixes: - - - Fixed a hang in copy-with-compactification of a corrupted DB, or in case the volume of output pages is a multiple of `MDBX_ENVCOPY_WRITEBUF`. - - Fixed the standalone non-CMake build on MacOS (`#include <AvailabilityMacros.h>`). - - Fixed an unexpected `MDBX_PAGE_FULL` error in rare cases with large database page sizes. - -Minors: - - - Minor fixes of Doxygen references, comments, descriptions, etc. - - Fixed a copy&paste typo inside `meta_checktxnid()`. - - Minor fix of `meta_checktxnid()` to avoid an assertion in debug mode. - - Minor fix of `mdbx_env_set_geometry()` to avoid returning `EINVAL` in particular rare cases. - - Minor refine/fix of the batch-get testcase for large page sizes. - - Added a `--pagesize NN` option to the long-stochastic test script. - - Updated the Valgrind-suppressions file for modern GCC. - - Fixed the `has no symbols` warning from Apple's ranlib. - - --------------------------------------------------------------------------------- - - -## v0.11.8 "Baked Apple" at 2022-06-12 - -The stable release with important fixes and a workaround for the critical macOS thread-local-storage issue. - -Acknowledgments: - - - [Masatoshi Fukunaga](https://github.com/mah0x211) for [Lua bindings](https://github.com/mah0x211/lua-libmdbx). - -New: - - - Added most of the transaction flags to the public API. - - Added the `MDBX_NOSUCCESS_EMPTY_COMMIT` build option to return a non-success result (`MDBX_RESULT_TRUE`) on an empty commit. - - Reworked validation and import of DBI-handles into a transaction. It is assumed these changes will be invisible to most users, but will cause fewer surprises in complex DBI cases. - - Added the ability to open a DB in without-LCK (exclusive read-only) mode in case there are no permissions to create/write the LCK-file. - -Fixes: - - - A series of fixes and improvements for the automatically generated documentation (Doxygen). - - Fixed a copy&paste bug which could lead to `SIGSEGV` (nullptr dereference) in the exclusive/no-lck mode.
- - Fixed minor warnings from Apple's modern CLANG 13. - - Fixed minor warnings from CLANG 14 and the in-development CLANG 15. - - Fixed a `SIGSEGV` regression in without-LCK (exclusive read-only) mode. - - Fixed `mdbx_check_fs_local()` for the CDROM case on Windows. - - Fixed a nasty typo of a typename which caused a false `MDBX_CORRUPTED` error in a rare execution path, when the size of the thread-ID type is not equal to 8. - - Fixed Elbrus/E2K LCC 1.26 compiler warnings (memory model for atomic operations, etc). - - Fixed write-after-free memory corruption on latest `macOS` during finalization/cleanup of thread(s) that executed read transaction(s). - > The issue was suddenly discovered by a [CI](https://en.wikipedia.org/wiki/Continuous_integration) after adding an iteration with macOS 11 "Big Sur", and then reproduced on a recent release of macOS 12 "Monterey". The issue was never noticed nor reported on macOS 10 "Catalina" nor others. Analysis showed that the problem was caused by a change in the behavior of the system library (internals of dyld and pthread) during thread finalization/cleanup: now the memory allocated for `__thread` variable(s) is released before execution of the registered Thread-Local-Storage destructor(s), thus a TLS-destructor will write-after-free just by a legitimate dereference of any `__thread` variable. This is unexpected crazy-like behavior since the order of resource releasing/destruction is not the reverse of the acquiring/construction order. Nonetheless such a surprise is now worked around by using atomic compare-and-swap operations on 64-bit signatures/cookies. - -Minors: - - - Refined the `release-assets` GNU Make target. - - Added logging to `mdbx_fetch_sdb()` to help debug complex DBI-handles use cases. - - Added an explicit error message from the probe of no-support for `std::filesystem`. - - Added a contributors "score" table by `git fame` to the generated docs. - - Added `mdbx_assert_fail()` to the public API (mostly for backtracing). - - Now C++20 concepts are used/enabled only when `__cpp_lib_concepts >= 202002`. - - Don't provide nor report package information if used as a CMake subproject. - - --------------------------------------------------------------------------------- - - -## v0.11.7 "Resurrected Sarmat" at 2022-04-22 - -The stable risen release after the Github's intentional malicious disaster. - -#### We have migrated to a reliable trusted infrastructure -The origin is now at [GitFlic](https://gitflic.ru/project/erthink/libmdbx) since on 2022-04-15 the Github administration, without any warning nor explanation, deleted _libmdbx_ along with a lot of other projects, simultaneously blocking access for many developers. For the same reason ~~Github~~ is blacklisted forever. - -GitFlic already supports the Russian and English languages, and plans to support more, including Chinese (中文). You are welcome! - -New: - - - Added the `tools-static` make target to build statically linked MDBX tools. - - Support for Microsoft Visual Studio 2022. - - Support for building by MinGW's make from the command line without CMake. - - Added the `mdbx::filesystem` C++ API namespace that corresponds to `std::filesystem` or `std::experimental::filesystem`. - - Created a [website](https://libmdbx.dqdkfa.ru/) for the online auto-generated documentation. - - Used `https://web.archive.org/web/https://github.com/erthink/libmdbx` for dead (or temporarily lost) resources deleted by ~~Github~~. - - Added a `--loglevel=` command-line option to the `mdbx_test` tool. - - Added a few fast smoke-like tests into the CMake builds.
- -Fixes: - - - Fixed a race between starting a transaction and creating a DBI descriptor that could lead to `SIGSEGV` in the cursor tracking code. - - Clarified the description of the `MDBX_EPERM` error returned from `mdbx_env_set_geometry()`. - - Fixed failure to promote the parent transaction to dirty in case the undo of the geometry update failed during abortion of a nested transaction. - - Resolved linking issues with `libstdc++fs`/`libc++fs`/`libc++experimental` for C++ `std::filesystem` or `std::experimental::filesystem` for legacy compilers. - - Added a workaround for GNU Make 3.81 and earlier. - - Added a workaround for an Elbrus/LCC 1.25 compiler bug with class inline `static constexpr` member fields. - - [Fixed](https://github.com/ledgerwatch/erigon/issues/3874) a minor assertion regression (only debug builds were affected). - - Fixed detection of `C++20` concepts accessibility. - - Fixed detection of Clang's LTO availability for Android. - - Fixed an extra definition of `_FILE_OFFSET_BITS=64` for Android that is problematic for 32-bit Bionic. - - Fixed the build for ARM/ARM64 by MSVC. - - Fixed non-x86 Windows builds with `MDBX_WITHOUT_MSVC_CRT=ON` and `MDBX_BUILD_SHARED_LIBRARY=ON`. - -Minors: - - - Resolved minor MSVC warnings: avoid `/INCREMENTAL[:YES]` with `/LTCG`, `/W4` with `/W3`, the `C5105` warning. - - Switched to using `MDBX_EPERM` instead of `MDBX_RESULT_TRUE` to indicate that the geometry cannot be updated. - - Added `NULL` checking during memory allocation inside `mdbx_chk`. - - Resolved all warnings from MinGW when used without CMake. - - Added an inheritable `target_include_directories()` to `CMakeLists.txt` for easy integration. - - Added build-time checks and paranoid runtime assertions for the `off_t` arguments of `fcntl()` which are used for locking. - - Added `-Wno-lto-type-mismatch` to avoid false-positive warnings from old GCC during LTO-enabled builds. - - Added checking for the TID (system thread id) to avoid a hang on 32-bit Bionic/Android within `pthread_mutex_lock()`. - - Reworked `MDBX_BUILD_TARGET` for CMake builds. - - Added `CMAKE_HOST_ARCH` and `CMAKE_HOST_CAN_RUN_EXECUTABLES_BUILT_FOR_TARGET`. - - --------------------------------------------------------------------------------- - - -## v0.11.6 at 2022-03-24 - -The stable release with the complete workaround for an incoherence flaw of the Linux unified page/buffer cache. Nonetheless the cause of this trouble may be an issue of the Intel CPU cache/MESI. See [issue#269](https://libmdbx.dqdkfa.ru/dead-github/issues/269) for more information. - -Acknowledgments: - - - [David Bouyssié](https://github.com/david-bouyssie) for [Scala bindings](https://github.com/david-bouyssie/mdbx4s). - - [Michelangelo Riccobene](https://github.com/mriccobene) for reporting and testing. - -Fixes: - - - [Added a complete workaround](https://libmdbx.dqdkfa.ru/dead-github/issues/269) for an incoherence flaw of the Linux unified page/buffer cache. - - [Fixed](https://libmdbx.dqdkfa.ru/dead-github/issues/272) cursor reusing for read-only transactions. - - Fixed a copy&paste typo inside `mdbx::cursor::find_multivalue()`. - -Minors: - - - Minor C++ API refinements for convenience. - - Minor internal refinements. - - Added `lib-static` and `lib-shared` targets for make. - - Added a minor workaround for an AppleClang 13.3 bug. - - Clarified the error messages of a signature/version mismatch. - - --------------------------------------------------------------------------------- - - -## v0.11.5 at 2022-02-23 - -The release with the temporary hotfix for a flaw of the Linux unified page/buffer cache.
-See [issue#269](https://libmdbx.dqdkfa.ru/dead-github/issues/269) for more information. - -Acknowledgments: - - - [Simon Leier](https://github.com/leisim) for reporting and testing. - - [Kai Wetlesen](https://github.com/kaiwetlesen) for [RPMs](http://copr.fedorainfracloud.org/coprs/kwetlesen/libmdbx/). - - [Tullio Canepa](https://github.com/canepat) for reporting a C++ API issue and contributing. - -Fixes: - - - [Added a hotfix](https://libmdbx.dqdkfa.ru/dead-github/issues/269) for a flaw of the Linux unified page/buffer cache. - - [Fixed/Reworked](https://libmdbx.dqdkfa.ru/dead-github/pull/270) move-assignment operators for the "managed" classes of the C++ API. - - Fixed a potential `SIGSEGV` while opening a DB with an overridden non-default page size. - - [Made](https://libmdbx.dqdkfa.ru/dead-github/issues/267) `mdbx_env_open()` idempotent in failure cases. - - Refined/fixed page reservation inside `mdbx_update_gc()` to avoid non-reclamation in rare cases. - - Fixed a typo in the retained-space calculation for the hsr-callback. - -Minors: - - - Reworked the functions for meta-pages, split off the non-volatile ones. - - Disentangled C11-atomic fences/barriers and pure functions (with `__attribute__((__pure__))`) to avoid compiler misoptimization. - - Fixed hypothetical unaligned access to 64-bit dwords on ARM with `__ARM_FEATURE_UNALIGNED` defined. - - Reasonable paranoia that adds clarity for code readers. - - Minor fixes of Doxygen references, comments, descriptions, etc. - - --------------------------------------------------------------------------------- - - -## v0.11.4 at 2022-02-02 - -The stable release with fixes for large and huge databases sized 4..128 TiB. - -Acknowledgments: - - - [Ledgerwatch](https://github.com/ledgerwatch), [Binance](https://github.com/binance-chain) and [Positive Technologies](https://www.ptsecurity.com/) teams for reporting, assistance in investigation and testing. - - [Alex Sharov](https://github.com/AskAlexSharov) for reporting, testing and providing resources for remote debugging/investigation. - - [Kris Zyp](https://github.com/kriszyp) for [Deno](https://deno.land/) support. - -New features, extensions and improvements: - - - Added treating the `UINT64_MAX` value as the maximum for a given option inside `mdbx_env_set_option()`. - - Added `to_hex/to_base58/to_base64::output(std::ostream&)` overloads without using temporary string objects as buffers. - - Added a `--geometry-jitter=YES|no` option to the test framework. - - Added [Deno](https://deno.land/) support by [Kris Zyp](https://github.com/kriszyp). - -Fixes: - - - Fixed handling of `MDBX_opt_rp_augment_limit` for GC records from huge transactions (Erigon/Akula/Ethereum). - - [Fixed](https://libmdbx.dqdkfa.ru/dead-github/issues/258) the build on Android (avoid including `sys/sem.h`). - - [Fixed](https://libmdbx.dqdkfa.ru/dead-github/pull/261) the missing copy assignment operator for `mdbx::move_result`. - - Fixed the missing `&` for `std::ostream &operator<<()` overloads. - - Fixed an unexpected `EXDEV` (Cross-device link) error from `mdbx_env_copy()`. - - Fixed base64 encoding/decoding bugs in the auxiliary C++ API. - - Fixed an overflow of `pgno_t` while checking PNL on 64-bit platforms. - - [Fixed](https://libmdbx.dqdkfa.ru/dead-github/issues/260) excessive PNL checking after sort for spilling. - - Reworked checking of `MAX_PAGENO` and the DB upper-size geometry limit. - - [Fixed](https://libmdbx.dqdkfa.ru/dead-github/issues/265) the build for some combinations of versions of MSVC and the Windows SDK.
- -Minors: - - - Added workaround for CLANG bug [D79919/PR42445](https://reviews.llvm.org/D79919). - - Fixed build test on Android (using `pthread_barrier_t` stub). - - Disabled C++20 concepts for CLANG < 14 on Android. - - Fixed minor `unused parameter` warning. - - Added CI for Android. - - Refined/cleaned up internal logging. - - Refined line splitting inside hex/base58/base64 encoding to avoid `\n` at the end. - - Added workaround for modern libstdc++ with CLANG < 4.x. - - Relaxed txn-check rules for auxiliary functions. - - Clarified comments and descriptions, etc. - - Using the `-fno-semantic-interposition` option to reduce the overhead of calling its own public functions. - - --------------------------------------------------------------------------------- - - -## v0.11.3 at 2021-12-31 - -Acknowledgments: - - - [gcxfd](https://github.com/gcxfd) for reporting, contributing and testing. - - [장세연 (Чан Се Ен)](https://github.com/sasgas) for reporting and testing. - - [Alex Sharov](https://github.com/AskAlexSharov) for reporting, testing and providing resources for remote debugging/investigation. - -New features, extensions and improvements: - - - [Added](https://libmdbx.dqdkfa.ru/dead-github/issues/236) `mdbx_cursor_get_batch()`. - - [Added](https://libmdbx.dqdkfa.ru/dead-github/issues/250) `MDBX_SET_UPPERBOUND` (a usage sketch follows the v0.10.4 notes below). - - C++ API is finalized now. - - The GC update stage has been [significantly sped up](https://libmdbx.dqdkfa.ru/dead-github/issues/254) when fixing huge Erigon transactions (Ethereum ecosystem). - -Fixes: - - - Disabled C++20 concepts for stupid AppleClang 13.x. - - Fixed internal collision of `MDBX_SHRINK_ALLOWED` with `MDBX_ACCEDE`. - -Minors: - - - Fixed returning `MDBX_RESULT_TRUE` (unexpected -1) from `mdbx_env_set_option()`. - - Added `mdbx_env_get_syncbytes()` and `mdbx_env_get_syncperiod()`. - - [Clarified](https://libmdbx.dqdkfa.ru/dead-github/pull/249) description of `MDBX_INTEGERKEY`. - - Reworked/simplified `mdbx_env_sync_internal()`. - - [Fixed](https://libmdbx.dqdkfa.ru/dead-github/issues/248) extra assertion inside `mdbx_cursor_put()` for `MDBX_DUPFIXED` cases. - - Avoiding extra looping inside `mdbx_env_info_ex()`. - - Explicitly enabled core dumps from stochastic test scripts on Linux. - - [Fixed](https://libmdbx.dqdkfa.ru/dead-github/issues/253) `mdbx_override_meta()` to avoid false-positive assertions. - - For compatibility reverted returning `MDBX_ENODATA` for some cases. - - --------------------------------------------------------------------------------- - - -## v0.11.2 at 2021-12-02 - -Acknowledgments: - - - [장세연 (Чан Се Ен)](https://github.com/sasgas) for contributing to C++ API. - - [Alain Picard](https://github.com/castortech) for [Java bindings](https://github.com/castortech/mdbxjni). - - [Alex Sharov](https://github.com/AskAlexSharov) for reporting and testing. - - [Kris Zyp](https://github.com/kriszyp) for reporting and testing. - - [Artem Vorotnikov](https://github.com/vorot93) for supporting the [Rust wrapper](https://github.com/vorot93/libmdbx-rs). - -Fixes: - - - [Fixed compilation](https://libmdbx.dqdkfa.ru/dead-github/pull/239) with `devtoolset-9` on CentOS/RHEL 7. - - [Fixed unexpected `MDBX_PROBLEM` error](https://libmdbx.dqdkfa.ru/dead-github/issues/242) because of updating an obsolete meta-page. - - [Fixed returning `MDBX_NOTFOUND` error](https://libmdbx.dqdkfa.ru/dead-github/issues/243) in case an inexact value is found for the `MDBX_GET_BOTH` operation.
- - [Fixed compilation](https://libmdbx.dqdkfa.ru/dead-github/issues/245) without kernel/libc-devel headers. - -Minors: - - - Fixed `constexpr`-related macros for legacy compilers. - - Allowed defining `CMAKE_CXX_STANDARD` using an environment variable. - - Simplified collecting statistics of page operations. - - Added `MDBX_FORCE_BUILD_AS_MAIN_PROJECT` cmake option. - - Removed unneeded `#undef P_DIRTY`. - - --------------------------------------------------------------------------------- - - -## v0.11.1 at 2021-10-23 - -### Backward compatibility break: - -The database format signature has been changed to prevent -forward-interoperability with previous releases, which may lead to a -[false positive diagnosis of database corruption](https://libmdbx.dqdkfa.ru/dead-github/issues/238) -due to flaws of old library versions. - -This change is mostly invisible: - - - previous versions are unable to read/write new DBs; - - but the new release is able to handle old DBs and will silently upgrade them. - -Acknowledgments: - - - [Alex Sharov](https://github.com/AskAlexSharov) for reporting and testing. - - -******************************************************************************** - - -## v0.10.5 at 2021-10-13 (obsolete, please use v0.11.1) - -Unfortunately, `v0.10.5` is accidentally not fully compatible with previous releases: - - - `v0.10.5` can read/process DBs created by previous releases, i.e. the backward-compatibility is provided; - - however, previous releases may report a false-corrupted state for a DB that was touched by `v0.10.5`, i.e. the forward-compatibility is broken for `v0.10.4` and earlier. - -This cannot be fixed, as it would require fixing past versions, which would in effect just reproduce the current version. -Therefore, it is recommended to use `v0.11.1` instead of `v0.10.5`. - -Acknowledgments: - - - [Noel Kuntze](https://github.com/Thermi) for immediate bug reporting. - -Fixes: - - - Fixed unaligned access regression after the `#pragma pack` fix for modern compilers. - - Added UBSAN-test to CI to avoid regressions similar to the lately fixed ones. - - Fixed possibility of meta-pages clashing after manually switching to a particular meta-page using the `mdbx_chk` utility. - -Minors: - - - Refined handling of weak or invalid meta-pages while opening a DB. - - Refined providing information for the `@MAIN` and `@GC` sub-databases about the last committed modification transaction's ID. - - --------------------------------------------------------------------------------- - - -## v0.10.4 at 2021-10-10 - -Acknowledgments: - - - [Artem Vorotnikov](https://github.com/vorot93) for supporting the [Rust wrapper](https://github.com/vorot93/libmdbx-rs). - - [Andrew Ashikhmin](https://github.com/yperbasis) for contributing to C++ API. - -Fixes: - - - Fixed possibility of a looping GC update during transaction commit (no public issue since the problem was discovered inside [Positive Technologies](https://www.ptsecurity.ru)). - - Fixed `#pragma pack` to avoid provoking some compilers to generate code with [unaligned access](https://libmdbx.dqdkfa.ru/dead-github/issues/235). - - Fixed `noexcept` for potentially throwing `txn::put()` of C++ API. - -Minors: - - - Added stochastic test script for checking small-transaction cases. - - Removed extra transaction commit/restart inside test framework. - - Fixed a too-small (single page) default DB shrink threshold in debugging builds.
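A minimal hedged sketch of a range scan using the `MDBX_SET_LOWERBOUND` cursor operation, whose `MDBX_SET_UPPERBOUND` counterpart was added in v0.11.3 above (the key names and the already-open `txn`/`dbi` are assumptions for illustration, not part of the original ChangeLog):

```c
#include "mdbx.h"
#include <string.h>

/* Sketch: iterate all pairs with "KEY1" <= key <= "KEY2". */
int scan_range(MDBX_txn *txn, MDBX_dbi dbi) {
  MDBX_cursor *cursor;
  int rc = mdbx_cursor_open(txn, dbi, &cursor);
  if (rc != MDBX_SUCCESS)
    return rc;

  MDBX_val key = {.iov_base = "KEY1", .iov_len = 4};
  MDBX_val data = {.iov_base = NULL, .iov_len = 0};
  /* Position at the first pair >= "KEY1"; per the docs MDBX_RESULT_TRUE
   * signals an inexact (greater) match, MDBX_NOTFOUND means nothing at/after. */
  rc = mdbx_cursor_get(cursor, &key, &data, MDBX_SET_LOWERBOUND);
  while (rc == MDBX_SUCCESS || rc == MDBX_RESULT_TRUE) {
    /* Simplified upper-bound check assuming short ASCII keys. */
    if (key.iov_len >= 4 && memcmp(key.iov_base, "KEY2", 4) > 0)
      break; /* past the end of the range */
    /* ... use key/data here ... */
    rc = mdbx_cursor_get(cursor, &key, &data, MDBX_NEXT);
  }
  mdbx_cursor_close(cursor);
  return (rc == MDBX_NOTFOUND) ? MDBX_SUCCESS : rc;
}
```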
- - --------------------------------------------------------------------------------- - - -## v0.10.3 at 2021-08-27 - -Acknowledgments: - - - [Francisco Vallarino](https://github.com/fjvallarino) for [Haskell bindings for libmdbx](https://hackage.haskell.org/package/libmdbx). - - [Alex Sharov](https://github.com/AskAlexSharov) for reporting and testing. - - [Andrea Lanfranchi](https://github.com/AndreaLanfranchi) for contributing. - -Extensions and improvements: - - - Added `cursor::erase()` overloads for `key` and for `key-value`. - - Resolved minor Coverity Scan issues (no fixes, but some hints/comments were added). - - Resolved minor UndefinedBehaviorSanitizer issues (no fixes, but some workarounds were added). - -Fixes: - - - Always set up `madvise` while opening DB (fixes https://libmdbx.dqdkfa.ru/dead-github/issues/231). - - Fixed checking legacy `P_DIRTY` flag (`0x10`) for nested/sub-pages. - -Minors: - - - Fixed getting the revision number from the middle of history during amalgamation (GNU Makefile). - - Fixed searching for GCC tools for LTO (CMake scripts). - - Fixed/reordered the directory list for searching CLANG tools for LTO (CMake scripts). - - Fixes/workarounds for CLANG < 9.x. - - Fixed CMake warning about compatibility with 3.8.2. - - --------------------------------------------------------------------------------- - - -## v0.10.2 at 2021-07-26 - -Acknowledgments: - - - [Alex Sharov](https://github.com/AskAlexSharov) for reporting and testing. - - [Andrea Lanfranchi](https://github.com/AndreaLanfranchi) for reporting bugs. - - [Lionel Debroux](https://github.com/debrouxl) for fuzzing tests and reporting bugs. - - [Sergey Fedotov](https://github.com/SergeyFromHell/) for [`node-mdbx` NodeJS bindings](https://www.npmjs.com/package/node-mdbx). - - [Kris Zyp](https://github.com/kriszyp) for [`lmdbx-store` NodeJS bindings](https://github.com/kriszyp/lmdbx-store). - - [Noel Kuntze](https://github.com/Thermi) for [draft Python bindings](https://libmdbx.dqdkfa.ru/dead-github/commits/python-bindings). - -New features, extensions and improvements: - - - [Allowed predefining/overriding `MDBX_BUILD_TIMESTAMP` for build reproducibility](https://libmdbx.dqdkfa.ru/dead-github/issues/201). - - Added options support for `long-stochastic` script. - - Avoided `MDBX_TXN_FULL` error for large transactions when possible. - - The `MDBX_READERS_LIMIT` increased to `32767`. - - Raise `MDBX_TOO_LARGE` under Valgrind/ASAN if the DB being opened is 100 times larger than RAM (to avoid hangs and OOM). - - Minimized the size of poisoned/unpoisoned regions to avoid Valgrind/ASAN getting stuck. - - Added more workarounds for QEMU for testing builds for 32-bit platforms, Alpha and Sparc architectures. - - `mdbx_chk` now skips iteration & checking of DB records if the corresponding page-tree is corrupted (to avoid `SIGSEGV`, ASAN failures, etc). - - Added more checks for [rare/fuzzing corruption cases](https://libmdbx.dqdkfa.ru/dead-github/issues/217). - -Backward compatibility break: - - - Use file `VERSION.txt` for version information instead of `VERSION` to avoid collision with `#include `. - - Rename `slice::from/to_FOO_bytes()` to `slice::envisage_from/to_FOO_length()`. - - Rename the `MDBX_TEST_EXTRA` make variable to `MDBX_SMOKE_EXTRA`. - - Some details of the C++ API have been changed for subsequent freezing. - -Fixes: - - - Fixed excess meta-page checks when `mdbx_chk` is called to check the DB against a specific meta-page, which could prevent switching to the selected meta-page even if the check passed without errors.
- - [Fixed recursive use of SRW-lock on Windows caused by `MDBX_NOTLS` option](https://libmdbx.dqdkfa.ru/dead-github/issues/203). - - [Fixed logging a warning during a new DB creation](https://libmdbx.dqdkfa.ru/dead-github/issues/205). - - [Fixed false-negative `mdbx_cursor_eof()` result](https://libmdbx.dqdkfa.ru/dead-github/issues/207). - - [Fixed `make install` with non-GNU `install` utility (OSX, BSD)](https://libmdbx.dqdkfa.ru/dead-github/issues/208). - - [Fixed installation by `CMake` in special cases by completely using `GNUInstallDirs` variables](https://libmdbx.dqdkfa.ru/dead-github/issues/209). - - [Fixed C++ Buffer issue with `std::string` and alignment](https://libmdbx.dqdkfa.ru/dead-github/issues/191). - - Fixed `safe64_reset()` for platforms without atomic 64-bit compare-and-swap. - - Fixed hang/shutdown on big-endian platforms without `__cxa_thread_atexit()`. - - [Fixed using bad meta-pages if DB was partially/recoverably corrupted](https://libmdbx.dqdkfa.ru/dead-github/issues/217). - - Fixed extra `noexcept` for `buffer::assign_reference()`. - - Fixed `bootid` generation on Windows for the case of system time change. - - [Fixed test framework keygen-related issue](https://libmdbx.dqdkfa.ru/dead-github/issues/127). - - --------------------------------------------------------------------------------- - - -## v0.10.1 at 2021-06-01 - -Acknowledgments: - - - [Alexey Akhunov](https://github.com/AlexeyAkhunov) and [Alex Sharov](https://github.com/AskAlexSharov) for bug reporting and testing. - - [Andrea Lanfranchi](https://github.com/AndreaLanfranchi) for bug reporting and testing related to WSL2. - -New features: - - - Added `-p` option to `mdbx_stat` utility for printing page operation statistics. - - Added explicit checking for and warning about using unfit GitHub archives. - - Added fallback from [OFD locking](https://bit.ly/3yFRtYC) to legacy non-OFD POSIX file locks on an `EINVAL` error. - - Added the [Plan 9](https://en.wikipedia.org/wiki/9P_(protocol)) network file system to the whitelist for the ability to open a DB in exclusive mode. - - Support for opening, from a WSL2 environment, a DB hosted on a Windows drive and mounted via [DrvFs](https://docs.microsoft.com/it-it/archive/blogs/wsl/wsl-file-system-support#drvfs) (i.e. by Plan 9 noted above). - -Fixes: - - - Fixed minor "foo not used" warnings from modern C++ compilers when building the C++ part of the library. - - Fixed confusing/messy errors when building the library from unfit GitHub archives (https://libmdbx.dqdkfa.ru/dead-github/issues/197). - - Fixed `#elsif` typo. - - Fixed rare unexpected `MDBX_PROBLEM` error during altering data in huge transactions due to wrong spilling/ousting of dirty pages (https://libmdbx.dqdkfa.ru/dead-github/issues/195). - - Re-fixed WSL1/WSL2 detection, now distinguishing between them (https://libmdbx.dqdkfa.ru/dead-github/issues/97). - - --------------------------------------------------------------------------------- - - -## v0.10.0 at 2021-05-09 - -Acknowledgments: - - - [Mahlon E. Smith](https://github.com/mahlonsmith) for [Ruby bindings](https://rubygems.org/gems/mdbx/). - - [Alex Sharov](https://github.com/AskAlexSharov) for [mdbx-go](https://github.com/torquem-ch/mdbx-go), bug reporting and testing. - - [Artem Vorotnikov](https://github.com/vorot93) for bug reporting and PR. - - [Paolo Rebuffo](https://www.linkedin.com/in/paolo-rebuffo-8255766/), [Alexey Akhunov](https://github.com/AlexeyAkhunov) and Mark Grosberg for donations.
- - [Noel Kuntze](https://github.com/Thermi) for preliminary [Python bindings](https://github.com/Thermi/libmdbx/tree/python-bindings) - -New features: - - - Added `mdbx_env_set_option()` and `mdbx_env_get_option()` for controlling - various runtime options for an environment (the announcement of this feature was missed in previous news). - - Added `MDBX_DISABLE_PAGECHECKS` build option to disable some checks to reduce the overhead - and the probability of detecting database corruption to values closer to LMDB. - `MDBX_DISABLE_PAGECHECKS=1` provides a performance boost of about 10% in CRUD scenarios, - and conjointly with the `MDBX_ENV_CHECKPID=0` and `MDBX_TXN_CHECKOWNER=0` options can yield - up to 30% more performance compared to LMDB. - - Using a floating-point (exponentially quantized) representation for internal 16-bit values - of the grow step and shrink threshold when they are huge (https://libmdbx.dqdkfa.ru/dead-github/issues/166). - To minimize the impact on compatibility, only the odd values inside the upper half - of the range (i.e. 32769..65533) are used for the new representation. - - Added the `mdbx_drop` command-line tool, similar to LMDB's, to purge or delete (sub)database(s). - - [Ruby bindings](https://rubygems.org/gems/mdbx/) are now available, by [Mahlon E. Smith](https://github.com/mahlonsmith). - - Added `MDBX_ENABLE_MADVISE` build option which controls the use of POSIX `madvise()` hints and friends. - - The internal node sizes were refined, resulting in a reduction of large/overflow pages in some use cases - and a slight increase of the key size limit to ≈½ of the page size. - - Added the number of keys/items on pages to `mdbx_chk` output. - - Added explicit `install-strip` and `install-no-strip` targets to the `Makefile` (https://libmdbx.dqdkfa.ru/dead-github/pull/180). - - Major rework of page splitting (af9b7b560505684249b76730997f9e00614b8113) for: - - an "auto-appending" feature upon insertion for both ascending and - descending key sequences; as a result, the optimality of page filling - increases significantly (more dense, less slackness) while - inserting ordered sequences of keys; - - a "splitting at middle" to make the page tree more balanced on average. - - Added `mdbx_get_sysraminfo()` to the API. - - Added guessing of a reasonable maximum DB size for the default upper limit of geometry (https://libmdbx.dqdkfa.ru/dead-github/issues/183). - - Major rework of internal labeling of dirty pages (958fd5b9479f52f2124ab7e83c6b18b04b0e7dda) for - a "transparent spilling" feature, the gist of which is to make dirty pages - ready for spilling (writing to disk) without further altering them. - Thus, in the `MDBX_WRITEMAP` mode the OS kernel is able to oust dirty pages - to the DB file without further penalty during transaction commit. - As a result, page swapping and I/O could be significantly reduced during extra-large transactions and/or lack of memory. - - Minimized reading of leaf-pages during dropping of subDB(s) and nested trees. - - Major rework of spilling of dirty pages to support an [LRU](https://en.wikipedia.org/wiki/Cache_replacement_policies#Least_recently_used_(LRU)) - policy and prioritization for large/overflow pages. - - Statistics of page operations (split, merge, copy, spill, etc) are now available through `mdbx_env_info_ex()`. - - Auto-setup of the limit for the length of the dirty pages list (`MDBX_opt_txn_dp_limit` option). - - Support `make options` to list available build options. - - Support `make help` to list available make targets. - - Silent `make` build by default.
- - Preliminary [Python bindings](https://github.com/Thermi/libmdbx/tree/python-bindings) are now available - by [Noel Kuntze](https://github.com/Thermi) (https://libmdbx.dqdkfa.ru/dead-github/issues/147). - -Backward compatibility break: - - - The `MDBX_AVOID_CRT` build option was renamed to `MDBX_WITHOUT_MSVC_CRT`. - This option is only relevant when building for Windows. - - The `mdbx_env_stat()` always, and `mdbx_env_stat_ex()` when called with a zeroed transaction parameter, - now internally start a temporary read transaction and thus may return the `MDBX_BAD_RSLOT` error. - So, just never use the deprecated `mdbx_env_stat()` and call `mdbx_env_stat_ex()` with a transaction parameter. - - The build option `MDBX_CONFIG_MANUAL_TLS_CALLBACK` was removed and now just a non-zero value of - the `MDBX_MANUAL_MODULE_HANDLER` macro indicates the requirement to manually call `mdbx_module_handler()` - when loading libraries and applications that use statically linked libmdbx on obsolete Windows versions. - -Fixes: - - - Fixed performance regression due to non-optimal C11 atomics usage (https://libmdbx.dqdkfa.ru/dead-github/issues/160). - - Fixed "reincarnation" of a subDB after its deletion (https://libmdbx.dqdkfa.ru/dead-github/issues/168). - - Fixed (disallowing) implicit subDB deletion via operations on `@MAIN`'s DBI-handle. - - Fixed a crash of `mdbx_env_info_ex()` in case of a call for a non-open environment (https://libmdbx.dqdkfa.ru/dead-github/issues/171). - - Fixed selecting/adjusting values inside `mdbx_env_set_geometry()` for implicit out-of-range cases (https://libmdbx.dqdkfa.ru/dead-github/issues/170). - - Fixed `mdbx_env_set_option()` for setting the initial and limit size of the dirty page list (https://libmdbx.dqdkfa.ru/dead-github/issues/179). - - Fixed an unreasonably huge default upper limit for DB geometry (https://libmdbx.dqdkfa.ru/dead-github/issues/183). - - Fixed `constexpr` specifier for `slice::invalid()`. - - Fixed (no)readahead auto-handling (https://libmdbx.dqdkfa.ru/dead-github/issues/164). - - Fixed non-alloy build for Windows. - - Switched to using Heap-functions instead of LocalAlloc/LocalFree on Windows. - - Fixed `mdbx_env_stat_ex()` to return statistics of the whole environment instead of MainDB only (https://libmdbx.dqdkfa.ru/dead-github/issues/190). - - Fixed building by GCC 4.8.5 (added a workaround for a preprocessor bug). - - Fixed building the C++ part for iOS <= 13.0 (unavailability of `std::filesystem::path`). - - Fixed building for Windows target versions prior to Windows Vista (`WIN32_WINNT < 0x0600`). - - Fixed building by MinGW for Windows (https://libmdbx.dqdkfa.ru/dead-github/issues/155). - - -******************************************************************************** - - -## v0.9.3 at 2021-02-02 - -Acknowledgments: - - - [Mahlon E. Smith](http://www.martini.nu/) for [FreeBSD port of libmdbx](https://svnweb.freebsd.org/ports/head/databases/mdbx/). - - [장세연](http://www.castis.com) for bug fixing and PR. - - [Clément Renault](https://github.com/Kerollmops/heed) for [Heed](https://github.com/Kerollmops/heed), a fully typed Rust wrapper. - - [Alex Sharov](https://github.com/AskAlexSharov) for bug reporting. - - [Noel Kuntze](https://github.com/Thermi) for bug reporting. - -Removed options and features: - - - Dropped the `MDBX_HUGE_TRANSACTIONS` build option (no longer required). - -New features: - - - A package for FreeBSD is now available, by Mahlon E. Smith.
- - New API functions to get/set various options (https://libmdbx.dqdkfa.ru/dead-github/issues/128), as shown in the sketch after this section: - - the maximum number of named databases for the environment; - - the maximum number of threads/reader slots; - - threshold (since the last unsteady commit) to force flush the data buffers to disk; - - relative period (since the last unsteady commit) to force flush the data buffers to disk; - - limit to grow the list of reclaimed/recycled page numbers for finding a sequence of contiguous pages for large data items; - - limit to grow a cache of dirty pages for reuse in the current transaction; - - limit of pre-allocated memory items for dirty pages; - - limit of dirty pages for a write transaction; - - initial allocation size for the dirty pages list of a write transaction; - - maximal part of the dirty pages that may be spilled when necessary; - - minimal part of the dirty pages that should be spilled when necessary; - - how much of the parent transaction's dirty pages will be spilled when starting each child transaction; - - Unlimited/Dynamic size of retired and dirty page lists (https://libmdbx.dqdkfa.ru/dead-github/issues/123). - - Added `-p` option (purge subDB before loading) to `mdbx_load` tool. - - Reworked spilling of large transactions and committing of nested transactions: - - page spilling code reworked to avoid the flaws and bugs inherited from LMDB; - - the limit for the number of dirty pages is now controllable at runtime; - - spilled pages, including overflow/large pages, can now be reused and refunded/compactified in nested transactions; - - more effective refunding/compactification, especially for the loose page cache. - - Added `MDBX_ENABLE_REFUND` and `MDBX_PNL_ASCENDING` internal/advanced build options. - - Added `mdbx_default_pagesize()` function. - - Better support for architectures with a weak/relaxed memory consistency model (ARM, AARCH64, PPC, MIPS, RISC-V, etc) by means of [C11 atomics](https://en.cppreference.com/w/c/atomic). - - Sped up page number lists and dirty page lists (https://libmdbx.dqdkfa.ru/dead-github/issues/132). - - Added `LIBMDBX_NO_EXPORTS_LEGACY_API` build option. - -Fixes: - - - Fixed missing cleanup (null assigned) in the C++ commit/abort (https://libmdbx.dqdkfa.ru/dead-github/pull/143). - - Fixed `mdbx_realloc()` for the case of nullptr and `MDBX_WITHOUT_MSVC_CRT=ON` for Windows. - - Fixed the possibility of using invalid and renewed (closed & re-opened, dropped & re-created) DBI-handles (https://libmdbx.dqdkfa.ru/dead-github/issues/146). - - Fixed 4-byte aligned access to 64-bit integers, including access to the `bootid` meta-page's field (https://libmdbx.dqdkfa.ru/dead-github/issues/153). - - Fixed minor/potential memory leak during page flushing and unspilling. - - Fixed handling states of cursors and subDBs for nested transactions. - - Fixed a page leak in the extra-rare case when the list of retired pages changed during GC update on transaction commit. - - Fixed assertions to avoid false-positive UB detection by CLANG/LLVM (https://libmdbx.dqdkfa.ru/dead-github/issues/153). - - Fixed `MDBX_TXN_FULL` and regressive `MDBX_KEYEXIST` during large transaction commit with `MDBX_LIFORECLAIM` (https://libmdbx.dqdkfa.ru/dead-github/issues/123). - - Fixed auto-recovery (`weak->steady` with the same boot-id) when the database size at the last weak checkpoint is larger than at the last steady checkpoint. - - Fixed operation on systems with unusually small/large page sizes, including PowerPC (https://libmdbx.dqdkfa.ru/dead-github/issues/157).
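A hedged sketch of tuning several of the options listed above via the `mdbx_env_set_option()`/`mdbx_env_get_option()` API (the option identifiers used here exist in `mdbx.h`, but the values are illustrative only, not recommendations):

```c
#include "mdbx.h"
#include <stdint.h>

/* Sketch: tune a few runtime limits; note that some options
 * (e.g. the reader-slot count) must be set before mdbx_env_open(). */
int tune_env(MDBX_env *env) {
  /* The maximum number of threads/reader slots. */
  int rc = mdbx_env_set_option(env, MDBX_opt_max_readers, 256);
  if (rc != MDBX_SUCCESS)
    return rc;
  /* Limit of dirty pages for a write transaction. */
  rc = mdbx_env_set_option(env, MDBX_opt_txn_dp_limit, 65536);
  if (rc != MDBX_SUCCESS)
    return rc;
  /* Limit to grow the list of reclaimed/recycled page numbers. */
  rc = mdbx_env_set_option(env, MDBX_opt_rp_augment_limit, 1u << 20);
  if (rc != MDBX_SUCCESS)
    return rc;
  /* Read an option back to confirm the effective value. */
  uint64_t value;
  return mdbx_env_get_option(env, MDBX_opt_txn_dp_limit, &value);
}
```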
- - --------------------------------------------------------------------------------- - - -## v0.9.2 at 2020-11-27 - -Acknowledgments: - - - Jens Alfke (Mobile Architect at [Couchbase](https://www.couchbase.com/)) for [NimDBX](https://github.com/snej/nimdbx). - - Clément Renault (CTO at [MeiliSearch](https://www.meilisearch.com/)) for [mdbx-rs](https://github.com/Kerollmops/mdbx-rs). - - Alex Sharov (Go-Lang Tech Lead at [TurboGeth/Ethereum](https://ethereum.org/)) for extreme test cases and bug reporting. - - George Hazan (CTO at [Miranda NG](https://www.miranda-ng.org/)) for bug reporting. - - [Positive Technologies](https://www.ptsecurity.com/) for funding and [The Standoff](https://standoff365.com/). - -Added features: - - - Provided package for [buildroot](https://buildroot.org/). - - A binding for Nim is now [available](https://github.com/snej/nimdbx), by Jens Alfke. - - Added `mdbx_env_delete()` for deleting environment files in a proper and multiprocess-safe way. - - Added `mdbx_txn_commit_ex()` with collecting latency information. - - Fast completion of pure nested transactions. - - Added `LIBMDBX_INLINE_API` macro and inline versions of some API functions. - - Added `mdbx_cursor_copy()` function. - - Extended tests for checking cursor tracking. - - Added `MDBX_SET_LOWERBOUND` operation for `mdbx_cursor_get()`. - -Fixes: - - - Fixed missing installation of `mdbx.h++`. - - Fixed use of obsolete `__noreturn`. - - Fixed use of the `yield` instruction on ARM if unsupported. - - Added pthread workaround for buggy toolchain/cmake/buildroot. - - Fixed use of `pthread_yield()` for non-GLIBC. - - Fixed use of `RegGetValueA()` on Windows 2000/XP. - - Fixed use of `GetTickCount64()` on Windows 2000/XP. - - Fixed opening a DB on network shares (in the exclusive mode). - - Fixed copy&paste typos. - - Fixed minor false-positive GCC warning. - - Added workaround for broken `DEFINE_ENUM_FLAG_OPERATORS` from Windows SDK. - - Fixed cursor state after multimap/dupsort repeated deletes (https://libmdbx.dqdkfa.ru/dead-github/issues/121). - - Added `SIGPIPE` suppression for the internal thread during `mdbx_env_copy()`. - - Fixed extra-rare `MDBX_KEY_EXIST` error during `mdbx_commit()` (https://libmdbx.dqdkfa.ru/dead-github/issues/131). - - Fixed spilled pages checking (https://libmdbx.dqdkfa.ru/dead-github/issues/126). - - Fixed `mdbx_load` for 'plain text' and for the without-`-s name` cases (https://libmdbx.dqdkfa.ru/dead-github/issues/136). - - Fixed save/restore/commit of cursors for nested transactions. - - Fixed cursor states in rare/special cases (move next beyond end-of-data, after deletion and so on). - - Added workaround for MSVC 19.28 (Visual Studio 16.8) (but it may still hang during compilation). - - Fixed paranoid Clang C++ UB for bitwise operations with flags defined by enums. - - Fixed large pages checking (for compatibility and to avoid false-positive errors from `mdbx_chk`). - - Added workaround for Wine (https://github.com/miranda-ng/miranda-ng/issues/1209). - - Fixed `ERROR_NOT_SUPPORTED` while opening a DB by UNC pathnames (https://github.com/miranda-ng/miranda-ng/issues/2627). - - Added handling of the `EXCEPTION_POSSIBLE_DEADLOCK` condition for Windows. - - --------------------------------------------------------------------------------- - - -## v0.9.1 at 2020-09-30 - -Added features: - - - Preliminary C++ API with support for C++17 polymorphic allocators. - - [Online C++ API reference](https://libmdbx.dqdkfa.ru/) by Doxygen. - - Quick reference for Insert/Update/Delete operations.
- - Explicit `MDBX_SYNC_DURABLE` added to sync modes for API clarity. - - Explicit `MDBX_ALLDUPS` and `MDBX_UPSERT` for API clarity. - - Support for read transaction preparation (`MDBX_TXN_RDONLY_PREPARE` flag). - - Support for cursor preparation/(pre)allocation and reusing (`mdbx_cursor_create()` and `mdbx_cursor_bind()` functions). - - Support for checking the database using a specified meta-page (see `mdbx_chk -h`). - - Support for switching to a specific meta-page after checking (see `mdbx_chk -h`). - - Support for explicit reader thread (de)registration. - - The `mdbx_txn_break()` function to explicitly mark a transaction as broken. - - Improved handling of corrupted databases by the `mdbx_chk` utility and the `mdbx_walk_tree()` function. - - Improved DB corruption detection by checking parent-page-txnid. - - Improved opening of large DBs (> 4 GiB) from 32-bit code. - - Provided `pure-function` and `const-function` attributes to the C API. - - Support for a user-settable context for transactions & cursors. - - Revised API and documentation related to the Handle-Slow-Readers callback feature. - -Deprecated functions and flags: - - - For clarity and API simplification the `MDBX_MAPASYNC` flag is deprecated. - Just use `MDBX_SAFE_NOSYNC` or `MDBX_UTTERLY_NOSYNC` instead of it. - - `MDBX_oom_func`, `mdbx_env_set_oomfunc()` and `mdbx_env_get_oomfunc()` - replaced with `MDBX_hsr_func`, `mdbx_env_set_hsr()` and `mdbx_env_get_hsr()`. - -Fixes: - - - Fix `mdbx_strerror()` for `MDBX_BUSY` error (no error description was returned). - - Fix updating internal meta-geo information in read-only mode (`EACCES` or `EBADFD` error). - - Fix `mdbx_page_get()` null-deref when the DB is corrupted (crash by `SIGSEGV`). - - Fix `mdbx_env_open()` for re-opening after non-fatal errors (`mdbx_chk` unexpected failures). - - Workaround for MSVC 19.27 `static_assert()` bug. - - Doxygen descriptions and refinements. - - Update Valgrind suppressions. - - Workaround to avoid an infinite loop of the 'nested' testcase on MIPS under QEMU. - - Fix a lot of typos & spelling (Thanks to Josh Soref for PR). - - Fix `getopt()` messages for Windows (Thanks to Andrey Sporaw for reporting). - - Fix MSVC compiler version requirements (Thanks to Andrey Sporaw for reporting). - - Workarounds for QEMU's bugs to run tests for a cross-built library under QEMU. - - The C++ compiler is now optional for building by CMake. - - --------------------------------------------------------------------------------- - - -## v0.9.0 at 2020-07-31 (not a release, but API changes) - -Added features: - - - [Online C API reference](https://libmdbx.dqdkfa.ru/) by Doxygen. - - Separated enums for environment, sub-databases, transactions, copying and data-update flags. - -Deprecated functions and flags: - - - Usage of custom comparators and `mdbx_dbi_open_ex()` is deprecated, since such databases couldn't be checked by the `mdbx_chk` utility. - Please use the value-to-key functions to provide keys that are compatible with the built-in libmdbx comparators. - - -******************************************************************************** - - -## 2020-07-06 - - - Added support for multi-opening the same DB in a process with SysV locking (BSD). - - Fixed warnings & minors for the LCC compiler (E2K). - - Enabled simultaneously opening the same database from processes with and without the `MDBX_WRITEMAP` option. - - Added key-to-value, `mdbx_get_keycmp()` and `mdbx_get_datacmp()` functions (helpful to avoid using custom comparators).
- - Added `ENABLE_UBSAN` CMake option to enable the UndefinedBehaviorSanitizer from GCC/CLANG. - - Workaround for [CLANG bug](https://bugs.llvm.org/show_bug.cgi?id=43275). - - Returning `MDBX_CORRUPTED` in case all meta-pages are weak and there is no other error. - - Refined mode bits while auto-creating the LCK-file. - - Avoid unnecessary database file re-mapping in case the geometry was changed by another process(es). - From the user's point of view, the `MDBX_UNABLE_EXTEND_MAPSIZE` error will now be returned less frequently and only when using the DB in the current process really requires it to be reopened. - - On-the-fly remapping of the database file was implemented. - Remapping with a change of address is now performed automatically if there are no dependent readers in the current process. - - -## 2020-06-12 - - - Minor versioning change. The last number in the version now means the number of commits since the last release/tag. - - Provide a ChangeLog file. - - Fix for using libmdbx as a C-only sub-project with CMake. - - Fix `mdbx_env_set_geometry()` for the case when it is called from an opened environment outside of a write transaction. - - Add support for huge transactions and the `MDBX_HUGE_TRANSACTIONS` build option (default `OFF`). - - Refine LTO (link time optimization) for clang. - - Force enabling exception handling for MSVC (`/EHsc` option). - - -## 2020-06-05 - - - Support for Android/Bionic. - - Support for iOS. - - Auto-handling of `MDBX_NOSUBDIR` while opening any existing database. - - Engage github-actions to make release-assets. - - Clarify API description. - - Extended keygen-cases in the stochastic test. - - Fix fetching of the first/lower key from a LEAF2-page during page merge. - - Fix missing comma in the array of error messages. - - Fix div-by-zero while copy-with-compaction for non-resizable environments. - - Fixes & enhancements for custom comparators. - - Fix `MDBX_WITHOUT_MSVC_CRT` option and missing `ntdll.def`. - - Fix `mdbx_env_close()` to work correctly when called concurrently from several threads. - - Fix null-deref in ASAN-enabled builds while opening the environment with an error and/or read-only. - - Fix AddressSanitizer errors after closing the environment. - - Fix/workaround to avoid GCC 10.x pedantic warnings. - - Fix using `ENODATA` for FreeBSD. - - Avoid invalidation of DBI-handle(s) when they are just closed. - - Avoid using `pwritev()` for single-writes (up to 10% speedup for some kernels & scenarios). - - Avoiding `MDBX_UTTERLY_NOSYNC` as a result of flags merging. - - Add `mdbx_dbi_dupsort_depthmask()` function. - - Add `MDBX_CP_FORCE_RESIZABLE` option. - - Add deprecated `MDBX_MAP_RESIZED` for compatibility. - - Add `MDBX_BUILD_TOOLS` option (default `ON`). - - Refine `mdbx_dbi_open_ex()` for safe concurrent opening of the same handle from different threads. - - Truncate the lck-file during environment closing, so a zero-length lck-file indicates that the environment was closed properly. - - Refine `mdbx_update_gc()` for huge transactions with small database page sizes. - - Extend dump/load to support all MDBX attributes. - - Avoid upserting the same key-value data, fix related assertions. - - Rework min/max length checking for keys & values. - - Check the order of keys on all pages during checking. - - Support the `CFLAGS_EXTRA` make-option for convenience. - - Preserve the last txnid while copying with compactification. - - Auto-reset a running transaction in mdbx_txn_renew(). - - Automatically abort an errored transaction in mdbx_txn_commit(). - - Auto-choose page size for large databases.
- - Rearrange source files, rework build and options support by CMake. - - Crutch for WSL1 (Windows Subsystem for Linux). - - Refine install/uninstall targets. - - Support for Valgrind 3.14 and later. - - Add `check-analyzer`, `check-ubsan`, `check-asan` and `check-leak` targets to the Makefile. - - Minor fix/workaround to avoid UBSAN traps for `memcpy(ptr, NULL, 0)`. - - Avoid some GCC-analyzer false-positive warnings. - - -## 2020-03-18 - - - Workarounds for Wine (Windows compatibility layer for Linux). - - `MDBX_MAP_RESIZED` renamed to `MDBX_UNABLE_EXTEND_MAPSIZE`. - - Clarify API description, fix typos. - - Speed up runtime checks in debug/checked builds. - - Added checking for read/write transaction overlapping for the same thread, added `MDBX_TXN_OVERLAPPING` error and `MDBX_DBG_LEGACY_OVERLAP` option. - - Added `mdbx_key_from_jsonInteger()`, `mdbx_key_from_double()`, `mdbx_key_from_float()`, `mdbx_key_from_int64()` and `mdbx_key_from_int32()` functions. See `mdbx.h` for description. - - Fix compatibility (use zero for invalid DBI). - - Refine/clarify error messages. - - Avoid extra error messages "bad txn" from mdbx_chk when the DB is corrupted. - - -## 2020-01-21 - - - Fix `mdbx_load` utility for custom comparators. - - Fix checks related to the `MDBX_APPEND` flag inside `mdbx_cursor_put()`. - - Refine/fix dbi_bind() internals. - - Refine/fix handling of `STATUS_CONFLICTING_ADDRESSES`. - - Rework the `MDBX_DBG_DUMP` option to avoid disk I/O performance degradation. - - Add built-in help to the test tool. - - Fix `mdbx_env_set_geometry()` for large page sizes. - - Clarify API description & comments, fix typos. - - -## 2019-12-31 - - - Fix returning MDBX_RESULT_TRUE from page_alloc(). - - Fix false-positive ASAN issue. - - Fix assertion for the `MDBX_NOTLS` option. - - Rework the `MADV_DONTNEED` threshold. - - Fix the `mdbx_chk` utility to not check some numbers if walking the B-tree was disabled. - - Use the page's mp_txnid for basic integrity checking. - - Add `MDBX_FORCE_ASSERTIONS` build-time option. - - Rework `MDBX_DBG_DUMP` to avoid performance degradation. - - Rename `MDBX_NOSYNC` to `MDBX_SAFE_NOSYNC` for clarity. - - Interpret `ERROR_ACCESS_DENIED` from `OpenProcess()` as 'process exists'. - - Avoid using `FILE_FLAG_NO_BUFFERING` for compatibility with small database pages. - - Added install section for CMake. - - -## 2019-12-02 - - - Support for Mac OSX, FreeBSD, NetBSD, OpenBSD, DragonFly BSD, OpenSolaris, OpenIndiana (AIX and HP-UX pending). - - Use bootid for rollback decisions. - - Counting retired pages and extended transaction info. - - Add `MDBX_ACCEDE` flag for database opening. - - Using OFD-locks and tracking for in-process multi-opening. - - Hot backup into a pipe. - - Support for cmake & amalgamated sources. - - Fastest internal sort implementation. - - New internal dirty-list implementation with lazy sorting. - - Support for lazy-sync-to-disk with polling. - - Extended key length. - - Last update transaction number for each sub-database. - - Automatic read-ahead enabling/disabling. - - More auto-compactification. - - Use the `-fsanitize=undefined` and `-Wpedantic` options. - - Rework page merging. - - Nested transactions. - - API description. - - Checking for non-local filesystems to avoid DB corruption. - - -******************************************************************************** - - -For early changes see the git commit history.
diff --git a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/README.md b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/README.md deleted file mode 100644 index 46e1c549202..00000000000 --- a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/README.md +++ /dev/null @@ -1,797 +0,0 @@ - - -### The origin has been migrated to [GitFlic](https://gitflic.ru/project/erthink/libmdbx) -since on 2022-04-15 the Github administration, without any warning -or explanation, deleted _libmdbx_ along with a lot of other projects, -simultaneously blocking access for many developers. -For the same reason ~~Github~~ is blacklisted forever. - -GitFlic's developers plan to support other languages, -including English and Chinese, in the near future. - --------------------------------------------------------------------------------- - -*The Future will (be) [Positive](https://www.ptsecurity.com). Everything will be fine.* - -> Please refer to the online [documentation](https://libmdbx.dqdkfa.ru) -> with [`C` API description](https://libmdbx.dqdkfa.ru/group__c__api.html) -> and pay attention to the [`C++` API](https://gitflic.ru/project/erthink/libmdbx/blob?file=mdbx.h%2B%2B#line-num-1). - -> Questions, feedback and suggestions are welcome to the [Telegram group](https://t.me/libmdbx). - -> For news take a look at the [ChangeLog](https://gitflic.ru/project/erthink/libmdbx/blob?file=ChangeLog.md) -> or the [TODO](https://gitflic.ru/project/erthink/libmdbx/blob?file=TODO.md). - - -libmdbx ======== - - - -_libmdbx_ is an extremely fast, compact, powerful, embedded, transactional -[key-value database](https://en.wikipedia.org/wiki/Key-value_database), -with a [permissive license](https://gitflic.ru/project/erthink/libmdbx/blob?file=LICENSE). -_libmdbx_ has a specific set of properties and capabilities, -focused on creating unique lightweight solutions. - -1. Allows **a swarm of multi-threaded processes to -[ACID](https://en.wikipedia.org/wiki/ACID)ly read and update** several -key-value [maps](https://en.wikipedia.org/wiki/Associative_array) and -[multimaps](https://en.wikipedia.org/wiki/Multimap) in a locally-shared -database. - -2. Provides **extraordinary performance**, minimal overhead through -[Memory-Mapping](https://en.wikipedia.org/wiki/Memory-mapped_file) and -`O(log(N))` operation costs by virtue of [B+ -tree](https://en.wikipedia.org/wiki/B%2B_tree). - -3. Requires **no maintenance and no crash recovery** since it doesn't use -[WAL](https://en.wikipedia.org/wiki/Write-ahead_logging), but that might -be a caveat for write-intensive workloads with durability requirements. - -4. **Compact and friendly for fully embedding**. Only ≈25KLOC of `C11`, -≈64K of x86 binary code for the core, no internal threads nor server process(es), -but implements a simplified variant of the [Berkeley -DB](https://en.wikipedia.org/wiki/Berkeley_DB) and -[dbm](https://en.wikipedia.org/wiki/DBM_(computing)) API. - -5.
Enforces [serializability](https://en.wikipedia.org/wiki/Serializability) for -writers just by a single -[mutex](https://en.wikipedia.org/wiki/Mutual_exclusion) and affords -[wait-free](https://en.wikipedia.org/wiki/Non-blocking_algorithm#Wait-freedom) operation -for parallel readers without atomic/interlocked operations, while -**writing and reading transactions do not block each other**. - -6. **Guarantees data integrity** after a crash unless this was explicitly -neglected in favour of write performance. - -7. Supports Linux, Windows, MacOS, Android, iOS, FreeBSD, DragonFly, Solaris, -OpenSolaris, OpenIndiana, NetBSD, OpenBSD and other systems compliant with -**POSIX.1-2008**. - - - -Historically, _libmdbx_ is a deeply revised and extended descendant of the amazing -[Lightning Memory-Mapped Database](https://en.wikipedia.org/wiki/Lightning_Memory-Mapped_Database). -_libmdbx_ inherits all benefits from _LMDB_, but resolves some issues and adds [a set of improvements](#improvements-beyond-lmdb). - -### MithrilDB and Future - - - -The next version is under non-public development from scratch and will be -released as **MithrilDB** and `libmithrildb` for libraries & packages. -The admittedly mythical [Mithril](https://en.wikipedia.org/wiki/Mithril) -resembles silver but is stronger and lighter than steel. Therefore -_MithrilDB_ is a fittingly relevant name. - -_MithrilDB_ differs radically from _libmdbx_ in its new database -format and an API based on C++20. The goal of this revolution is to provide -a clearer and more robust API, add more features and new valuable properties -of the database. All fundamental architectural problems of libmdbx/LMDB -have been solved there, but now the active development has been -suspended for three main reasons: - -1. For now _libmdbx_ is «mostly» enough for all [our products](https://www.ptsecurity.com/ww-en/products/), -and I’m busy with the development of replication for scalability. -2. Waiting for a fresh [Elbrus CPU](https://wiki.elbrus.ru/) of the [e2k architecture](https://en.wikipedia.org/wiki/Elbrus_2000), -especially with hardware acceleration of [Streebog](https://en.wikipedia.org/wiki/Streebog) and -[Kuznyechik](https://en.wikipedia.org/wiki/Kuznyechik), which are required for Merkle tree, etc. -3. The expectation of needs and opportunities due to the wide use of NVDIMM (aka persistent memory), -modern NVMe and [Ангара (Angara)](https://ru.wikipedia.org/wiki/Ангара_(интерконнект)). - -However, _MithrilDB_ will not be available for countries unfriendly to -Russia (i.e. those that acceded to the sanctions, devil adepts and/or NATO). But it is -not yet known whether such a restriction will be implemented only through -a license and support, or whether the source code will not be open at all. -Basically we are not inclined to allow our work to contribute to the -profit that goes to weapons that kill our relatives and friends. -NO OPTIONS. - -Nonetheless, I try not to make any promises regarding _MithrilDB_ until release. - -Contrary to _MithrilDB_, _libmdbx_ will forever be free and open source. -Moreover, with high-quality support whenever possible. "You become -responsible, forever, for what you have tamed." So we will continue -to comply with the original open license and the principles of -constructive cooperation, in spite of outright Github sabotage and -sanctions. I will also try to keep (not drop) Windows support, even though -for us it is an unused, obsolete technology.
- - -``` -$ objdump -f -h -j .text libmdbx.so - - libmdbx.so:     file format elf64-e2k - architecture: elbrus-v6:64, flags 0x00000150: - HAS_SYMS, DYNAMIC, D_PAGED - start address 0x0000000000021680 - - Sections: - Idx Name          Size      VMA               LMA               File off  Algn - 10 .text         000ddd28  0000000000021680  0000000000021680  00021680  2**3 - CONTENTS, ALLOC, LOAD, READONLY, CODE - -$ cc --version - lcc:1.26.12:Jun-05-2022:e2k-v6-linux - gcc (GCC) 9.3.0 compatible -``` - ----- - -## Table of Contents -- [Characteristics](#characteristics) - - [Features](#features) - - [Limitations](#limitations) - - [Gotchas](#gotchas) - - [Comparison with other databases](#comparison-with-other-databases) - - [Improvements beyond LMDB](#improvements-beyond-lmdb) - - [History & Acknowledgments](#history) -- [Usage](#usage) - - [Building and Testing](#building-and-testing) - - [API description](#api-description) - - [Bindings](#bindings) -- [Performance comparison](#performance-comparison) - - [Integral performance](#integral-performance) - - [Read scalability](#read-scalability) - - [Sync-write mode](#sync-write-mode) - - [Lazy-write mode](#lazy-write-mode) - - [Async-write mode](#async-write-mode) - - [Cost comparison](#cost-comparison) - -# Characteristics - - - -## Features - -- Key-value data model, keys are always sorted. - -- Fully [ACID](https://en.wikipedia.org/wiki/ACID)-compliant, thanks to -[MVCC](https://en.wikipedia.org/wiki/Multiversion_concurrency_control) -and [CoW](https://en.wikipedia.org/wiki/Copy-on-write). - -- Multiple key-value sub-databases within a single datafile. - -- Range lookups, including range query estimation. - -- Efficient support for short fixed-length keys, including native 32/64-bit integers. - -- Ultra-efficient support for [multimaps](https://en.wikipedia.org/wiki/Multimap). Multi-values are sorted, searchable and iterable. Keys are stored without duplication. - -- Data is [memory-mapped](https://en.wikipedia.org/wiki/Memory-mapped_file) and accessible directly/zero-copy. Traversal of database records is extremely fast. - -- Transactions for readers and writers that do not block each other. - -- Writes are strongly serialized. No transaction conflicts nor deadlocks. - -- Readers are [non-blocking](https://en.wikipedia.org/wiki/Non-blocking_algorithm), notwithstanding [snapshot isolation](https://en.wikipedia.org/wiki/Snapshot_isolation). - -- Nested write transactions. - -- Reads scale linearly across CPUs. - -- Continuous zero-overhead database compactification. - -- Automatic on-the-fly database size adjustment. - -- Customizable database page size. - -- `O(log(N))` cost of lookup, insert, update, and delete operations by virtue of [B+ tree characteristics](https://en.wikipedia.org/wiki/B%2B_tree#Characteristics). - -- Online hot backup. - -- Append operation for efficient bulk insertion of pre-sorted data. - -- No [WAL](https://en.wikipedia.org/wiki/Write-ahead_logging) nor any -transaction journal. No crash recovery needed. No maintenance is required. - -- No internal cache and/or memory management, all done by basic OS services. - -## Limitations - -- **Page size**: a power of 2, minimum `256` (mostly for testing), maximum `65536` bytes, default `4096` bytes. -- **Key size**: minimum `0`, maximum ≈½ pagesize (`2022` bytes for default 4K pagesize, `32742` bytes for 64K pagesize). -- **Value size**: minimum `0`, maximum `2146435072` (`0x7FF00000`) bytes for maps, ≈½ pagesize for multimaps (`2022` bytes for default 4K pagesize, `32742` bytes for 64K pagesize).
-- **Write transaction size**: up to `1327217884` pages (`4.944272` TiB for default 4K pagesize, `79.108351` TiB for 64K pagesize). - -- **Database size**: up to `2147483648` pages (≈`8.0` TiB for default 4K pagesize, ≈`128.0` TiB for 64K pagesize). - -- **Maximum sub-databases**: `32765`. - -## Gotchas - -1. There cannot be more than one writer at a time, i.e. no more than one write transaction at a time. - -2. _libmdbx_ is based on [B+ tree](https://en.wikipedia.org/wiki/B%2B_tree), so access to database pages is mostly random. -Thus SSDs provide a significant performance boost over spinning disks for large databases. - -3. _libmdbx_ uses [shadow paging](https://en.wikipedia.org/wiki/Shadow_paging) instead of [WAL](https://en.wikipedia.org/wiki/Write-ahead_logging). -Thus syncing data to disk might be a bottleneck for write-intensive workloads. - -4. _libmdbx_ uses [copy-on-write](https://en.wikipedia.org/wiki/Copy-on-write) for [snapshot isolation](https://en.wikipedia.org/wiki/Snapshot_isolation) during updates, -but read transactions prevent recycling of old retired/freed pages, since they read them. Thus altering data during a parallel -long-lived read operation will increase the process working set, may exhaust the entire free database space, -can make the database grow quickly, and can result in performance degradation. -Try to avoid long-running read transactions. - -5. _libmdbx_ is extraordinarily fast and provides minimal overhead for data access, -so you should reconsider using brute-force techniques and double-check your code. -On the one hand, in the case of _libmdbx_, a simple linear search may be more profitable than complex indexes. -On the other hand, if you do something suboptimally, you may notice the detrimental effects only on sufficiently large data. - -## Comparison with other databases -For now please refer to [chapter of "BoltDB comparison with other -databases"](https://github.com/coreos/bbolt#comparison-with-other-databases) -which is also (mostly) applicable to _libmdbx_. - - - - - -Improvements beyond LMDB -======================== - -_libmdbx_ is superior to the legendary _[LMDB](https://symas.com/lmdb/)_ in -terms of features and reliability, and not inferior in performance. In -comparison to _LMDB_, _libmdbx_ makes things "just work" perfectly and -out-of-the-box, rather than silently and catastrophically breaking down. The list -below is pruned down to the improvements most notable and obvious from -the user's point of view. - -## Added Features - -1. Keys can be more than 2 times longer than in _LMDB_. - > For a DB with the default page size _libmdbx_ supports keys up to 2022 bytes - > and up to 32742 bytes for a 64K page size. _LMDB_ allows a key size up to - > 511 bytes and may silently lose data with large values. - -2. Up to 30% faster than _LMDB_ in [CRUD](https://en.wikipedia.org/wiki/Create,_read,_update_and_delete) benchmarks. - > Benchmarks of the in-[tmpfs](https://en.wikipedia.org/wiki/Tmpfs) scenarios, - > which test the speed of the engine itself, showed that _libmdbx_ is 10-20% faster than _LMDB_, - > and up to 30% faster when _libmdbx_ is compiled with specific build options - > which downgrade several runtime checks to match LMDB behaviour. - > - > These and other results could be easily reproduced with [ioArena](https://abf.io/erthink/ioarena.git) just by the `make bench-quartet` command, - > including comparisons with [RocksDB](https://en.wikipedia.org/wiki/RocksDB) - > and [WiredTiger](https://en.wikipedia.org/wiki/WiredTiger). - -3.
Automatic on-the-fly database size adjustment, both increment and reduction. - > _libmdbx_ manages the database size according to parameters specified - > via the `mdbx_env_set_geometry()` function, - > which include the growth step and the truncation threshold. - > - > Unfortunately, on-the-fly database size adjustment doesn't work under [Wine](https://en.wikipedia.org/wiki/Wine_(software)) - > due to its internal limitations and unimplemented functions, i.e. the `MDBX_UNABLE_EXTEND_MAPSIZE` error will be returned. - -4. Automatic continuous zero-overhead database compactification. - > During each commit _libmdbx_ merges freed pages adjacent to the unallocated area - > at the end of the file, and then truncates the unused space once enough of it has accumulated. - -5. The same database format for 32- and 64-bit builds. - > The _libmdbx_ database format depends only on the [endianness](https://en.wikipedia.org/wiki/Endianness) but not on the [bitness](https://en.wiktionary.org/wiki/bitness). - -6. LIFO policy for Garbage Collection recycling. This can significantly increase write performance due to the write-back disk cache, up to several times in a best-case scenario. - > LIFO means that the pages which became unused most recently will be taken for reuse first. - > Therefore the circulation loop of database pages becomes as short as possible. - > In other words, the set of pages that are (over)written in memory and on disk during a series of write transactions will be as small as possible. - > This creates ideal conditions for battery-backed or flash-backed disk cache efficiency. - -7. Fast estimation of range query result volume, i.e. how many items can -be found between a `KEY1` and a `KEY2`. This is a prerequisite for building -and/or optimizing query execution plans. - > _libmdbx_ performs a rough estimate based on the common B-tree pages of the paths from the root to the corresponding keys. - -8. `mdbx_chk` utility for database integrity check. -Since version 0.9.1, the utility supports checking the database using any of the three meta-pages, with the ability to switch to the selected one. - -9. Support for opening databases in exclusive mode, including on a network share. - -10. Zero-length keys and values. - -11. Ability to determine whether particular data is on a dirty page -or not, which allows avoiding copy-out before updates. - -12. Extended information about the whole database, sub-databases, transactions, and readers enumeration. - > _libmdbx_ provides a lot of information, including dirty and leftover pages - > for a write transaction, reading lag and holdover space for read transactions. - -13. Extended update and delete operations. - > _libmdbx_ allows performing them _at once_ with getting the previous value - > and addressing the particular item within a multi-value with the same key. - -14. Useful runtime options for tuning the engine to the application's requirements and specific use cases. - -15. Automated steady sync-to-disk upon several thresholds and/or timeout via cheap polling. - -16. Sequence generation and three persistent 64-bit markers. - -17. Handle-Slow-Readers callback to resolve database full/overflow issues due to long-lived read transaction(s). - -18. Ability to determine whether the cursor points to a key-value -pair, to the first, to the last, or is not set to anything. - - -## Other fixes and specifics - -1.
Fixed more than 10 significant errors, in particular: page leaks, -wrong sub-database statistics, segfault in several conditions, -non-optimal page merge strategy, updating an existing record with -a change in data size (including for multimap), etc. - -2. All cursors can be reused and should be closed explicitly, -regardless of whether they were opened within a write or read transaction. - -3. Opening database handles is spared from race conditions and -pre-opening is not needed. - -4. Returning the `MDBX_EMULTIVAL` error in case of an ambiguous update or delete. - -5. Guarantee of database integrity even in asynchronous unordered write-to-disk mode. - > _libmdbx_ proposes an additional trade-off via `MDBX_SAFE_NOSYNC` with an append-like manner for updates, - > which avoids database corruption after a system crash, contrary to LMDB. - > Nevertheless, the `MDBX_UTTERLY_NOSYNC` mode is available to match LMDB's behaviour for `MDB_NOSYNC`. - -6. On **MacOS & iOS** the `fcntl(F_FULLFSYNC)` syscall is used _by -default_ to synchronize data with the disk, as this is [the only way to -guarantee data -durability](https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man2/fsync.2.html) -in case of power failure. Unfortunately, in scenarios with high write -intensity, the use of `F_FULLFSYNC` significantly degrades performance -compared to LMDB, where the `fsync()` syscall is used. Therefore, -_libmdbx_ allows you to override this behavior by defining the -`MDBX_OSX_SPEED_INSTEADOF_DURABILITY=1` option while building the library. - -7. On **Windows** the `LockFileEx()` syscall is used for locking, since -it allows placing the database on network drives, and provides protection -against incompetent user actions (aka -[poka-yoke](https://en.wikipedia.org/wiki/Poka-yoke)). Therefore -_libmdbx_ may lag a little behind LMDB in performance tests, where -named mutexes are used. - - - - -# History - -Historically, _libmdbx_ is a deeply revised and extended descendant of the -[Lightning Memory-Mapped Database](https://en.wikipedia.org/wiki/Lightning_Memory-Mapped_Database). -At first the development was carried out within the -[ReOpenLDAP](https://web.archive.org/web/https://github.com/erthink/ReOpenLDAP) project. About a -year later _libmdbx_ was separated into a standalone project, which was -[presented at the Highload++ 2015 -conference](http://www.highload.ru/2015/abstracts/1831.html). - -Since 2017 _libmdbx_ has been used in [Fast Positive Tables](https://gitflic.ru/project/erthink/libfpta), -and development is funded by [Positive Technologies](https://www.ptsecurity.com). - -On 2022-04-15 the Github administration, without any warning or -explanation, deleted _libmdbx_ along with a lot of other projects, -simultaneously blocking access for many developers. Therefore on -2022-04-21 we migrated to a reliable trusted infrastructure. -The origin for now is at [GitFlic](https://gitflic.ru/project/erthink/libmdbx) -with backup at [ABF by ROSA Lab](https://abf.rosalinux.ru/erthink/libmdbx). -For the same reason ~~Github~~ is blacklisted forever. - -## Acknowledgments -Howard Chu is the author of LMDB, from which -_libmdbx_ originated in 2015. - -Martin Hedenfalk is the author of the `btree.c` code, which -was used to begin development of LMDB. - - - --------------------------------------------------------------------------------- - -Usage -===== - - - -Currently, libmdbx is only available in -[source code](https://en.wikipedia.org/wiki/Source_code) form.
-Packages for common Linux distributions are planned for the future,
-starting with the release of version 1.0.
-
-## Source code embedding
-
-_libmdbx_ provides two official ways for integration in source code form:
-
-1. Using the amalgamated source code, which is available in the [releases section](https://gitflic.ru/project/erthink/libmdbx/release) on GitFlic.
- > The amalgamated source code includes all files required to build and
- > use _libmdbx_, but not for testing _libmdbx_ itself.
- > Besides the releases, the amalgamated sources can be created at any time from an original clone of the git
- > repository on Linux by executing `make dist`. As a result, the desired
- > set of files will be formed in the `dist` subdirectory.
-
-2. Adding the complete source code as a `git submodule` from the [origin git repository](https://gitflic.ru/project/erthink/libmdbx) on GitFlic.
- > This allows you to build both _libmdbx_ and the testing tool.
- > On the other hand, this way requires you to pull git tags, and to use a C++11 compiler for the test tool.
-
-_**Please, avoid using any other techniques.**_ Otherwise, at least
-don't ask for support and don't name such chimeras `libmdbx`.
-
-
-
-## Building and Testing
-
-Both the amalgamated and the original source code provide building through
-[CMake](https://cmake.org/) or [GNU
-Make](https://www.gnu.org/software/make/) with
-[bash](https://en.wikipedia.org/wiki/Bash_(Unix_shell)). All build ways
-are completely traditional and have minimal prerequisites like
-`build-essential`, i.e. a non-obsolete C/C++ compiler and an
-[SDK](https://en.wikipedia.org/wiki/Software_development_kit) for the
-target platform. Obviously you also need the build tools themselves, i.e. `git`,
-`cmake`, or GNU `make` with `bash`. For your convenience, `make help`
-and `make options` are also available for listing the existing targets
-and build options respectively.
-
-The only significant peculiarity is that git tags are required
-to build from the complete (non-amalgamated) source code.
-Executing **`git fetch --tags --force --prune`** is enough to get them,
-and `--unshallow` or `--update-shallow` is required for the shallow-clone case.
-
-So just use CMake or GNU Make in your habitual manner, and feel free to
-file an issue or make a pull request in case something is
-unexpected or broken.
-
-### Testing
-The amalgamated source code does not contain any tests, for several reasons.
-Please read [the explanation](https://libmdbx.dqdkfa.ru/dead-github/issues/214#issuecomment-870717981) and don't ask to alter this.
-So for testing _libmdbx_ itself you need the full source code, i.e. a clone of the git repository; there is no other option.
-
-The full source code of _libmdbx_ has a [`test` subdirectory](https://gitflic.ru/project/erthink/libmdbx/tree/master/test) with a minimalistic test "framework".
-Actually it contains the source code of `mdbx_test` – a console utility with a set of command-line options that allows constructing and running reasonably complex test scenarios.
-This test utility is intended for _libmdbx_'s developers for testing the library itself, not for use by end users.
-Therefore, only basic information is provided:
-
- - There are a few CRUD-based test cases (hill, TTL, nested, append, jitter, etc.),
- which can be combined to test concurrent operations within a shared database in a multi-process environment.
- This is the `basic` test scenario.
- - The `Makefile` provides several self-descriptive targets for testing: `smoke`, `test`, `check`, `memcheck`, `test-valgrind`,
- `test-asan`, `test-leak`, `test-ubsan`, `cross-gcc`, `cross-qemu`, `gcc-analyzer`, `smoke-fault`, `smoke-singleprocess`,
- `test-singleprocess`, `long-test`. Please run `make --help` if in doubt.
- - In addition to the `mdbx_test` utility, there is the script [`long_stochastic.sh`](https://gitflic.ru/project/erthink/libmdbx/blob/master/test/long_stochastic.sh),
- which calls `mdbx_test` by going through a set of modes and options, gradually increasing the number of operations and the size of transactions.
- This script is used for most of the automatic testing, including the `Makefile` targets and Continuous Integration.
- - Brief information about the available command-line options is available via `--help`.
- However, you should dive into the source code to get the full picture; there is no other way.
-
-Anyway, no matter how thoroughly _libmdbx_ is tested, you should rely only on your own tests, for a few reasons:
-
-1. Most use cases are unique.
- So there is no warranty that your use case was properly tested, even though _libmdbx_'s tests engage a stochastic approach.
-2. If there are problems, your test will, on the one hand, help verify whether you are using _libmdbx_ correctly,
- and on the other hand allow the problem to be reproduced and insure against regressions in the future.
-3. Actually, you should rely only on what you have checked yourself, or take the risk.
-
-
-### Common important details
-
-#### Build reproducibility
-By default _libmdbx_ tracks the build time via the `MDBX_BUILD_TIMESTAMP` build option and macro.
-So for [reproducible builds](https://en.wikipedia.org/wiki/Reproducible_builds) you should predefine/override it to a known fixed string value.
-For instance:
-
- - for a reproducible build with make: `make MDBX_BUILD_TIMESTAMP=unknown ` ...
- - or during configuration by CMake: `cmake -DMDBX_BUILD_TIMESTAMP:STRING=unknown ` ...
-
-Of course, in addition to this, your toolchain must ensure the reproducibility of builds.
-For more information please refer to [reproducible-builds.org](https://reproducible-builds.org/).
-
-#### Containers
-There are no special traits nor quirks if you use libmdbx ONLY inside a single container.
-But in cross-container cases, or with a mix of host and container(s), two major things MUST be
-guaranteed:
-
-1. Coherence of the memory mapping content and a unified page cache inside the OS kernel for the host and all container(s) operating with a DB.
-Basically this means there must be only a single physical copy of each memory-mapped DB page in the system memory.
-
-2. Uniqueness of [PID](https://en.wikipedia.org/wiki/Process_identifier) values and/or a common space for them:
- - for POSIX systems: PID uniqueness for all processes operating with a DB.
- I.e. `--pid=host` is required to run DB-aware processes inside Docker,
- or, without host interaction, `--pid=container:<name|id>` with the same name/id.
- - for non-POSIX (i.e. Windows) systems: inter-visibility of process handles.
- I.e. `OpenProcess(SYNCHRONIZE, ..., PID)` must return a reasonable error,
- including `ERROR_ACCESS_DENIED`,
- but not `ERROR_INVALID_PARAMETER` as for an invalid/non-existent PID.
-
-#### DSO/DLL unloading and destructors of Thread-Local-Storage objects
-When building _libmdbx_ as a shared library, or using static _libmdbx_ as a
-part of another dynamic library, it is advisable to make sure that your
-system ensures the correct calling of destructors of
-Thread-Local-Storage objects when unloading dynamic libraries.
-
-If this is not the case, then unloading a dynamic-link library with
-_libmdbx_ code inside can result in either a resource leak or a crash
-due to calling destructors from an already unloaded DSO/DLL object. The
-problem can only manifest in a multithreaded application which unloads
-a shared dynamic library with _libmdbx_ code inside
-after using _libmdbx_. It is known that TLS destructors are properly
-maintained in the following cases:
-
-- On all modern versions of Windows (Windows 7 and later).
-
-- On systems with the
-[`__cxa_thread_atexit_impl()`](https://sourceware.org/glibc/wiki/Destructor%20support%20for%20thread_local%20variables)
-function in the standard C library, including systems with GNU libc
-version 2.18 and later.
-
-- On systems with libpthread/nptl from GNU libc with bug fixes
-[#21031](https://sourceware.org/bugzilla/show_bug.cgi?id=21031) and
-[#21032](https://sourceware.org/bugzilla/show_bug.cgi?id=21032), or
-where there are no similar bugs in the pthreads implementation.
-
-### Linux and other platforms with GNU Make
-To build the library it is enough to execute `make all` in the directory
-with the source code, and `make check` to execute the basic tests.
-
-If the `make` installed on the system is not GNU Make, there will be a
-lot of errors from make when trying to build. In this case, perhaps you
-should use `gmake` instead of `make`, or even `gnu-make`, etc.
-
-### FreeBSD and related platforms
-As a rule, on BSD and its derivatives the default is to use Berkeley Make, and
-[Bash](https://en.wikipedia.org/wiki/Bash_(Unix_shell)) is not installed.
-
-So you need to install the required components: GNU Make, Bash, and C and C++
-compilers compatible with GCC or CLANG. After that, to build the
-library, it is enough to execute `gmake all` (or `make all`) in the
-directory with the source code, and `gmake check` (or `make check`) to run
-the basic tests.
-
-### Windows
-To build _libmdbx_ on Windows, the _original_ CMake and [Microsoft Visual
-Studio 2019](https://en.wikipedia.org/wiki/Microsoft_Visual_Studio) are
-recommended. Please use recent versions of CMake, Visual Studio and the Windows
-SDK to avoid troubles with C11 support and the `alignas()` feature.
-
-For building with MinGW, version 10.2 or more recent coupled with a modern CMake is required.
-So it is recommended to use [chocolatey](https://chocolatey.org/) to install and/or update them.
-
-Other ways to build are potentially possible, but are not supported and will not be.
-The `CMakeLists.txt` or `GNUMakefile` scripts will probably need to be modified accordingly.
-When using other methods, do not forget to add `ntdll.lib` to the linking step.
-
-It should be noted that efforts were made in _libmdbx_ to avoid
-runtime dependencies on the CRT and other MSVC libraries.
-For this it is enough to pass the `-DMDBX_WITHOUT_MSVC_CRT:BOOL=ON` option
-during configuration by CMake.
-
-An example of running a basic test script can be found in the
-[CI-script](appveyor.yml) for [AppVeyor](https://www.appveyor.com/).
-To run the [long stochastic test scenario](test/long_stochastic.sh),
-[bash](https://en.wikipedia.org/wiki/Bash_(Unix_shell)) is required, and
-such testing is recommended with the test data placed on a
-[RAM-disk](https://en.wikipedia.org/wiki/RAM_drive).
-
-### Windows Subsystem for Linux
-_libmdbx_ can be used in a [WSL2](https://en.wikipedia.org/wiki/Windows_Subsystem_for_Linux#WSL_2)
-but NOT in a [WSL1](https://en.wikipedia.org/wiki/Windows_Subsystem_for_Linux#WSL_1) environment.
-This is a consequence of the fundamental shortcomings of _WSL1_ and cannot be fixed.
-To avoid data loss, _libmdbx_ returns the `ENOLCK` (37, "No record locks available")
-error when opening the database in a _WSL1_ environment.
-
-### MacOS
-The current [native build tools](https://en.wikipedia.org/wiki/Xcode) for
-MacOS include GNU Make, CLANG and an outdated version of Bash.
-Therefore, to build the library, it is enough to run `make all` in the
-directory with the source code, and to run `make check` to execute the basic
-tests. If something goes wrong, it is recommended to install
-[Homebrew](https://brew.sh/) and try again.
-
-To run the [long stochastic test scenario](test/long_stochastic.sh), you
-will need to install a current (not outdated) version of
-[Bash](https://en.wikipedia.org/wiki/Bash_(Unix_shell)). To do this, we
-recommend that you install [Homebrew](https://brew.sh/) and then execute
-`brew install bash`.
-
-### Android
-We recommend using CMake to build _libmdbx_ for Android.
-Please refer to the [official guide](https://developer.android.com/studio/projects/add-native-code).
-
-### iOS
-To build _libmdbx_ for iOS, we recommend using CMake with the
-["toolchain file"](https://cmake.org/cmake/help/latest/variable/CMAKE_TOOLCHAIN_FILE.html)
-from the [ios-cmake](https://github.com/leetal/ios-cmake) project.
-
-
-
-## API description
-
-Please refer to the online [_libmdbx_ API reference](https://libmdbx.dqdkfa.ru/docs)
-and/or see the [mdbx.h++](mdbx.h%2B%2B) and [mdbx.h](mdbx.h) headers.
-
-
-
-Bindings
-========
-
-| Runtime | Repo | Author |
-| ------- | ------ | ------ |
-| Scala | [mdbx4s](https://github.com/david-bouyssie/mdbx4s) | [David Bouyssié](https://github.com/david-bouyssie) |
-| Haskell | [libmdbx-hs](https://hackage.haskell.org/package/libmdbx) | [Francisco Vallarino](https://github.com/fjvallarino) |
-| NodeJS, [Deno](https://deno.land/) | [lmdbx-js](https://github.com/kriszyp/lmdbx-js) | [Kris Zyp](https://github.com/kriszyp/) |
-| NodeJS | [node-mdbx](https://www.npmjs.com/package/node-mdbx/) | [Сергей Федотов](mailto:sergey.fedotov@corp.mail.ru) |
-| Ruby | [ruby-mdbx](https://rubygems.org/gems/mdbx/) | [Mahlon E. Smith](https://github.com/mahlonsmith) |
-| Go | [mdbx-go](https://github.com/torquem-ch/mdbx-go) | [Alex Sharov](https://github.com/AskAlexSharov) |
-| [Nim](https://en.wikipedia.org/wiki/Nim_(programming_language)) | [NimDBX](https://github.com/snej/nimdbx) | [Jens Alfke](https://github.com/snej) |
-| Lua | [lua-libmdbx](https://github.com/mah0x211/lua-libmdbx) | [Masatoshi Fukunaga](https://github.com/mah0x211) |
-| Rust | [libmdbx-rs](https://github.com/vorot93/libmdbx-rs) | [Artem Vorotnikov](https://github.com/vorot93) |
-| Rust | [mdbx](https://crates.io/crates/mdbx) | [gcxfd](https://github.com/gcxfd) |
-| Java | [mdbxjni](https://github.com/castortech/mdbxjni) | [Castor Technologies](https://castortech.com/) |
-| Python (draft) | [python-bindings](https://libmdbx.dqdkfa.ru/dead-github/commits/python-bindings) branch | [Noel Kuntze](https://github.com/Thermi) |
-| .NET (obsolete) | [mdbx.NET](https://github.com/wangjia184/mdbx.NET) | [Jerry Wang](https://github.com/wangjia184) |
-
-
-
--------------------------------------------------------------------------------
-
-
-
-Performance comparison
-======================
-
-All benchmarks were done in 2015 with [IOArena](https://abf.io/erthink/ioarena.git)
-and multiple [scripts](https://github.com/pmwkaa/ioarena/tree/HL%2B%2B2015),
-run on a Lenovo Carbon-2 laptop: i7-4600U 2.1 GHz (2 physical cores, 4 HyperThreading cores), 8 GB RAM,
-SSD SAMSUNG MZNTD512HAGL-000L1 (DXT23L0Q) 512 GB.
-
-## Integral performance
-
-Shown here is the sum of the performance metrics from 3 benchmarks:
-
- - Read/Search on a machine with 4 logical CPUs in HyperThreading mode (i.e. actually 2 physical CPU cores);
-
- - Transactions with [CRUD](https://en.wikipedia.org/wiki/CRUD)
- operations in sync-write mode (fdatasync is called after each
- transaction);
-
- - Transactions with [CRUD](https://en.wikipedia.org/wiki/CRUD)
- operations in lazy-write mode (the moment to sync data to persistent storage
- is decided by the OS).
-
-*Reasons why asynchronous mode isn't benchmarked here:*
-
- 1. It doesn't make sense, as this has to be done for DB engines oriented
- towards keeping data in memory, e.g. [Tarantool](https://tarantool.io/),
- [Redis](https://redis.io/), etc.
-
- 2. The performance gap is too high to compare in any meaningful way.
-
-![Comparison #1: Integral Performance](https://libmdbx.dqdkfa.ru/img/perf-slide-1.png)
-
--------------------------------------------------------------------------------
-
-## Read Scalability
-
-Summary performance with concurrent read/search queries in 1-2-4-8
-threads on a machine with 4 logical CPUs in HyperThreading mode (i.e. actually 2 physical CPU cores).
-
-![Comparison #2: Read Scalability](https://libmdbx.dqdkfa.ru/img/perf-slide-2.png)
-
--------------------------------------------------------------------------------
-
-## Sync-write mode
-
- - The linear scale on the left and the dark rectangles denote the arithmetic mean of
- transactions per second;
-
- - The logarithmic scale on the right is in seconds, and the yellow intervals denote the
- execution time of transactions. Each interval shows the minimal and maximum
- execution time, and the cross marks the standard deviation.
-
-**10,000 transactions in sync-write mode**. In case of a crash all data
-is consistent and conforms to the last successful transaction. The
-[fdatasync](https://linux.die.net/man/2/fdatasync) syscall is used after
-each write transaction in this mode.
-
-In the benchmark each transaction contains combined CRUD operations (2
-inserts, 1 read, 1 update, 1 delete).
-The benchmark starts on an empty database,
-and after a full run the database contains 10,000 small key-value records.
-
-![Comparison #3: Sync-write mode](https://libmdbx.dqdkfa.ru/img/perf-slide-3.png)
-
--------------------------------------------------------------------------------
-
-## Lazy-write mode
-
- - The linear scale on the left and the dark rectangles denote the arithmetic mean of
- thousands of transactions per second;
-
- - The logarithmic scale on the right is in seconds, and the yellow intervals denote the
- execution time of transactions. Each interval shows the minimal and maximum
- execution time, and the cross marks the standard deviation.
-
-**100,000 transactions in lazy-write mode**. In case of a crash all data
-is consistent and conforms to one of the last successful transactions, but
-transactions after it will be lost. Other DB engines use a
-[WAL](https://en.wikipedia.org/wiki/Write-ahead_logging) or transaction
-journal for that, which in turn depends on the order of operations in the
-journaled filesystem. _libmdbx_ doesn't use a WAL and hands I/O operations
-over to the filesystem and OS kernel (mmap).
-
-In the benchmark each transaction contains combined CRUD operations (2
-inserts, 1 read, 1 update, 1 delete). The benchmark starts on an empty database,
-and after a full run the database contains 100,000 small key-value
-records.
-
-
-![Comparison #4: Lazy-write mode](https://libmdbx.dqdkfa.ru/img/perf-slide-4.png)
-
--------------------------------------------------------------------------------
-
-## Async-write mode
-
- - The linear scale on the left and the dark rectangles denote the arithmetic mean of
- thousands of transactions per second;
-
- - The logarithmic scale on the right is in seconds, and the yellow intervals denote the
- execution time of transactions. Each interval shows the minimal and maximum
- execution time, and the cross marks the standard deviation.
-
-**1,000,000 transactions in async-write mode**.
-In case of a crash all data is consistent and conforms to one of the last successful transactions,
-but the number of lost transactions is much higher than in
-lazy-write mode. All DB engines in this mode do as few writes as
-possible to persistent storage. _libmdbx_ uses
-[msync(MS_ASYNC)](https://linux.die.net/man/2/msync) in this mode.
-
-In the benchmark each transaction contains combined CRUD operations (2
-inserts, 1 read, 1 update, 1 delete). The benchmark starts on an empty database,
-and after a full run the database contains 10,000 small key-value records.
-
-![Comparison #5: Async-write mode](https://libmdbx.dqdkfa.ru/img/perf-slide-5.png)
-
--------------------------------------------------------------------------------
-
-## Cost comparison
-
-Summary of the resources used during the lazy-write mode benchmarks:
-
- - Read and write IOPS;
-
- - Sum of user CPU time and sys CPU time;
-
- - Used space on persistent storage after the test with the DB closed, but without
- waiting for the end of all internal housekeeping operations (LSM
- compactification, etc).
-
-_ForestDB_ is excluded because the benchmark showed that its consumption
-of each resource (CPU, IOPS) is much higher than that of the other engines,
-which prevents meaningfully comparing it with them.
-
-All benchmark data is gathered by the
-[getrusage()](http://man7.org/linux/man-pages/man2/getrusage.2.html)
-syscall and by scanning the data directory.
- -![Comparison #6: Cost comparison](https://libmdbx.dqdkfa.ru/img/perf-slide-6.png) - - diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs index 4a37b941bd9..ff09838e3c3 100644 --- a/crates/storage/libmdbx-rs/src/environment.rs +++ b/crates/storage/libmdbx-rs/src/environment.rs @@ -186,8 +186,8 @@ where /// /// Note: /// - /// * LMDB stores all the freelists in the designated database 0 in each environment, and the - /// freelist count is stored at the beginning of the value as `libc::size_t` in the native + /// * MDBX stores all the freelists in the designated database 0 in each environment, and the + /// freelist count is stored at the beginning of the value as `libc::uint32_t` in the native /// byte order. /// /// * It will create a read transaction to traverse the freelist database. @@ -199,16 +199,12 @@ where for result in cursor { let (_key, value) = result?; - if value.len() < mem::size_of::() { + if value.len() < size_of::() { return Err(Error::Corrupted) } - let s = &value[..mem::size_of::()]; - if cfg!(target_pointer_width = "64") { - freelist += NativeEndian::read_u64(s) as usize; - } else { - freelist += NativeEndian::read_u32(s) as usize; - } + let s = &value[..size_of::()]; + freelist += NativeEndian::read_u32(s) as usize; } Ok(freelist) diff --git a/crates/storage/provider/src/post_state/mod.rs b/crates/storage/provider/src/post_state/mod.rs index ad4c8a7485f..b17190af6b7 100644 --- a/crates/storage/provider/src/post_state/mod.rs +++ b/crates/storage/provider/src/post_state/mod.rs @@ -681,7 +681,7 @@ impl PostState { let contract_log_pruner = self .prune_modes - .contract_logs_filter + .receipts_log_filter .group_by_block(tip, None) .map_err(|e| Error::Custom(e.to_string()))?; diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 506cc59ebab..b45417909b9 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -111,14 +111,18 @@ impl ProviderFactory { // If we pruned account or storage history, we can't return state on every historical block. // Instead, we should cap it at the latest prune checkpoint for corresponding prune part. 
- if let Some(prune_checkpoint) = account_history_prune_checkpoint { + if let Some(prune_checkpoint_block_number) = + account_history_prune_checkpoint.and_then(|checkpoint| checkpoint.block_number) + { state_provider = state_provider.with_lowest_available_account_history_block_number( - prune_checkpoint.block_number + 1, + prune_checkpoint_block_number + 1, ); } - if let Some(prune_checkpoint) = storage_history_prune_checkpoint { + if let Some(prune_checkpoint_block_number) = + storage_history_prune_checkpoint.and_then(|checkpoint| checkpoint.block_number) + { state_provider = state_provider.with_lowest_available_storage_history_block_number( - prune_checkpoint.block_number + 1, + prune_checkpoint_block_number + 1, ); } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index b013ee69773..85c497cc443 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -17,7 +17,7 @@ use reth_db::{ sharded_key, storage_sharded_key::StorageShardedKey, AccountBeforeTx, BlockNumberAddress, ShardedKey, StoredBlockBodyIndices, StoredBlockOmmers, StoredBlockWithdrawals, }, - table::Table, + table::{Table, TableRow}, tables, transaction::{DbTx, DbTxMut}, BlockNumberList, DatabaseError, @@ -624,85 +624,61 @@ impl<'this, TX: DbTxMut<'this> + DbTx<'this>> DatabaseProvider<'this, TX> { } /// Prune the table for the specified pre-sorted key iterator. - /// Returns number of rows pruned. - pub fn prune_table_with_iterator( - &self, - keys: impl IntoIterator, - ) -> std::result::Result { - self.prune_table_with_iterator_in_batches::(keys, usize::MAX, |_| {}, |_| false) - } - - /// Prune the table for the specified pre-sorted key iterator, calling `chunk_callback` after - /// every `batch_size` pruned rows with number of total rows pruned. - /// - /// `skip_filter` can be used to skip pruning certain elements. /// /// Returns number of rows pruned. - pub fn prune_table_with_iterator_in_batches( + pub fn prune_table_with_iterator( &self, keys: impl IntoIterator, - batch_size: usize, - mut batch_callback: impl FnMut(usize), - skip_filter: impl Fn(&T::Value) -> bool, - ) -> std::result::Result { + limit: usize, + mut delete_callback: impl FnMut(TableRow), + ) -> std::result::Result<(usize, bool), DatabaseError> { let mut cursor = self.tx.cursor_write::()?; let mut deleted = 0; - for key in keys { - if let Some((_, value)) = cursor.seek_exact(key)? { - if !skip_filter(&value) { - cursor.delete_current()?; - deleted += 1; - } + let mut keys = keys.into_iter(); + for key in &mut keys { + let row = cursor.seek_exact(key.clone())?; + if let Some(row) = row { + cursor.delete_current()?; + deleted += 1; + delete_callback(row); } - if deleted % batch_size == 0 { - batch_callback(deleted); + if deleted == limit { + break } } - if deleted % batch_size != 0 { - batch_callback(deleted); - } - - Ok(deleted) + Ok((deleted, keys.next().is_none())) } - /// Prune the table for the specified key range, calling `chunk_callback` after every - /// `batch_size` pruned rows with number of total unique keys and total rows pruned. For dupsort - /// tables, these numbers will be different as one key can correspond to multiple rows. + /// Prune the table for the specified key range. /// - /// Returns number of rows pruned. - pub fn prune_table_with_range_in_batches( + /// Returns number of total unique keys and total rows pruned pruned. 
+ pub fn prune_table_with_range( &self, - keys: impl RangeBounds, - batch_size: usize, - mut batch_callback: impl FnMut(usize, usize), - ) -> std::result::Result<(), DatabaseError> { + keys: impl RangeBounds + Clone + Debug, + limit: usize, + mut skip_filter: impl FnMut(&TableRow) -> bool, + mut delete_callback: impl FnMut(TableRow), + ) -> std::result::Result<(usize, bool), DatabaseError> { let mut cursor = self.tx.cursor_write::()?; - let mut walker = cursor.walk_range(keys)?; - let mut deleted_keys = 0; - let mut deleted_rows = 0; - let mut previous_key = None; - - while let Some((key, _)) = walker.next().transpose()? { - walker.delete_current()?; - deleted_rows += 1; - if previous_key.as_ref().map(|previous_key| previous_key != &key).unwrap_or(true) { - deleted_keys += 1; - previous_key = Some(key); - } + let mut walker = cursor.walk_range(keys.clone())?; + let mut deleted = 0; - if deleted_rows % batch_size == 0 { - batch_callback(deleted_keys, deleted_rows); + while let Some(row) = walker.next().transpose()? { + if !skip_filter(&row) { + walker.delete_current()?; + deleted += 1; + delete_callback(row); } - } - if deleted_rows % batch_size != 0 { - batch_callback(deleted_keys, deleted_rows); + if deleted == limit { + break + } } - Ok(()) + Ok((deleted, walker.next().transpose()?.is_none())) } /// Load shard and remove it. If list is empty, last shard was full or diff --git a/crates/tasks/Cargo.toml b/crates/tasks/Cargo.toml index fa9f52b33ed..eaff30d28cf 100644 --- a/crates/tasks/Cargo.toml +++ b/crates/tasks/Cargo.toml @@ -17,7 +17,7 @@ futures-util.workspace = true ## metrics reth-metrics.workspace = true -metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation +metrics.workspace = true ## misc tracing.workspace = true diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index dc8376e855f..a36d71776a0 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -34,7 +34,7 @@ tokio-stream.workspace = true # metrics reth-metrics.workspace = true -metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation +metrics.workspace = true # misc aquamarine = "0.3.0" diff --git a/crates/transaction-pool/src/blobstore/maintain.rs b/crates/transaction-pool/src/blobstore/maintain.rs deleted file mode 100644 index cfc4c8fc68c..00000000000 --- a/crates/transaction-pool/src/blobstore/maintain.rs +++ /dev/null @@ -1,27 +0,0 @@ -//! Support for maintaining the blob pool. - -use crate::blobstore::BlobStore; -use reth_primitives::H256; -use std::collections::BTreeMap; - -/// The type that is used to maintain the blob store and discard finalized transactions. -#[derive(Debug)] -#[allow(unused)] -pub struct BlobStoreMaintainer { - /// The blob store that holds all the blob data. - store: S, - /// Keeps track of the blob transactions that are in blocks. - blob_txs_in_blocks: BTreeMap>, -} - -impl BlobStoreMaintainer { - /// Creates a new blob store maintenance instance. - pub fn new(store: S) -> Self { - Self { store, blob_txs_in_blocks: Default::default() } - } -} - -impl BlobStoreMaintainer { - /// Invoked when a block is finalized. 
- pub fn on_finalized(&mut self, _block_number: u64) {} -} diff --git a/crates/transaction-pool/src/blobstore/mem.rs b/crates/transaction-pool/src/blobstore/mem.rs index 6d1dcb76aa1..187b9026f0b 100644 --- a/crates/transaction-pool/src/blobstore/mem.rs +++ b/crates/transaction-pool/src/blobstore/mem.rs @@ -16,16 +16,17 @@ pub struct InMemoryBlobStore { struct InMemoryBlobStoreInner { /// Storage for all blob data. store: RwLock>, - size: AtomicUsize, + data_size: AtomicUsize, + num_blobs: AtomicUsize, } impl InMemoryBlobStoreInner { fn add_size(&self, add: usize) { - self.size.fetch_add(add, std::sync::atomic::Ordering::Relaxed); + self.data_size.fetch_add(add, std::sync::atomic::Ordering::Relaxed); } fn sub_size(&self, sub: usize) { - self.size.fetch_sub(sub, std::sync::atomic::Ordering::Relaxed); + self.data_size.fetch_sub(sub, std::sync::atomic::Ordering::Relaxed); } fn update_size(&self, add: usize, sub: usize) { @@ -35,6 +36,10 @@ impl InMemoryBlobStoreInner { self.sub_size(sub - add); } } + + fn update_len(&self, len: usize) { + self.num_blobs.store(len, std::sync::atomic::Ordering::Relaxed); + } } impl BlobStore for InMemoryBlobStore { @@ -42,6 +47,7 @@ impl BlobStore for InMemoryBlobStore { let mut store = self.inner.store.write(); let (add, sub) = insert_size(&mut store, tx, data); self.inner.update_size(add, sub); + self.inner.update_len(store.len()); Ok(()) } @@ -58,6 +64,7 @@ impl BlobStore for InMemoryBlobStore { total_sub += sub; } self.inner.update_size(total_add, total_sub); + self.inner.update_len(store.len()); Ok(()) } @@ -65,6 +72,7 @@ impl BlobStore for InMemoryBlobStore { let mut store = self.inner.store.write(); let sub = remove_size(&mut store, &tx); self.inner.sub_size(sub); + self.inner.update_len(store.len()); Ok(()) } @@ -78,6 +86,7 @@ impl BlobStore for InMemoryBlobStore { total_sub += remove_size(&mut store, &tx); } self.inner.sub_size(total_sub); + self.inner.update_len(store.len()); Ok(()) } @@ -103,7 +112,11 @@ impl BlobStore for InMemoryBlobStore { } fn data_size_hint(&self) -> Option { - Some(self.inner.size.load(std::sync::atomic::Ordering::Relaxed)) + Some(self.inner.data_size.load(std::sync::atomic::Ordering::Relaxed)) + } + + fn blobs_len(&self) -> usize { + self.inner.num_blobs.load(std::sync::atomic::Ordering::Relaxed) } } diff --git a/crates/transaction-pool/src/blobstore/mod.rs b/crates/transaction-pool/src/blobstore/mod.rs index dcc6764389e..786bbcd4f4d 100644 --- a/crates/transaction-pool/src/blobstore/mod.rs +++ b/crates/transaction-pool/src/blobstore/mod.rs @@ -1,14 +1,14 @@ //! Storage for blob data of EIP4844 transactions. +pub use mem::InMemoryBlobStore; +pub use noop::NoopBlobStore; use reth_primitives::{BlobTransactionSidecar, H256}; use std::fmt; -mod maintain; +pub use tracker::{BlobStoreCanonTracker, BlobStoreUpdates}; + mod mem; mod noop; - -pub use maintain::BlobStoreMaintainer; -pub use mem::InMemoryBlobStore; -pub use noop::NoopBlobStore; +mod tracker; /// A blob store that can be used to store blob data of EIP4844 transactions. /// @@ -43,6 +43,9 @@ pub trait BlobStore: fmt::Debug + Send + Sync + 'static { /// Data size of all transactions in the blob store. fn data_size_hint(&self) -> Option; + + /// How many blobs are in the blob store. + fn blobs_len(&self) -> usize; } /// Error variants that can occur when interacting with a blob store. 
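The hunks above add `blobs_len`/`data_size_hint` so the pool can report blob-store gauges, and re-export `BlobStoreCanonTracker`, which decides which blob sidecars become deletable once their blocks are finalized. Below is a minimal sketch of that finalization flow under the re-exports shown in this diff (assuming the `blobstore` module is publicly reachable as `reth_transaction_pool::blobstore`); `prune_finalized_blobs` is a hypothetical helper, not part of the PR:

```rust
use reth_primitives::{BlockNumber, H256};
use reth_transaction_pool::blobstore::{BlobStoreCanonTracker, BlobStoreUpdates};

/// Hypothetical helper: drains every blob tx mined at or below `finalized`,
/// returning the hashes the caller should hand to `delete_blobs`.
fn prune_finalized_blobs(
    tracker: &mut BlobStoreCanonTracker,
    finalized: BlockNumber,
) -> Vec<H256> {
    match tracker.on_finalized_block(finalized) {
        BlobStoreUpdates::Finalized(txs) => txs,
        BlobStoreUpdates::None => Vec::new(),
    }
}

fn main() {
    let mut tracker = BlobStoreCanonTracker::default();
    // Record which EIP-4844 transactions were mined in which block.
    tracker.add_block(1, vec![H256::random()]);
    tracker.add_block(2, vec![H256::random()]);

    // Finalizing block 1 releases only the blobs of block 1;
    // block 2's blobs stay tracked until it is finalized too.
    assert_eq!(prune_finalized_blobs(&mut tracker, 1).len(), 1);
    assert_eq!(prune_finalized_blobs(&mut tracker, 2).len(), 1);
}
```

In the PR itself this flow is driven from `maintain_transaction_pool`, which feeds the returned hashes into `pool.delete_blobs` (see the `maintain.rs` hunk later in this diff).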
diff --git a/crates/transaction-pool/src/blobstore/noop.rs b/crates/transaction-pool/src/blobstore/noop.rs index d21bf59ef18..3cb30a22e9e 100644 --- a/crates/transaction-pool/src/blobstore/noop.rs +++ b/crates/transaction-pool/src/blobstore/noop.rs @@ -37,4 +37,8 @@ impl BlobStore for NoopBlobStore { fn data_size_hint(&self) -> Option { Some(0) } + + fn blobs_len(&self) -> usize { + 0 + } } diff --git a/crates/transaction-pool/src/blobstore/tracker.rs b/crates/transaction-pool/src/blobstore/tracker.rs new file mode 100644 index 00000000000..20461e11265 --- /dev/null +++ b/crates/transaction-pool/src/blobstore/tracker.rs @@ -0,0 +1,94 @@ +//! Support for maintaining the blob pool. + +use reth_primitives::{BlockNumber, H256}; +use reth_provider::chain::ChainBlocks; +use std::collections::BTreeMap; + +/// The type that is used to track canonical blob transactions. +#[derive(Debug, Default, Eq, PartialEq)] +pub struct BlobStoreCanonTracker { + /// Keeps track of the blob transactions included in blocks. + blob_txs_in_blocks: BTreeMap>, +} + +impl BlobStoreCanonTracker { + /// Adds a block to the blob store maintenance. + pub fn add_block( + &mut self, + block_number: BlockNumber, + blob_txs: impl IntoIterator, + ) { + self.blob_txs_in_blocks.insert(block_number, blob_txs.into_iter().collect()); + } + + /// Adds all blocks to the tracked list of blocks. + pub fn add_blocks( + &mut self, + blocks: impl IntoIterator)>, + ) { + for (block_number, blob_txs) in blocks { + self.add_block(block_number, blob_txs); + } + } + + /// Adds all blob transactions from the given chain to the tracker. + pub fn add_new_chain_blocks(&mut self, blocks: &ChainBlocks<'_>) { + let blob_txs = blocks.iter().map(|(num, blocks)| { + let iter = + blocks.body.iter().filter(|tx| tx.transaction.is_eip4844()).map(|tx| tx.hash); + (*num, iter) + }); + self.add_blocks(blob_txs); + } + + /// Invoked when a block is finalized. + pub fn on_finalized_block(&mut self, number: BlockNumber) -> BlobStoreUpdates { + let mut finalized = Vec::new(); + while let Some(entry) = self.blob_txs_in_blocks.first_entry() { + if *entry.key() <= number { + finalized.extend(entry.remove_entry().1); + } else { + break + } + } + + if finalized.is_empty() { + BlobStoreUpdates::None + } else { + BlobStoreUpdates::Finalized(finalized) + } + } +} + +/// Updates that should be applied to the blob store. +#[derive(Debug, Eq, PartialEq)] +pub enum BlobStoreUpdates { + /// No updates. + None, + /// Delete the given finalized transactions from the blob store. 
+ Finalized(Vec), +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_finalized_tracker() { + let mut tracker = BlobStoreCanonTracker::default(); + + let block1 = vec![H256::random()]; + let block2 = vec![H256::random()]; + let block3 = vec![H256::random()]; + tracker.add_block(1, block1.clone()); + tracker.add_block(2, block2.clone()); + tracker.add_block(3, block3.clone()); + + assert_eq!(tracker.on_finalized_block(0), BlobStoreUpdates::None); + assert_eq!(tracker.on_finalized_block(1), BlobStoreUpdates::Finalized(block1)); + assert_eq!( + tracker.on_finalized_block(3), + BlobStoreUpdates::Finalized(block2.into_iter().chain(block3).collect::>()) + ); + } +} diff --git a/crates/transaction-pool/src/error.rs b/crates/transaction-pool/src/error.rs index 12ccfbebe5c..fd2ca0bf7e4 100644 --- a/crates/transaction-pool/src/error.rs +++ b/crates/transaction-pool/src/error.rs @@ -138,6 +138,9 @@ pub enum InvalidPoolTransactionError { /// Thrown if the transaction's fee is below the minimum fee #[error("transaction underpriced")] Underpriced, + /// Thrown if we're unable to find the blob for a transaction that was previously extracted + #[error("blob not found for EIP4844 transaction")] + MissingEip4844Blob, /// Any other error that occurred while inserting/validating that is transaction specific #[error("{0:?}")] Other(Box), @@ -195,6 +198,11 @@ impl InvalidPoolTransactionError { false } InvalidPoolTransactionError::Other(err) => err.is_bad_transaction(), + InvalidPoolTransactionError::MissingEip4844Blob => { + // this is only reachable when blob transactions are reinjected and we're unable to + // find the previously extracted blob + false + } } } } diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 6f2243efc00..1a58a697979 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -156,7 +156,7 @@ //! 
- `test-utils`: Export utilities for testing use crate::pool::PoolInner; use aquamarine as _; -use reth_primitives::{Address, BlobTransactionSidecar, TxHash, U256}; +use reth_primitives::{Address, BlobTransactionSidecar, PooledTransactionsElement, TxHash, U256}; use reth_provider::StateProviderFactory; use std::{ collections::{HashMap, HashSet}, @@ -180,7 +180,8 @@ pub use crate::{ }, traits::{ AllPoolTransactions, BestTransactions, BlockInfo, CanonicalStateUpdate, ChangedAccount, - EthPooledTransaction, NewTransactionEvent, PendingTransactionListenerKind, PoolSize, + EthBlobTransactionSidecar, EthPoolTransaction, EthPooledTransaction, + GetPooledTransactionLimit, NewTransactionEvent, PendingTransactionListenerKind, PoolSize, PoolTransaction, PropagateKind, PropagatedTransactions, TransactionOrigin, TransactionPool, TransactionPoolExt, }, @@ -403,6 +404,14 @@ where self.pooled_transactions().into_iter().take(max).collect() } + fn get_pooled_transaction_elements( + &self, + tx_hashes: Vec, + limit: GetPooledTransactionLimit, + ) -> Vec { + self.pool.get_pooled_transaction_elements(tx_hashes, limit) + } + fn best_transactions( &self, ) -> Box>>> { @@ -486,13 +495,21 @@ where self.pool.set_block_info(info) } - fn on_canonical_state_change(&self, update: CanonicalStateUpdate) { + fn on_canonical_state_change(&self, update: CanonicalStateUpdate<'_>) { self.pool.on_canonical_state_change(update); } fn update_accounts(&self, accounts: Vec) { self.pool.update_accounts(accounts); } + + fn delete_blob(&self, tx: TxHash) { + self.pool.delete_blob(tx) + } + + fn delete_blobs(&self, txs: Vec) { + self.pool.delete_blobs(txs) + } } impl Clone for Pool { diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 05bb7824a09..fd3c83a4854 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -1,6 +1,7 @@ //! 
Support for maintaining the state of the transaction pool use crate::{ + blobstore::{BlobStoreCanonTracker, BlobStoreUpdates}, metrics::MaintainPoolMetrics, traits::{CanonicalStateUpdate, ChangedAccount, TransactionPoolExt}, BlockInfo, TransactionPool, @@ -9,7 +10,9 @@ use futures_util::{ future::{BoxFuture, Fuse, FusedFuture}, FutureExt, Stream, StreamExt, }; -use reth_primitives::{Address, BlockHash, BlockNumberOrTag, FromRecoveredTransaction}; +use reth_primitives::{ + Address, BlockHash, BlockNumber, BlockNumberOrTag, FromRecoveredTransaction, +}; use reth_provider::{ BlockReaderIdExt, CanonStateNotification, ChainSpecProvider, PostState, StateProviderFactory, }; @@ -93,6 +96,13 @@ pub async fn maintain_transaction_pool( pool.set_block_info(info); } + // keeps track of mined blob transaction so we can clean finalized transactions + let mut blob_store_tracker = BlobStoreCanonTracker::default(); + + // keeps track of the latest finalized block + let mut last_finalized_block = + FinalizedBlockTracker::new(client.finalized_block_number().ok().flatten()); + // keeps track of any dirty accounts that we know of are out of sync with the pool let mut dirty_addresses = HashSet::new(); @@ -150,6 +160,19 @@ pub async fn maintain_transaction_pool( task_spawner.spawn_blocking(fut); } + // check if we have a new finalized block + if let Some(finalized) = + last_finalized_block.update(client.finalized_block_number().ok().flatten()) + { + match blob_store_tracker.on_finalized_block(finalized) { + BlobStoreUpdates::None => {} + BlobStoreUpdates::Finalized(blobs) => { + // remove all finalized blobs from the blob store + pool.delete_blobs(blobs); + } + } + } + // outcomes of the futures we are waiting on let mut event = None; let mut reloaded = None; @@ -267,13 +290,11 @@ pub async fn maintain_transaction_pool( // update the pool first let update = CanonicalStateUpdate { - hash: new_tip.hash, - number: new_tip.number, + new_tip: &new_tip.block, pending_block_base_fee, changed_accounts, // all transactions mined in the new chain need to be removed from the pool mined_transactions: new_mined_transactions.into_iter().collect(), - timestamp: new_tip.timestamp, }; pool.on_canonical_state_change(update); @@ -283,6 +304,10 @@ pub async fn maintain_transaction_pool( // Note: we no longer know if the tx was local or external metrics.inc_reinserted_transactions(pruned_old_transactions.len()); let _ = pool.add_external_transactions(pruned_old_transactions).await; + + // keep track of mined blob transactions + // TODO(mattsse): handle reorged transactions + blob_store_tracker.add_new_chain_blocks(&new_blocks); } CanonStateNotification::Commit { new } => { let (blocks, state) = new.inner(); @@ -314,6 +339,10 @@ pub async fn maintain_transaction_pool( pending_basefee: pending_block_base_fee, }; pool.set_block_info(info); + + // keep track of mined blob transactions + blob_store_tracker.add_new_chain_blocks(&blocks); + continue } @@ -336,15 +365,45 @@ pub async fn maintain_transaction_pool( // Canonical update let update = CanonicalStateUpdate { - hash: tip.hash, - number: tip.number, + new_tip: &tip.block, pending_block_base_fee, changed_accounts, mined_transactions, - timestamp: tip.timestamp, }; pool.on_canonical_state_change(update); + + // keep track of mined blob transactions + blob_store_tracker.add_new_chain_blocks(&blocks); + } + } + } +} + +struct FinalizedBlockTracker { + last_finalized_block: Option, +} + +impl FinalizedBlockTracker { + fn new(last_finalized_block: Option) -> Self { + Self { 
last_finalized_block } + } + + /// Updates the tracked finalized block and returns the new finalized block if it changed + fn update(&mut self, finalized_block: Option) -> Option { + match (self.last_finalized_block, finalized_block) { + (Some(last), Some(finalized)) => { + self.last_finalized_block = Some(finalized); + if last < finalized { + Some(finalized) + } else { + None + } + } + (None, Some(finalized)) => { + self.last_finalized_block = Some(finalized); + Some(finalized) } + _ => None, } } } diff --git a/crates/transaction-pool/src/metrics.rs b/crates/transaction-pool/src/metrics.rs index 85c3d707f60..966834c63b3 100644 --- a/crates/transaction-pool/src/metrics.rs +++ b/crates/transaction-pool/src/metrics.rs @@ -46,6 +46,10 @@ pub struct BlobStoreMetrics { pub(crate) blobstore_failed_inserts: Counter, /// Number of failed deletes into the blobstore pub(crate) blobstore_failed_deletes: Counter, + /// The number of bytes the blobs in the blobstore take up + pub(crate) blobstore_byte_size: Gauge, + /// How many blobs are currently in the blobstore + pub(crate) blobstore_entries: Gauge, } /// Transaction pool maintenance metrics diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index 015554b53bd..3817ff8f997 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -4,9 +4,12 @@ //! to be generic over it. use crate::{ - blobstore::BlobStoreError, error::PoolError, traits::PendingTransactionListenerKind, - validate::ValidTransaction, AllPoolTransactions, AllTransactionsEvents, BestTransactions, - BlockInfo, EthPooledTransaction, NewTransactionEvent, PoolResult, PoolSize, PoolTransaction, + blobstore::BlobStoreError, + error::PoolError, + traits::{GetPooledTransactionLimit, PendingTransactionListenerKind}, + validate::ValidTransaction, + AllPoolTransactions, AllTransactionsEvents, BestTransactions, BlockInfo, EthPooledTransaction, + NewTransactionEvent, PoolResult, PoolSize, PoolTransaction, PooledTransactionsElement, PropagatedTransactions, TransactionEvents, TransactionOrigin, TransactionPool, TransactionValidationOutcome, TransactionValidator, ValidPoolTransaction, }; @@ -108,6 +111,14 @@ impl TransactionPool for NoopTransactionPool { vec![] } + fn get_pooled_transaction_elements( + &self, + _tx_hashes: Vec, + _limit: GetPooledTransactionLimit, + ) -> Vec { + vec![] + } + fn best_transactions( &self, ) -> Box>>> { diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index 7beb11e8ec0..5fc5ebc9313 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -25,6 +25,10 @@ impl crate::traits::BestTransactions for BestTransaction fn mark_invalid(&mut self, tx: &Self::Item) { BestTransactions::mark_invalid(&mut self.best, tx) } + + fn no_updates(&mut self) { + self.best.no_updates() + } } impl Iterator for BestTransactionsWithBasefee { @@ -67,7 +71,7 @@ pub(crate) struct BestTransactions { /// /// These new pending transactions are inserted into this iterator's pool before yielding the /// next value - pub(crate) new_transaction_reciever: Receiver>, + pub(crate) new_transaction_receiver: Option>>, } impl BestTransactions { @@ -87,7 +91,7 @@ impl BestTransactions { /// Non-blocking read on the new pending transactions subscription channel fn try_recv(&mut self) -> Option> { loop { - match self.new_transaction_reciever.try_recv() { + match self.new_transaction_receiver.as_mut()?.try_recv() { Ok(tx) => return Some(tx), // note 
TryRecvError::Lagged can be returned here, which is an error that attempts // to correct itself on consecutive try_recv() attempts @@ -126,6 +130,10 @@ impl crate::traits::BestTransactions for BestTransaction fn mark_invalid(&mut self, tx: &Self::Item) { BestTransactions::mark_invalid(self, tx) } + + fn no_updates(&mut self) { + self.new_transaction_receiver.take(); + } } impl Iterator for BestTransactions { diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 3b633eec228..4efb0234f06 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -82,7 +82,10 @@ use crate::{ }; use best::BestTransactions; use parking_lot::{Mutex, RwLock}; -use reth_primitives::{Address, BlobTransactionSidecar, TxHash, H256}; +use reth_primitives::{ + Address, BlobTransaction, BlobTransactionSidecar, IntoRecoveredTransaction, + PooledTransactionsElement, TransactionSigned, TxHash, H256, +}; use std::{ collections::{HashMap, HashSet}, fmt, @@ -97,10 +100,14 @@ pub use events::{FullTransactionEvent, TransactionEvent}; mod listener; use crate::{ - blobstore::BlobStore, metrics::BlobStoreMetrics, pool::txpool::UpdateOutcome, - traits::PendingTransactionListenerKind, validate::ValidTransaction, + blobstore::BlobStore, + metrics::BlobStoreMetrics, + pool::txpool::UpdateOutcome, + traits::{GetPooledTransactionLimit, PendingTransactionListenerKind}, + validate::ValidTransaction, }; pub use listener::{AllTransactionsEvents, TransactionEvents}; +use reth_rlp::Encodable; mod best; mod parked; @@ -269,24 +276,59 @@ where pool.all().transactions_iter().filter(|tx| tx.propagate).collect() } + /// Returns the [BlobTransaction] for the given transaction if the sidecar exists. + /// + /// Caution: this assumes the given transaction is eip-4844 + fn get_blob_transaction(&self, transaction: TransactionSigned) -> Option { + if let Ok(Some(sidecar)) = self.blob_store.get(transaction.hash()) { + if let Ok(blob) = BlobTransaction::try_from_signed(transaction, sidecar) { + return Some(blob) + } + } + None + } + + /// Returns converted [PooledTransactionsElement] for the given transaction hashes. + pub(crate) fn get_pooled_transaction_elements( + &self, + tx_hashes: Vec, + limit: GetPooledTransactionLimit, + ) -> Vec { + let transactions = self.get_all(tx_hashes); + let mut elements = Vec::with_capacity(transactions.len()); + let mut size = 0; + for transaction in transactions { + let tx = transaction.to_recovered_transaction().into_signed(); + let pooled = if tx.is_eip4844() { + if let Some(blob) = self.get_blob_transaction(tx) { + PooledTransactionsElement::BlobTransaction(blob) + } else { + continue + } + } else { + PooledTransactionsElement::from(tx) + }; + + size += pooled.length(); + elements.push(pooled); + + if limit.exceeds(size) { + break + } + } + + elements + } + /// Updates the entire pool after a new block was executed. - pub(crate) fn on_canonical_state_change(&self, update: CanonicalStateUpdate) { + pub(crate) fn on_canonical_state_change(&self, update: CanonicalStateUpdate<'_>) { trace!(target: "txpool", %update, "updating pool on canonical state change"); - let CanonicalStateUpdate { - hash, - number, - pending_block_base_fee, - changed_accounts, - mined_transactions, - timestamp: _, - } = update; + let block_info = update.block_info(); + let CanonicalStateUpdate { new_tip, changed_accounts, mined_transactions, .. 
} = update; + self.validator.on_new_head_block(new_tip); + let changed_senders = self.changed_senders(changed_accounts.into_iter()); - let block_info = BlockInfo { - last_seen_block_hash: hash, - last_seen_block_number: number, - pending_basefee: pending_block_base_fee, - }; // update the pool let outcome = self.pool.write().on_canonical_state_change( @@ -309,7 +351,7 @@ where let mut listener = self.event_listener.write(); promoted.iter().for_each(|tx| listener.pending(tx.hash(), None)); - discarded.iter().for_each(|tx| listener.discarded(tx)); + discarded.iter().for_each(|tx| listener.discarded(tx.hash())); } /// Add a single validated transaction into the pool. @@ -526,7 +568,7 @@ where mined.iter().for_each(|tx| listener.mined(tx, block_hash)); promoted.iter().for_each(|tx| listener.pending(tx.hash(), None)); - discarded.iter().for_each(|tx| listener.discarded(tx)); + discarded.iter().for_each(|tx| listener.discarded(tx.hash())); } /// Fire events for the newly added transaction if there are any. @@ -539,7 +581,7 @@ where listener.pending(transaction.hash(), replaced.clone()); promoted.iter().for_each(|tx| listener.pending(tx.hash(), None)); - discarded.iter().for_each(|tx| listener.discarded(tx)); + discarded.iter().for_each(|tx| listener.discarded(tx.hash())); } AddedTransaction::Parked { transaction, replaced, .. } => { listener.queued(transaction.hash()); @@ -659,14 +701,33 @@ where warn!(target: "txpool", ?err, "[{:?}] failed to insert blob", hash); self.blob_store_metrics.blobstore_failed_inserts.increment(1); } + self.update_blob_store_metrics(); } /// Delete a blob from the blob store - fn delete_blob(&self, blob: TxHash) { + pub(crate) fn delete_blob(&self, blob: TxHash) { if let Err(err) = self.blob_store.delete(blob) { warn!(target: "txpool", ?err, "[{:?}] failed to delete blobs", blob); self.blob_store_metrics.blobstore_failed_deletes.increment(1); } + self.update_blob_store_metrics(); + } + + /// Delete all blobs from the blob store + pub(crate) fn delete_blobs(&self, txs: Vec) { + let num = txs.len(); + if let Err(err) = self.blob_store.delete_all(txs) { + warn!(target: "txpool", ?err,?num, "failed to delete blobs"); + self.blob_store_metrics.blobstore_failed_deletes.increment(num as u64); + } + self.update_blob_store_metrics(); + } + + fn update_blob_store_metrics(&self) { + if let Some(data_size) = self.blob_store.data_size_hint() { + self.blob_store_metrics.blobstore_byte_size.set(data_size as f64); + } + self.blob_store_metrics.blobstore_entries.set(self.blob_store.blobs_len() as f64); } } @@ -694,7 +755,7 @@ pub struct AddedPendingTransaction { /// transactions promoted to the pending queue promoted: Vec>>, /// transaction that failed and became discarded - discarded: Vec, + discarded: Vec>>, } impl AddedPendingTransaction { @@ -810,7 +871,7 @@ pub(crate) struct OnNewCanonicalStateOutcome { /// Transactions promoted to the ready queue. 
pub(crate) promoted: Vec>>, /// transaction that were discarded during the update - pub(crate) discarded: Vec, + pub(crate) discarded: Vec>>, } impl OnNewCanonicalStateOutcome { diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index eaff315459b..65be2a7c1e8 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -88,7 +88,7 @@ impl PendingPool { all: self.by_id.clone(), independent: self.independent_transactions.clone(), invalid: Default::default(), - new_transaction_reciever: self.new_transaction_notifier.subscribe(), + new_transaction_receiver: Some(self.new_transaction_notifier.subscribe()), } } diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 8f8ba2de0f3..29a6fa9e9e6 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -416,8 +416,9 @@ impl TxPool { match destination { Destination::Discard => { // remove the transaction from the pool and subpool - self.prune_transaction_by_hash(&hash); - outcome.discarded.push(hash); + if let Some(tx) = self.prune_transaction_by_hash(&hash) { + outcome.discarded.push(tx); + } self.metrics.removed_transactions.increment(1); } Destination::Pool(move_to) => { @@ -1336,7 +1337,7 @@ pub(crate) struct UpdateOutcome { /// transactions promoted to the pending pool pub(crate) promoted: Vec>>, /// transaction that failed and were discarded - pub(crate) discarded: Vec, + pub(crate) discarded: Vec>>, } impl Default for UpdateOutcome { diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index b889639aea6..9bfcdf232da 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -12,8 +12,9 @@ use rand::{ prelude::Distribution, }; use reth_primitives::{ - constants::MIN_PROTOCOL_BASE_FEE, hex, Address, Bytes, FromRecoveredTransaction, - IntoRecoveredTransaction, Signature, Transaction, TransactionKind, TransactionSigned, + constants::MIN_PROTOCOL_BASE_FEE, hex, Address, Bytes, FromRecoveredPooledTransaction, + FromRecoveredTransaction, IntoRecoveredTransaction, PooledTransactionsElementEcRecovered, + SealedBlock, Signature, Transaction, TransactionKind, TransactionSigned, TransactionSignedEcRecovered, TxEip1559, TxEip2930, TxEip4844, TxHash, TxLegacy, TxType, H256, U128, U256, }; @@ -580,6 +581,12 @@ impl FromRecoveredTransaction for MockTransaction { } } +impl FromRecoveredPooledTransaction for MockTransaction { + fn from_recovered_transaction(tx: PooledTransactionsElementEcRecovered) -> Self { + FromRecoveredTransaction::from_recovered_transaction(tx.into_ecrecovered_transaction()) + } +} + impl IntoRecoveredTransaction for MockTransaction { fn to_recovered_transaction(&self) -> TransactionSignedEcRecovered { let tx = Transaction::Legacy(TxLegacy { diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 1db2f7a4f6f..046bb92cb85 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -6,8 +6,9 @@ use crate::{ }; use futures_util::{ready, Stream}; use reth_primitives::{ - Address, BlobTransactionSidecar, Bytes, FromRecoveredTransaction, IntoRecoveredTransaction, - PeerId, PooledTransactionsElement, PooledTransactionsElementEcRecovered, Transaction, + Address, BlobTransactionSidecar, BlobTransactionValidationError, Bytes, + FromRecoveredPooledTransaction, 
FromRecoveredTransaction, IntoRecoveredTransaction, PeerId, + PooledTransactionsElement, PooledTransactionsElementEcRecovered, SealedBlock, Transaction, TransactionKind, TransactionSignedEcRecovered, TxHash, EIP1559_TX_TYPE_ID, EIP4844_TX_TYPE_ID, H256, U256, }; @@ -22,6 +23,7 @@ use std::{ use tokio::sync::mpsc::Receiver; use crate::blobstore::BlobStoreError; +use reth_primitives::kzg::KzgSettings; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -171,8 +173,13 @@ pub trait TransactionPool: Send + Sync + Clone { /// Returns the _full_ transaction objects all transactions in the pool. /// + /// This is intended to be used by the network for the initial exchange of pooled transaction + /// _hashes_ + /// /// Note: This returns a `Vec` but should guarantee that all transactions are unique. /// + /// Caution: In case of blob transactions, this does not include the sidecar. + /// /// Consumer: P2P fn pooled_transactions(&self) -> Vec>>; @@ -184,6 +191,21 @@ pub trait TransactionPool: Send + Sync + Clone { max: usize, ) -> Vec>>; + /// Returns converted [PooledTransactionsElement] for the given transaction hashes. + /// + /// This adheres to the expected behavior of [`GetPooledTransactions`](https://github.com/ethereum/devp2p/blob/master/caps/eth.md#getpooledtransactions-0x09): + /// The transactions must be in same order as in the request, but it is OK to skip transactions + /// which are not available. + /// + /// If the transaction is a blob transaction, the sidecar will be included. + /// + /// Consumer: P2P + fn get_pooled_transaction_elements( + &self, + tx_hashes: Vec, + limit: GetPooledTransactionLimit, + ) -> Vec; + /// Returns an iterator that yields transactions that are ready for block production. /// /// Consumer: Block production @@ -249,10 +271,7 @@ pub trait TransactionPool: Send + Sync + Clone { /// Returns all transactions objects for the given hashes. /// - /// TODO(mattsse): this will no longer be accurate and we need a new function specifically for - /// pooled txs This adheres to the expected behavior of [`GetPooledTransactions`](https://github.com/ethereum/devp2p/blob/master/caps/eth.md#getpooledtransactions-0x09): - /// The transactions must be in same order as in the request, but it is OK to skip transactions - /// which are not available. + /// Caution: This in case of blob transactions, this does not include the sidecar. fn get_all(&self, txs: Vec) -> Vec>>; /// Notify the pool about transactions that are propagated to peers. @@ -295,10 +314,16 @@ pub trait TransactionPoolExt: TransactionPool { /// Implementers need to update the pool accordingly. /// For example the base fee of the pending block is determined after a block is mined which /// affects the dynamic fee requirement of pending transactions in the pool. - fn on_canonical_state_change(&self, update: CanonicalStateUpdate); + fn on_canonical_state_change(&self, update: CanonicalStateUpdate<'_>); /// Updates the accounts in the pool fn update_accounts(&self, accounts: Vec); + + /// Deletes the blob sidecar for the given transaction from the blob store + fn delete_blob(&self, tx: H256); + + /// Deletes multiple blob sidecars from the blob store + fn delete_blobs(&self, txs: Vec); } /// Determines what kind of new pending transactions should be emitted by a stream of pending @@ -444,11 +469,9 @@ impl TransactionOrigin { /// /// This is used to update the pool state accordingly. 
#[derive(Debug, Clone)] -pub struct CanonicalStateUpdate { +pub struct CanonicalStateUpdate<'a> { /// Hash of the tip block. - pub hash: H256, - /// Number of the tip block. - pub number: u64, + pub new_tip: &'a SealedBlock, /// EIP-1559 Base fee of the _next_ (pending) block /// /// The base fee of a block depends on the utilization of the last block and its base fee. @@ -457,14 +480,38 @@ pub struct CanonicalStateUpdate { pub changed_accounts: Vec, /// All mined transactions in the block range. pub mined_transactions: Vec, +} + +impl<'a> CanonicalStateUpdate<'a> { + /// Returns the number of the tip block. + pub fn number(&self) -> u64 { + self.new_tip.number + } + + /// Returns the hash of the tip block. + pub fn hash(&self) -> H256 { + self.new_tip.hash + } + /// Timestamp of the latest chain update - pub timestamp: u64, + pub fn timestamp(&self) -> u64 { + self.new_tip.timestamp + } + + /// Returns the block info for the tip block. + pub fn block_info(&self) -> BlockInfo { + BlockInfo { + last_seen_block_hash: self.hash(), + last_seen_block_number: self.number(), + pending_basefee: self.pending_block_base_fee, + } + } } -impl fmt::Display for CanonicalStateUpdate { +impl<'a> fmt::Display for CanonicalStateUpdate<'a> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{{ hash: {}, number: {}, pending_block_base_fee: {}, changed_accounts: {}, mined_transactions: {} }}", - self.hash, self.number, self.pending_block_base_fee, self.changed_accounts.len(), self.mined_transactions.len()) + self.hash(), self.number(), self.pending_block_base_fee, self.changed_accounts.len(), self.mined_transactions.len()) } } @@ -503,16 +550,30 @@ pub trait BestTransactions: Iterator + Send { /// In other words, this must remove the given transaction _and_ drain all transactions that /// depend on it. fn mark_invalid(&mut self, transaction: &Self::Item); + + /// An iterator may be able to receive additional pending transactions that weren't present in + /// the pool when it was created. + /// + /// This ensures that the iterator will return the best transactions it currently knows of and not + /// listen for pool updates. + fn no_updates(&mut self); } /// A no-op implementation that yields no transactions. impl BestTransactions for std::iter::Empty { fn mark_invalid(&mut self, _tx: &T) {} + + fn no_updates(&mut self) {} } /// Trait for transaction types used inside the pool pub trait PoolTransaction: - fmt::Debug + Send + Sync + FromRecoveredTransaction + IntoRecoveredTransaction + fmt::Debug + + Send + + Sync + + FromRecoveredPooledTransaction + + FromRecoveredTransaction + + IntoRecoveredTransaction { /// Hash of the transaction. fn hash(&self) -> &TxHash; @@ -588,6 +649,20 @@ pub trait PoolTransaction: fn is_deposit(&self) -> bool; } +/// An extension trait that provides additional interfaces for the +/// [EthTransactionValidator](crate::EthTransactionValidator). +pub trait EthPoolTransaction: PoolTransaction { + /// Extracts the blob sidecar from the transaction. + fn take_blob(&mut self) -> EthBlobTransactionSidecar; + + /// Validates the blob sidecar of the transaction with the given settings. + fn validate_blob( + &self, + blob: &BlobTransactionSidecar, + settings: &KzgSettings, + ) -> Result<(), BlobTransactionValidationError>; +} + /// The default [PoolTransaction] for the [Pool](crate::Pool) for Ethereum. 
/// /// This type is essentially a wrapper around [TransactionSignedEcRecovered] with additional fields @@ -607,7 +682,7 @@ pub struct EthPooledTransaction { /// Represents the blob sidecar of the [EthPooledTransaction]. #[derive(Debug, Clone, PartialEq, Eq)] -pub(crate) enum EthBlobTransactionSidecar { +pub enum EthBlobTransactionSidecar { /// This transaction does not have a blob sidecar None, /// This transaction has a blob sidecar (EIP-4844) but it is missing @@ -777,12 +852,39 @@ impl PoolTransaction for EthPooledTransaction { } } +impl EthPoolTransaction for EthPooledTransaction { + fn take_blob(&mut self) -> EthBlobTransactionSidecar { + if self.is_eip4844() { + std::mem::replace(&mut self.blob_sidecar, EthBlobTransactionSidecar::Missing) + } else { + EthBlobTransactionSidecar::None + } + } + + fn validate_blob( + &self, + sidecar: &BlobTransactionSidecar, + settings: &KzgSettings, + ) -> Result<(), BlobTransactionValidationError> { + match &self.transaction.transaction { + Transaction::Eip4844(tx) => tx.validate_blob(sidecar, settings), + _ => Err(BlobTransactionValidationError::NotBlobTransaction(self.tx_type())), + } + } +} + impl FromRecoveredTransaction for EthPooledTransaction { fn from_recovered_transaction(tx: TransactionSignedEcRecovered) -> Self { EthPooledTransaction::new(tx) } } +impl FromRecoveredPooledTransaction for EthPooledTransaction { + fn from_recovered_transaction(tx: PooledTransactionsElementEcRecovered) -> Self { + EthPooledTransaction::from(tx) + } +} + impl IntoRecoveredTransaction for EthPooledTransaction { fn to_recovered_transaction(&self) -> TransactionSignedEcRecovered { self.transaction.clone() @@ -824,6 +926,26 @@ pub struct BlockInfo { pub pending_basefee: u64, } +/// The limit to enforce for [TransactionPool::get_pooled_transaction_elements]. +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub enum GetPooledTransactionLimit { + /// No limit, return all transactions. + None, + /// Enforce a size limit on the returned transactions, for example 2MB + SizeSoftLimit(usize), +} + +impl GetPooledTransactionLimit { + /// Returns true if the given size exceeds the limit. 
+ #[inline] + pub fn exceeds(&self, size: usize) -> bool { + match self { + GetPooledTransactionLimit::None => false, + GetPooledTransactionLimit::SizeSoftLimit(limit) => size > *limit, + } + } +} + /// A Stream that yields full transactions of the subpool #[must_use = "streams do nothing unless polled"] #[derive(Debug)] diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 966efb97c57..5f6d4d816d6 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -3,17 +3,24 @@ use crate::{ blobstore::BlobStore, error::InvalidPoolTransactionError, - traits::{PoolTransaction, TransactionOrigin}, + traits::TransactionOrigin, validate::{ValidTransaction, ValidationTask, MAX_INIT_CODE_SIZE, TX_MAX_SIZE}, - TransactionValidationOutcome, TransactionValidationTaskExecutor, TransactionValidator, + EthBlobTransactionSidecar, EthPoolTransaction, TransactionValidationOutcome, + TransactionValidationTaskExecutor, TransactionValidator, }; use reth_primitives::{ - bytes::BytesMut, constants::ETHEREUM_BLOCK_GAS_LIMIT, ChainSpec, InvalidTransactionError, - EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, + bytes::BytesMut, + constants::{eip4844::KZG_TRUSTED_SETUP, ETHEREUM_BLOCK_GAS_LIMIT}, + kzg::KzgSettings, + ChainSpec, InvalidTransactionError, SealedBlock, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, + EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, }; use reth_provider::{AccountReader, BlockReaderIdExt, StateProviderFactory}; use reth_tasks::TaskSpawner; -use std::{marker::PhantomData, sync::Arc}; +use std::{ + marker::PhantomData, + sync::{atomic::AtomicBool, Arc}, +}; use tokio::sync::Mutex; #[cfg(feature = "optimism")] @@ -23,18 +30,39 @@ use reth_revm::optimism::L1BlockInfo; use reth_primitives::BlockNumberOrTag; /// Validator for Ethereum transactions. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct EthTransactionValidator where Client: BlockReaderIdExt, { /// The type that performs the actual validation. - pub inner: Arc>, + inner: Arc>, +} + +#[async_trait::async_trait] +impl TransactionValidator for EthTransactionValidator +where + Client: StateProviderFactory + BlockReaderIdExt, + Tx: EthPoolTransaction, +{ + type Transaction = Tx; + + async fn validate_transaction( + &self, + origin: TransactionOrigin, + transaction: Self::Transaction, + ) -> TransactionValidationOutcome { + self.inner.validate_transaction(origin, transaction).await + } + + fn on_new_head_block(&self, new_tip_block: &SealedBlock) { + self.inner.on_new_head_block(new_tip_block) + } } /// A [TransactionValidator] implementation that validates Ethereum transactions. #[derive(Debug)] -pub struct EthTransactionValidatorInner +pub(crate) struct EthTransactionValidatorInner where Client: BlockReaderIdExt, { @@ -43,12 +71,9 @@ where /// This type fetches account info from the db client: Client, /// Blobstore used for fetching re-injected blob transactions. - #[allow(unused)] blob_store: Box, - /// Fork indicator whether we are in the Shanghai stage. - shanghai: bool, - /// Fork indicator whether we are in the Cancun hardfork. - cancun: bool, + /// Tracks activated forks relevant for transaction validation + fork_tracker: ForkTracker, /// Fork indicator whether we are using EIP-2718 type transactions. eip2718: bool, /// Fork indicator whether we are using EIP-1559 type transactions. 
@@ -61,6 +86,9 @@ where minimum_priority_fee: Option, /// Toggle to determine if a local transaction should be propagated propagate_local_transactions: bool, + /// Stores the setup and parameters needed for validating KZG proofs. + #[allow(unused)] + kzg_settings: Arc, /// Marker for the transaction type _marker: PhantomData, } @@ -72,7 +100,7 @@ where Client: BlockReaderIdExt, { /// Returns the configured chain id - pub fn chain_id(&self) -> u64 { + pub(crate) fn chain_id(&self) -> u64 { self.chain_spec.chain().id() } } @@ -81,14 +109,14 @@ where impl TransactionValidator for EthTransactionValidatorInner where Client: StateProviderFactory + BlockReaderIdExt, - Tx: PoolTransaction, + Tx: EthPoolTransaction, { type Transaction = Tx; async fn validate_transaction( &self, origin: TransactionOrigin, - transaction: Self::Transaction, + mut transaction: Self::Transaction, ) -> TransactionValidationOutcome { #[cfg(feature = "optimism")] if transaction.is_deposit() { @@ -149,7 +177,7 @@ where } // Check whether the init code size has been exceeded. - if self.shanghai { + if self.fork_tracker.is_shanghai_activated() { if let Err(err) = self.ensure_max_init_code_size(&transaction, MAX_INIT_CODE_SIZE) { return TransactionValidationOutcome::Invalid(transaction, err) } @@ -194,9 +222,42 @@ where } } + let mut blob_sidecar = None; + // blob tx checks - if self.cancun { - // TODO: implement blob tx checks + if transaction.is_eip4844() { + // Cancun fork is required for blob txs + if !self.fork_tracker.is_cancun_activated() { + return TransactionValidationOutcome::Invalid( + transaction, + InvalidTransactionError::TxTypeNotSupported.into(), + ) + } + + // extract the blob from the transaction + match transaction.take_blob() { + EthBlobTransactionSidecar::None => { + // this should not happen + return TransactionValidationOutcome::Invalid( + transaction, + InvalidTransactionError::TxTypeNotSupported.into(), + ) + } + EthBlobTransactionSidecar::Missing => { + if let Ok(Some(_)) = self.blob_store.get(*transaction.hash()) { + // validated transaction is already in the store + } else { + return TransactionValidationOutcome::Invalid( + transaction, + InvalidPoolTransactionError::MissingEip4844Blob, + ) + } + } + EthBlobTransactionSidecar::Present(blob) => { + //TODO(mattsse): verify the blob + blob_sidecar = Some(blob); + } + } } let account = match self @@ -278,7 +339,7 @@ where TransactionValidationOutcome::Valid { balance: account.balance, state_nonce: account.nonce, - transaction: ValidTransaction::Valid(transaction), + transaction: ValidTransaction::new(transaction, blob_sidecar), // by this point assume all external transactions should be propagated propagate: match origin { TransactionOrigin::External => true, @@ -287,6 +348,17 @@ where }, } } + + fn on_new_head_block(&self, new_tip_block: &SealedBlock) { + // update all forks + if self.chain_spec.is_cancun_activated_at_timestamp(new_tip_block.timestamp) { + self.fork_tracker.cancun.store(true, std::sync::atomic::Ordering::Relaxed); + } + + if self.chain_spec.is_shanghai_activated_at_timestamp(new_tip_block.timestamp) { + self.fork_tracker.shanghai.store(true, std::sync::atomic::Ordering::Relaxed); + } + } } /// A builder for [TransactionValidationTaskExecutor] @@ -297,11 +369,11 @@ pub struct EthTransactionValidatorBuilder { shanghai: bool, /// Fork indicator whether we are in the Cancun hardfork. cancun: bool, - /// Fork indicator whether we are using EIP-2718 type transactions. 
+ /// Whether using EIP-2718 type transactions is allowed eip2718: bool, - /// Fork indicator whether we are using EIP-1559 type transactions. + /// Whether using EIP-1559 type transactions is allowed eip1559: bool, - /// Fork indicator whether we are using EIP-4844 blob transactions. + /// Whether using EIP-4844 type transactions is allowed eip4844: bool, /// The current max gas limit block_gas_limit: u64, @@ -313,6 +385,9 @@ pub struct EthTransactionValidatorBuilder { additional_tasks: usize, /// Toggle to determine if a local transaction should be propagated propagate_local_transactions: bool, + + /// Stores the setup and parameters needed for validating KZG proofs. + kzg_settings: Arc, } impl EthTransactionValidatorBuilder { @@ -320,18 +395,23 @@ impl EthTransactionValidatorBuilder { pub fn new(chain_spec: Arc) -> Self { Self { chain_spec, - shanghai: true, - eip2718: true, - eip1559: true, block_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT, minimum_priority_fee: None, additional_tasks: 1, // default to true, can potentially take this as a param in the future propagate_local_transactions: true, + kzg_settings: Arc::clone(&KZG_TRUSTED_SETUP), + + // by default all transaction types are allowed + eip2718: true, + eip1559: true, + eip4844: true, + + // shanghai is activated by default + shanghai: true, - // TODO: can hard enable by default once transitioned + // TODO: can hard enable by default once mainnet transitioned cancun: false, - eip4844: false, } } @@ -378,8 +458,15 @@ impl EthTransactionValidatorBuilder { self.eip1559 = eip1559; self } + + /// Sets the [KzgSettings] to use for validating KZG proofs. + pub fn kzg_settings(mut self, kzg_settings: Arc) -> Self { + self.kzg_settings = kzg_settings; + self + } + /// Sets toggle to propagate transactions received locally by this client (e.g - /// transactions from eth_Sendtransaction to this nodes' RPC server) + /// transactions from eth_sendTransaction to this node's RPC server) /// /// If set to false, only transactions received by network peers (via /// p2p) will be marked as propagated in the local transaction pool and returned on a @@ -388,7 +475,7 @@ impl EthTransactionValidatorBuilder { self.propagate_local_transactions = propagate_local_txs; self } - /// Disables propagating transactions recieved locally by this client + /// Disables propagating transactions received locally by this client /// /// For more information, check docs for set_propagate_local_transactions pub fn no_local_transaction_propagation(mut self) -> Self { @@ -436,20 +523,24 @@ impl EthTransactionValidatorBuilder { minimum_priority_fee, additional_tasks, propagate_local_transactions, + kzg_settings, } = self; + let fork_tracker = + ForkTracker { shanghai: AtomicBool::new(shanghai), cancun: AtomicBool::new(cancun) }; + let inner = EthTransactionValidatorInner { chain_spec, client, - shanghai, eip2718, eip1559, - cancun, + fork_tracker, eip4844, block_gas_limit, minimum_priority_fee, propagate_local_transactions, blob_store: Box::new(blob_store), + kzg_settings, _marker: Default::default(), }; @@ -479,6 +570,27 @@ impl EthTransactionValidatorBuilder { } } +/// Keeps track of whether certain forks are activated +#[derive(Debug)] +pub(crate) struct ForkTracker { + /// Tracks if shanghai is activated at the block's timestamp. + pub(crate) shanghai: AtomicBool, + /// Tracks if cancun is activated at the block's timestamp. + pub(crate) cancun: AtomicBool, +} + +impl ForkTracker { + /// Returns true if the Shanghai fork is activated. 
+ pub(crate) fn is_shanghai_activated(&self) -> bool { + self.shanghai.load(std::sync::atomic::Ordering::Relaxed) + } + + /// Returns true if the Cancun fork is activated. + pub(crate) fn is_cancun_activated(&self) -> bool { + self.cancun.load(std::sync::atomic::Ordering::Relaxed) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index 1ec2bd07f4e..d99100961a8 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -6,7 +6,7 @@ use crate::{ traits::{PoolTransaction, TransactionOrigin}, }; use reth_primitives::{ - Address, BlobTransactionSidecar, IntoRecoveredTransaction, TransactionKind, + Address, BlobTransactionSidecar, IntoRecoveredTransaction, SealedBlock, TransactionKind, TransactionSignedEcRecovered, TxHash, H256, U256, }; use std::{fmt, time::Instant}; @@ -86,6 +86,17 @@ pub enum ValidTransaction { }, } +impl ValidTransaction { + /// Creates a new valid transaction with an optional sidecar. + pub fn new(transaction: T, sidecar: Option) -> Self { + if let Some(sidecar) = sidecar { + Self::ValidWithSidecar { transaction, sidecar } + } else { + Self::Valid(transaction) + } + } +} + impl ValidTransaction { #[inline] pub(crate) fn transaction(&self) -> &T { @@ -157,6 +168,11 @@ pub trait TransactionValidator: Send + Sync { transaction: Self::Transaction, ) -> TransactionValidationOutcome; + /// Invoked when the head block changes. + /// + /// This can be used to update fork specific values (timestamp). + fn on_new_head_block(&self, _new_tip_block: &SealedBlock) {} + /// Ensure that the code size is not greater than `max_init_code_size`. /// `max_init_code_size` should be configurable so this will take it as an argument. fn ensure_max_init_code_size( diff --git a/crates/transaction-pool/src/validate/task.rs b/crates/transaction-pool/src/validate/task.rs index d35f2acb879..035ba0045f1 100644 --- a/crates/transaction-pool/src/validate/task.rs +++ b/crates/transaction-pool/src/validate/task.rs @@ -7,8 +7,8 @@ use crate::{ TransactionValidator, }; use futures_util::{lock::Mutex, StreamExt}; -use reth_primitives::ChainSpec; -use reth_provider::{BlockReaderIdExt, StateProviderFactory}; +use reth_primitives::{ChainSpec, SealedBlock}; +use reth_provider::BlockReaderIdExt; use reth_tasks::TaskSpawner; use std::{future::Future, pin::Pin, sync::Arc}; use tokio::{ @@ -135,14 +135,9 @@ where .with_additional_tasks(num_additional_tasks) .build_with_tasks::(client, tasks, blob_store) } - - /// Returns the configured chain id - pub fn chain_id(&self) -> u64 { - self.validator.inner.chain_id() - } } -impl TransactionValidationTaskExecutor { +impl TransactionValidationTaskExecutor { /// Creates a new executor instance with the given validator for transaction validation. 
/// /// Initializes the executor with the provided validator and sets up communication for @@ -154,13 +149,11 @@ impl TransactionValidationTaskExecutor { } #[async_trait::async_trait] -impl TransactionValidator - for TransactionValidationTaskExecutor> +impl TransactionValidator for TransactionValidationTaskExecutor where - Client: StateProviderFactory + BlockReaderIdExt + Clone + 'static, - Tx: PoolTransaction + Clone + 'static, + V: TransactionValidator + Clone + 'static, { - type Transaction = Tx; + type Transaction = ::Transaction; async fn validate_transaction( &self, @@ -172,7 +165,7 @@ where { let to_validation_task = self.to_validation_task.clone(); let to_validation_task = to_validation_task.lock().await; - let validator = Arc::clone(&self.validator.inner); + let validator = self.validator.clone(); let res = to_validation_task .send(Box::pin(async move { let res = validator.validate_transaction(origin, transaction).await; @@ -195,4 +188,8 @@ where ), } } + + fn on_new_head_block(&self, new_tip_block: &SealedBlock) { + self.validator.on_new_head_block(new_tip_block) + } } diff --git a/crates/trie/src/proof.rs b/crates/trie/src/proof.rs index 1450843d8c4..1ccd114ad3b 100644 --- a/crates/trie/src/proof.rs +++ b/crates/trie/src/proof.rs @@ -41,21 +41,25 @@ pub struct Proof<'a, 'b, TX, H> { hashed_cursor_factory: &'b H, } -impl<'a, 'tx, TX> Proof<'a, 'a, TX, TX> -where - TX: DbTx<'tx> + HashedCursorFactory<'a>, -{ +impl<'a, TX> Proof<'a, 'a, TX, TX> { /// Create a new [Proof] instance. pub fn new(tx: &'a TX) -> Self { Self { tx, hashed_cursor_factory: tx } } +} +impl<'a, 'b, 'tx, TX, H> Proof<'a, 'b, TX, H> +where + TX: DbTx<'tx>, + H: HashedCursorFactory<'b>, +{ /// Generate an account proof from intermediate nodes. pub fn account_proof(&self, address: Address) -> Result, ProofError> { let hashed_address = keccak256(address); let target_nibbles = Nibbles::unpack(hashed_address); - let mut proof_restorer = ProofRestorer::new(self.hashed_cursor_factory)?; + let mut proof_restorer = + ProofRestorer::new(self.tx)?.with_hashed_cursor_factory(self.hashed_cursor_factory)?; let mut trie_cursor = AccountTrieCursor::new(self.tx.cursor_read::()?); @@ -96,7 +100,7 @@ where fn traverse_path>( &self, trie_cursor: &mut AccountTrieCursor, - proof_restorer: &mut ProofRestorer<'a, 'a, TX, TX>, + proof_restorer: &mut ProofRestorer<'a, 'b, TX, H>, hashed_address: H256, ) -> Result, ProofError> { let mut intermediate_proofs = Vec::new(); @@ -142,7 +146,7 @@ where impl<'a, 'tx, TX> ProofRestorer<'a, 'a, TX, TX> where - TX: DbTx<'tx> + HashedCursorFactory<'a>, + TX: DbTx<'tx>, { fn new(tx: &'a TX) -> Result { let hashed_account_cursor = tx.hashed_account_cursor()?; @@ -154,6 +158,30 @@ where node_rlp_buf: Vec::with_capacity(128), }) } +} + +impl<'a, 'b, 'tx, TX, H> ProofRestorer<'a, 'b, TX, H> +where + TX: DbTx<'tx> + HashedCursorFactory<'a>, + H: HashedCursorFactory<'b>, +{ + /// Set the hashed cursor factory. 
+ fn with_hashed_cursor_factory<'c, HF>( + self, + hashed_cursor_factory: &'c HF, + ) -> Result, ProofError> + where + HF: HashedCursorFactory<'c>, + { + let hashed_account_cursor = hashed_cursor_factory.hashed_account_cursor()?; + Ok(ProofRestorer { + tx: self.tx, + hashed_cursor_factory, + hashed_account_cursor, + account_rlp_buf: self.account_rlp_buf, + node_rlp_buf: self.node_rlp_buf, + }) + } fn restore_branch_node( &mut self, diff --git a/docs/crates/db.md b/docs/crates/db.md index 79f93b1efad..58729fc1434 100644 --- a/docs/crates/db.md +++ b/docs/crates/db.md @@ -8,7 +8,7 @@ The database is a central component to Reth, enabling persistent storage for dat Within Reth, the database is organized via "tables". A table is any struct that implements the `Table` trait. -[File: crates/storage/db/src/abstraction/table.rs](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db/src/abstraction/table.rs#L56-L65) +[File: crates/storage/db/src/abstraction/table.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/storage/db/src/abstraction/table.rs#L55-L82) ```rust ignore pub trait Table: Send + Sync + Debug + 'static { @@ -23,7 +23,7 @@ pub trait Table: Send + Sync + Debug + 'static { } //--snip-- -pub trait Key: Encode + Decode + Ord {} +pub trait Key: Encode + Decode + Ord + Clone + Serialize + for<'a> Deserialize<'a> {} //--snip-- pub trait Value: Compress + Decompress + Serialize {} @@ -32,38 +32,42 @@ pub trait Value: Compress + Decompress + Serialize {} The `Table` trait has two generic values, `Key` and `Value`, which need to implement the `Key` and `Value` traits, respectively. The `Encode` trait is responsible for transforming data into bytes so it can be stored in the database, while the `Decode` trait transforms the bytes back into its original form. Similarly, the `Compress` and `Decompress` traits transform the data to and from a compressed format when storing or reading data from the database. -There are many tables within the node, all used to store different types of data from `Headers` to `Transactions` and more. Below is a list of all of the tables. You can follow [this link](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db/src/tables/mod.rs#L36) if you would like to see the table definitions for any of the tables below. +There are many tables within the node, all used to store different types of data from `Headers` to `Transactions` and more. Below is a list of all of the tables. You can follow [this link](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/storage/db/src/tables/mod.rs#L161-L188) if you would like to see the table definitions for any of the tables below. - CanonicalHeaders - HeaderTD - HeaderNumbers - Headers -- BlockBodies +- BlockBodyIndices - BlockOmmers +- BlockWithdrawals +- TransactionBlock - Transactions - TxHashNumber - Receipts -- Logs - PlainAccountState - PlainStorageState - Bytecodes -- BlockTransitionIndex -- TxTransitionIndex - AccountHistory - StorageHistory - AccountChangeSet - StorageChangeSet +- HashedAccount +- HashedStorage +- AccountsTrie +- StoragesTrie - TxSenders -- Config - SyncStage +- SyncStageProgress +- PruneCheckpoints
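To make the `Table`/`Key`/`Value` pattern above concrete, here is a minimal, self-contained sketch of the idea. This is not reth's actual trait (whose `Key` and `Value` bounds carry the `Encode`/`Decode` and `Compress`/`Decompress` requirements shown earlier); it only illustrates how a zero-sized marker type can name a table and pin its key and value types.

```rust
// Simplified stand-in for the `Table` trait described above.
trait Table: Send + Sync + 'static {
    const NAME: &'static str;
    type Key: Ord + Clone;
    type Value;
}

/// Marker type modeled on a table like `CanonicalHeaders`:
/// block number -> canonical header hash.
struct CanonicalHeaders;

impl Table for CanonicalHeaders {
    const NAME: &'static str = "CanonicalHeaders";
    type Key = u64; // block number
    type Value = [u8; 32]; // header hash
}

// Generic code can now be written against any table.
fn describe<T: Table>() -> String {
    format!("table `{}`", T::NAME)
}

fn main() {
    println!("{}", describe::<CanonicalHeaders>());
}
```

Because each table is just a type, cursors and transactions can be made generic over `T: Table` and the compiler enforces that keys and values of different tables are never mixed up.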
## Database -Reth's database design revolves around it's main [Database trait](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/interfaces/src/db/mod.rs#L33), which takes advantage of [generic associated types](https://blog.rust-lang.org/2022/10/28/gats-stabilization.html) and [a few design tricks](https://sabrinajewson.org/blog/the-better-alternative-to-lifetime-gats#the-better-gats) to implement the database's functionality across many types. Let's take a quick look at the `Database` trait and how it works. +Reth's database design revolves around its main [Database trait](https://github.com/paradigmxyz/reth/blob/eaca2a4a7fbbdc2f5cd15eab9a8a18ede1891bda/crates/storage/db/src/abstraction/database.rs#L21), which takes advantage of [generic associated types](https://blog.rust-lang.org/2022/10/28/gats-stabilization.html) and [a few design tricks](https://sabrinajewson.org/blog/the-better-alternative-to-lifetime-gats#the-better-gats) to implement the database's functionality across many types. Let's take a quick look at the `Database` trait and how it works. -[File: crates/storage/db/src/abstraction/database.rs](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db/src/abstraction/database.rs#L19) +[File: crates/storage/db/src/abstraction/database.rs](https://github.com/paradigmxyz/reth/blob/eaca2a4a7fbbdc2f5cd15eab9a8a18ede1891bda/crates/storage/db/src/abstraction/database.rs#L21) ```rust ignore /// Main Database trait that spawns transactions to be executed. diff --git a/docs/crates/discv4.md b/docs/crates/discv4.md index fe37cf463db..45ab4a52f84 100644 --- a/docs/crates/discv4.md +++ b/docs/crates/discv4.md @@ -5,16 +5,20 @@ The `discv4` crate plays an important role in Reth, enabling discovery of other ## Starting the Node Discovery Protocol As mentioned in the network and stages chapters, when the node is first started up, the `node::Command::execute()` function is called, which initializes the node and starts to run the Reth pipeline. Throughout the initialization of the node, there are many processes that are started. One of the processes that is initialized is the p2p network which starts the node discovery protocol amongst other tasks. -[File: bin/reth/src/node/mod.rs](https://github.com/paradigmxyz/reth/blob/main/bin/reth/src/node/mod.rs#L95) +[File: bin/reth/src/node/mod.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/bin/reth/src/node/mod.rs#L314-L322) ```rust ignore pub async fn execute(&self) -> eyre::Result<()> { //--snip-- - let network = config - .network_config(db.clone(), chain_id, genesis_hash, self.network.disable_discovery) - .start_network() - .await?; + let network = self + .start_network( + network_config, + &ctx.task_executor, + transaction_pool.clone(), + default_peers_path, + ) + .await?; - info!(peer_id = ?network.peer_id(), local_addr = %network.local_addr(), "Started p2p networking"); + info!(target: "reth::cli", peer_id = %network.peer_id(), local_addr = %network.local_addr(), "Connected to P2P network"); //--snip-- } @@ -22,7 +26,7 @@ As mentioned in the network and stages chapters, when the node is first started During this process, a new `NetworkManager` is created through the `NetworkManager::new()` function, which starts the discovery protocol through a handful of newly spawned tasks. Let's take a look at how this actually works under the hood. 
-[File: crates/net/network/src/manager.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/manager.rs#L147) +[File: crates/net/network/src/manager.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/manager.rs#L89) ```rust ignore impl NetworkManager where @@ -37,6 +41,7 @@ where mut discovery_v4_config, discovery_addr, boot_nodes, + dns_discovery_config, //--snip-- .. } = config; @@ -50,16 +55,18 @@ where disc_config }); - let discovery = Discovery::new(discovery_addr, secret_key, discovery_v4_config).await?; + let discovery = + Discovery::new(discovery_addr, secret_key, discovery_v4_config, dns_discovery_config) + .await?; //--snip-- } } ``` -First, the `NetworkConfig` is deconstructed and the `disc_config` is updated to merge configured [bootstrap nodes](https://github.com/paradigmxyz/reth/blob/main/crates/net/discv4/src/bootnodes.rs#L8) and add the `forkid` to adhere to [EIP 868](https://eips.ethereum.org/EIPS/eip-868). This updated configuration variable is then passed into the `Discovery::new()` function. Note that `Discovery` is a catch all for all discovery services, which include discv4, DNS discovery and others in the future. +First, the `NetworkConfig` is deconstructed and the `disc_config` is updated to merge configured [bootstrap nodes](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/primitives/src/net.rs#L120-L151) and add the `forkid` to adhere to [EIP 868](https://eips.ethereum.org/EIPS/eip-868). This updated configuration variable is then passed into the `Discovery::new()` function. Note that `Discovery` is a catch-all for all discovery services, which include discv4, DNS discovery and others in the future. -[File: crates/net/network/src/discovery.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/discovery.rs#L51) +[File: crates/net/network/src/discovery.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/discovery.rs#L53) ```rust ignore impl Discovery { /// Spawns the discovery service. @@ -70,6 +77,7 @@ impl Discovery { discovery_addr: SocketAddr, sk: SecretKey, discv4_config: Option, + dns_discovery_config: Option, ) -> Result { let local_enr = NodeRecord::from_secret_key(discovery_addr, &sk); @@ -86,6 +94,20 @@ impl Discovery { (None, None, None) }; + // setup DNS discovery + let (_dns_discovery, dns_discovery_updates, _dns_disc_service) = + if let Some(dns_config) = dns_discovery_config { + let (mut service, dns_disc) = DnsDiscoveryService::new_pair( + Arc::new(DnsResolver::from_system_conf()?), + dns_config, + ); + let dns_discovery_updates = service.node_record_stream(); + let dns_disc_service = service.spawn(); + (Some(dns_disc), Some(dns_discovery_updates), Some(dns_disc_service)) + } else { + (None, None, None) + }; + Ok(Self { local_enr, discv4, diff --git a/docs/crates/eth-wire.md b/docs/crates/eth-wire.md index 0c216644fce..0270668d3f9 100644 --- a/docs/crates/eth-wire.md +++ b/docs/crates/eth-wire.md @@ -12,7 +12,7 @@ This crate can be thought of as having 2 components: ## Types The most basic Eth-wire type is a `ProtocolMessage`. It describes all messages that reth can send/receive. 
-[File: crates/net/eth-wire/src/types/message.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/eth-wire/src/types/message.rs) +[File: crates/net/eth-wire/src/types/message.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/types/message.rs) ```rust, ignore /// An `eth` protocol message, containing a message ID and payload. #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] @@ -50,7 +50,7 @@ pub enum EthMessageID { Messages can either be broadcast to the network, or can be a request/response message to a single peer. This 2nd type of message is described using a `RequestPair` struct, which is simply a concatenation of the underlying message with a request id. -[File: crates/net/eth-wire/src/types/message.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/eth-wire/src/types/message.rs) +[File: crates/net/eth-wire/src/types/message.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/types/message.rs) ```rust, ignore #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct RequestPair { @@ -62,7 +62,7 @@ pub struct RequestPair { Every `EthMessage` has a corresponding Rust struct which implements the `Encodable` and `Decodable` traits. These traits are defined as follows: -[Crate: crates/common/rlp](https://github.com/paradigmxyz/reth/blob/main/crates/common/rlp) +[Crate: crates/rlp](https://github.com/paradigmxyz/reth/tree/1563506aea09049a85e5cc72c2894f3f7a371581/crates/rlp) ```rust, ignore pub trait Decodable: Sized { fn decode(buf: &mut &[u8]) -> Result; } @@ -93,7 +93,7 @@ The items in the list are transactions in the format described in the main Ether In reth, this is represented as: -[File: crates/net/eth-wire/src/types/broadcast.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/eth-wire/src/types/broadcast.rs) +[File: crates/net/eth-wire/src/types/broadcast.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/types/broadcast.rs) ```rust,ignore pub struct Transactions( /// New transactions for the peer to include in its mempool. @@ -103,7 +103,7 @@ pub struct Transactions( And the corresponding trait implementations are present in the primitives crate. -[File: crates/primitives/src/transaction/mod.rs](https://github.com/paradigmxyz/reth/blob/main/crates/primitives/src/transaction/mod.rs) +[File: crates/primitives/src/transaction/mod.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/primitives/src/transaction/mod.rs) ```rust, ignore #[main_codec] #[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref, Default)] @@ -131,7 +131,7 @@ impl Decodable for TransactionSigned { // Implementation omitted for brevity //... } - +} ``` Now that we know how the types work, let's take a look at how these are utilized in the network. @@ -146,7 +146,7 @@ The lowest level stream to communicate with other peers is the P2P stream. It ta Decompression/Compression of bytes is done with the snappy algorithm ([EIP 706](https://eips.ethereum.org/EIPS/eip-706)) using the external `snap` crate. 
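As a small illustration of that compression step, the sketch below round-trips a payload with the `snap` crate's raw encoder and decoder (EIP-706 uses raw snappy blocks, not the framed format). This only shows the codec itself; the real `P2PStream` applies it per message and additionally caps the decoded length to guard against decompression bombs.

```rust
// Cargo.toml: snap = "1"
fn main() {
    let payload: Vec<u8> = b"rlp-encoded message body".to_vec();

    // Compress the message payload (raw snappy, no framing).
    let compressed = snap::raw::Encoder::new()
        .compress_vec(&payload)
        .expect("compression failed");

    // Decompress on receipt and verify the round trip.
    let decompressed = snap::raw::Decoder::new()
        .decompress_vec(&compressed)
        .expect("decompression failed");

    assert_eq!(payload, decompressed);
}
```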
-[File: crates/net/eth-wire/src/p2pstream.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/eth-wire/src/p2pstream.rs) +[File: crates/net/eth-wire/src/p2pstream.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/p2pstream.rs) ```rust,ignore #[pin_project] pub struct P2PStream { @@ -164,7 +164,7 @@ pub struct P2PStream { To manage pinging, an instance of the `Pinger` struct is used. This is a state machine which keeps track of how many pings we have sent/received and the timeouts associated with them. -[File: crates/net/eth-wire/src/pinger.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/eth-wire/src/pinger.rs) +[File: crates/net/eth-wire/src/pinger.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/pinger.rs) ```rust,ignore #[derive(Debug)] pub(crate) struct Pinger { @@ -190,7 +190,7 @@ pub(crate) enum PingState { State transitions are then implemented like a future, with the `poll_ping` function advancing the state of the pinger. -[File: crates/net/eth-wire/src/pinger.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/eth-wire/src/pinger.rs) +[File: crates/net/eth-wire/src/pinger.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/pinger.rs) ```rust, ignore pub(crate) fn poll_ping( &mut self, @@ -218,12 +218,12 @@ pub(crate) fn poll_ping( ``` ### Sending and receiving data -To send and recieve data, the P2PStream itself is a future which implemenents the `Stream` and `Sink` traits from the `futures` crate. +To send and receive data, the P2PStream itself is a future which implements the `Stream` and `Sink` traits from the `futures` crate. For the `Stream` trait, the `inner` stream is polled, decompressed and returned. Most of the code is just error handling and is omitted here for clarity. -[File: crates/net/eth-wire/src/p2pstream.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/eth-wire/src/p2pstream.rs) +[File: crates/net/eth-wire/src/p2pstream.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/p2pstream.rs) ```rust,ignore impl Stream for P2PStream { @@ -250,7 +250,7 @@ impl Stream for P2PStream { Similarly, for the `Sink` trait, we do the reverse, compressing and sending data out to the `inner` stream. The important functions in this trait are shown below. -[File: crates/net/eth-wire/src/p2pstream.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/eth-wire/src/p2pstream.rs) +[File: crates/net/eth-wire/src/p2pstream.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/p2pstream.rs) ```rust, ignore impl Sink for P2PStream { fn start_send(self: Pin<&mut Self>, item: Bytes) -> Result<(), Self::Error> { @@ -287,7 +287,7 @@ impl Sink for P2PStream { ## EthStream The EthStream is very simple, it does not keep track of any state, it simply wraps the P2Pstream. 
-[File: crates/net/eth-wire/src/ethstream.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/eth-wire/src/ethstream.rs) +[File: crates/net/eth-wire/src/ethstream.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/ethstream.rs) ```rust,ignore #[pin_project] pub struct EthStream { @@ -298,7 +298,7 @@ pub struct EthStream { EthStream's only job is to perform the RLP decoding/encoding, using the `ProtocolMessage::decode()` and `ProtocolMessage::encode()` functions we looked at earlier. -[File: crates/net/eth-wire/src/ethstream.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/eth-wire/src/ethstream.rs) +[File: crates/net/eth-wire/src/ethstream.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/ethstream.rs) ```rust,ignore impl Stream for EthStream { // ... @@ -341,7 +341,7 @@ To perform these, reth has special `Unauthed` versions of streams described abov The `UnauthedP2Pstream` does the `Hello` handshake and returns a `P2PStream`. -[File: crates/net/eth-wire/src/p2pstream.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/eth-wire/src/p2pstream.rs) +[File: crates/net/eth-wire/src/p2pstream.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/p2pstream.rs) ```rust, ignore #[pin_project] pub struct UnauthedP2PStream { @@ -370,6 +370,6 @@ impl UnauthedP2PStream { } ``` -Similary, UnauthedEthStream does the `Status` handshake and returns an `EthStream`. The code is [here](https://github.com/paradigmxyz/reth/blob/main/crates/net/eth-wire/src/ethstream.rs) +Similarly, UnauthedEthStream does the `Status` handshake and returns an `EthStream`. The code is [here](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/ethstream.rs) diff --git a/docs/crates/network.md b/docs/crates/network.md index f78a5f5d33b..edfa3515b04 100644 --- a/docs/crates/network.md +++ b/docs/crates/network.md @@ -33,7 +33,7 @@ The `"node"` CLI command, used to run the node itself, does the following at a h Steps 5-6 are of interest to us as they consume items from the `network` crate: -[File: bin/reth/src/node/mod.rs](https://github.com/paradigmxyz/reth/blob/main/bin/reth/src/node/mod.rs) +[File: bin/reth/src/node/mod.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/bin/reth/src/node/mod.rs) ```rust,ignore let network = start_network(network_config(db.clone(), chain_id, genesis_hash)).await?; @@ -84,7 +84,7 @@ pipeline.run(db.clone()).await?; Let's begin by taking a look at the line where the network is started, with the call, unsurprisingly, to `start_network`. Sounds important, doesn't it? 
-[File: bin/reth/src/node/mod.rs](https://github.com/paradigmxyz/reth/blob/main/bin/reth/src/node/mod.rs) +[File: bin/reth/src/node/mod.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/bin/reth/src/node/mod.rs) ```rust,ignore async fn start_network(config: NetworkConfig) -> Result where @@ -107,7 +107,7 @@ It gets the handles for the network management, transactions, and ETH requests t The `NetworkManager::builder` constructor requires a `NetworkConfig` struct to be passed in as a parameter, which can be used as the main entrypoint for setting up the entire network layer: -[File: crates/net/network/src/config.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/config.rs) +[File: crates/net/network/src/config.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/config.rs) ```rust,ignore pub struct NetworkConfig { /// The client type that can interact with the chain. @@ -152,7 +152,7 @@ pub struct NetworkConfig { The discovery task progresses as the network management task is polled, handling events regarding peer management through the `Swarm` struct which is stored as a field on the `NetworkManager`: -[File: crates/net/network/src/swarm.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/swarm.rs) +[File: crates/net/network/src/swarm.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/swarm.rs) ```rust,ignore pub(crate) struct Swarm { /// Listens for new incoming connections. @@ -180,7 +180,7 @@ Let's walk through how each is implemented, and then apply that knowledge to und The `NetworkHandle` struct is a client for the network management task that can be shared across threads. It wraps an `Arc` around the `NetworkInner` struct, defined as follows: -[File: crates/net/network/src/network.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/network.rs) +[File: crates/net/network/src/network.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/network.rs) ```rust,ignore struct NetworkInner { /// Number of active peer sessions the node's currently handling. @@ -200,7 +200,7 @@ struct NetworkInner { The field of note here is `to_manager_tx`, which is a handle that can be used to send messages in a channel to an instance of the `NetworkManager` struct. -[File: crates/net/network/src/manager.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/manager.rs) +[File: crates/net/network/src/manager.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/manager.rs) ```rust,ignore pub struct NetworkManager { /// The type that manages the actual network part, which includes connections. @@ -235,7 +235,7 @@ While the `NetworkManager` is meant to be spawned as a standalone [`tokio::task` In the pipeline, the `NetworkHandle` is used to instantiate the `FetchClient` - which we'll get into next - and is used in the `HeaderStage` to update the node's ["status"](https://github.com/ethereum/devp2p/blob/master/caps/eth.md#status-0x00) (record the total difficulty, hash, and height of the last processed block). 
-[File: crates/stages/src/stages/headers.rs](https://github.com/paradigmxyz/reth/blob/main/crates/stages/src/stages/headers.rs) +[File: crates/stages/src/stages/headers.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/stages/src/stages/headers.rs) ```rust,ignore async fn update_head( &self, @@ -255,7 +255,7 @@ Now that we have some understanding about the internals of the network managemen The `FetchClient` struct, similar to `NetworkHandle`, can be shared across threads, and is a client for fetching data from the network. It's a fairly lightweight struct: -[File: crates/net/network/src/fetch/client.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/fetch/client.rs) +[File: crates/net/network/src/fetch/client.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/fetch/client.rs) ```rust,ignore pub struct FetchClient { /// Sender half of the request channel. @@ -271,7 +271,7 @@ The `request_tx` field is a handle to a channel that can be used to send request The fields `request_tx` and `peers_handle` are cloned off of the `StateFetcher` struct when instantiating the `FetchClient`, which is the lower-level struct responsible for managing data fetching operations over the network: -[File: crates/net/network/src/fetch/mod.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/fetch/mod.rs) +[File: crates/net/network/src/fetch/mod.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/fetch/mod.rs) ```rust,ignore pub struct StateFetcher { /// Currently active [`GetBlockHeaders`] requests @@ -295,7 +295,7 @@ pub struct StateFetcher { This struct itself is nested deeply within the `NetworkManager`: its `Swarm` struct (shown earlier in the chapter) contains a `NetworkState` struct that has the `StateFetcher` as a field: -[File: crates/net/network/src/state.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/state.rs) +[File: crates/net/network/src/state.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/state.rs) ```rust,ignore pub struct NetworkState { /// All active peers and their state. @@ -322,7 +322,7 @@ pub struct NetworkState { The `FetchClient` implements the `HeadersClient` and `BodiesClient` traits, defining the functionality to get headers and block bodies from available peers. -[File: crates/net/network/src/fetch/client.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/fetch/client.rs) +[File: crates/net/network/src/fetch/client.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/fetch/client.rs) ```rust,ignore impl HeadersClient for FetchClient { /// Sends a `GetBlockHeaders` request to an available peer. @@ -346,7 +346,7 @@ This functionality is used in the `HeaderStage` and `BodyStage`, respectively. 
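Before moving on, it is worth seeing the request-channel pattern that makes `FetchClient` so cheap to share. The sketch below is a hypothetical, simplified stand-in (the names `HeadersRequest`, `start`, and `limit` are illustrative, not reth's actual request types): a cloneable handle owns the sender half of an `mpsc` channel, and each request bundles a `oneshot` sender on which the fetcher task delivers the response.

```rust
use tokio::sync::{mpsc, oneshot};

// Hypothetical request shape; `Vec<String>` stands in for `Vec<Header>`.
struct HeadersRequest {
    start: u64,
    limit: u64,
    response: oneshot::Sender<Vec<String>>,
}

#[tokio::main]
async fn main() {
    let (request_tx, mut request_rx) = mpsc::unbounded_channel::<HeadersRequest>();

    // The fetcher side: drain incoming requests and answer each one.
    tokio::spawn(async move {
        while let Some(req) = request_rx.recv().await {
            let headers: Vec<String> =
                (req.start..req.start + req.limit).map(|n| format!("header {n}")).collect();
            let _ = req.response.send(headers);
        }
    });

    // The client side: send a request, then await the answer on the oneshot.
    let (tx, rx) = oneshot::channel();
    request_tx
        .send(HeadersRequest { start: 100, limit: 3, response: tx })
        .expect("fetcher task alive");
    println!("{:?}", rx.await.expect("response sent"));
}
```

Cloning the sender half is all it takes to hand another stage its own "client", which is why the same fetcher can serve the header and body stages concurrently.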
In the pipeline used by the main Reth binary, the `HeaderStage` uses a `ReverseHeadersDownloader` to stream headers from the network: -[File: crates/net/downloaders/src/headers/linear.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/downloaders/src/headers/linear.rs) +[File: crates/net/downloaders/src/headers/reverse_headers.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/downloaders/src/headers/reverse_headers.rs) ```rust,ignore pub struct ReverseHeadersDownloader { /// The consensus client @@ -362,7 +362,7 @@ pub struct ReverseHeadersDownloader { A `FetchClient` is passed in to the `client` field, and the `get_headers` method it implements gets used when polling the stream created by the `ReverseHeadersDownloader` in the `execute` method of the `HeaderStage`. -[File: crates/net/downloaders/src/headers/linear.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/downloaders/src/headers/linear.rs) +[File: crates/net/downloaders/src/headers/reverse_headers.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/downloaders/src/headers/reverse_headers.rs) ```rust,ignore fn get_or_init_fut(&mut self) -> HeadersRequestFuture { match self.request.take() { @@ -388,7 +388,7 @@ fn get_or_init_fut(&mut self) -> HeadersRequestFuture { In the `BodyStage` configured by the main binary, a `BodiesDownloader` is used: -[File: crates/net/downloaders/src/bodies/concurrent.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/downloaders/src/bodies/concurrent.rs) +[File: crates/net/downloaders/src/bodies/bodies.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/downloaders/src/bodies/bodies.rs) ```rust,ignore pub struct BodiesDownloader { /// The bodies client @@ -406,7 +406,7 @@ pub struct BodiesDownloader { Here, similarly, a `FetchClient` is passed in to the `client` field, and the `get_block_bodies` method it implements is used when constructing the stream created by the `BodiesDownloader` in the `execute` method of the `BodyStage`. -[File: crates/net/downloaders/src/bodies/concurrent.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/downloaders/src/bodies/concurrent.rs) +[File: crates/net/downloaders/src/bodies/bodies.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/downloaders/src/bodies/bodies.rs) ```rust,ignore async fn fetch_bodies( &self, @@ -425,7 +425,7 @@ When `FetchClient.get_headers` or `FetchClient.get_block_bodies` is called, thos Every time the `StateFetcher` is polled, it finds the next idle peer available to service the current request (for either a block header, or a block body). In this context, "idle" means any peer that is not currently handling a request from the node: -[File: crates/net/network/src/fetch/mod.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/fetch/mod.rs) +[File: crates/net/network/src/fetch/mod.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/fetch/mod.rs) ```rust,ignore /// Returns the next action to return fn poll_action(&mut self) -> PollAction { @@ -455,7 +455,7 @@ The ETH requests task serves _incoming_ requests related to blocks in the [`eth` Similar to the network management task, it's implemented as an endless future, but it is meant to run as a background task (on a standalone `tokio::task`) and not to be interacted with directly from the pipeline. 
It's represented by the following `EthRequestHandler` struct: -[File: crates/net/network/src/eth_requests.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/eth_requests.rs) +[File: crates/net/network/src/eth_requests.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/eth_requests.rs) ```rust,ignore pub struct EthRequestHandler { /// The client type that can interact with the chain. @@ -480,7 +480,7 @@ As the `NetworkManager` is polled and listens for events from peers passed throu Being an endless future, the core of the ETH requests task's functionality is in its `poll` method implementation. As the `EthRequestHandler` is polled, it listens for any ETH requests coming through the channel, and handles them accordingly. At the time of writing, the ETH requests task can handle the [`GetBlockHeaders`](https://github.com/ethereum/devp2p/blob/master/caps/eth.md#getblockheaders-0x03) and [`GetBlockBodies`](https://github.com/ethereum/devp2p/blob/master/caps/eth.md#getblockbodies-0x05) requests. -[File: crates/net/network/src/eth_requests.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/eth_requests.rs) +[File: crates/net/network/src/eth_requests.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/eth_requests.rs) ```rust,ignore fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.get_mut(); @@ -506,7 +506,7 @@ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { The handling of these requests is fairly straightforward. The `GetBlockHeaders` payload is the following: -[File: crates/net/eth-wire/src/types/blocks.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/eth-wire/src/types/blocks.rs) +[File: crates/net/eth-wire/src/types/blocks.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/types/blocks.rs) ```rust,ignore pub struct GetBlockHeaders { /// The block number or hash that the peer should start returning headers from. @@ -528,7 +528,7 @@ pub struct GetBlockHeaders { In handling this request, the ETH requests task attempts, starting with `start_block`, to fetch the associated header from the database, increment/decrement the block number to fetch by `skip` depending on the `direction` while checking for overflow/underflow, and checks that bounds specifying the maximum numbers of headers or bytes to send have not been breached. -[File: crates/net/network/src/eth_requests.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/eth_requests.rs) +[File: crates/net/network/src/eth_requests.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/eth_requests.rs) ```rust,ignore fn get_headers_response(&self, request: GetBlockHeaders) -> Vec<Header> { let GetBlockHeaders { start_block, limit, skip, direction } = request; @@ -598,7 +598,7 @@ fn get_headers_response(&self, request: GetBlockHeaders) -> Vec<Header>
{ The `GetBlockBodies` payload is simpler, it just contains a vector of requested block hashes: -[File: crates/net/eth-wire/src/types/blocks.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/eth-wire/src/types/blocks.rs) +[File: crates/net/eth-wire/src/types/blocks.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/types/blocks.rs) ```rust,ignore pub struct GetBlockBodies( /// The block hashes to request bodies for. @@ -608,7 +608,7 @@ pub struct GetBlockBodies( In handling this request, similarly, the ETH requests task attempts, for each hash in the requested order, to fetch the block body (transactions & ommers), while checking that bounds specifying the maximum numbers of bodies or bytes to send have not been breached. -[File: crates/net/network/src/eth_requests.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/eth_requests.rs) +[File: crates/net/network/src/eth_requests.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/eth_requests.rs) ```rust,ignore fn on_bodies_request( &mut self, @@ -653,7 +653,7 @@ in the [transaction-pool](../../../ethereum/transaction-pool/README.md) chapter. Again, like the network management and ETH requests tasks, the transactions task is implemented as an endless future that runs as a background task on a standalone `tokio::task`. It's represented by the `TransactionsManager` struct: -[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/transactions.rs) +[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/transactions.rs) ```rust,ignore pub struct TransactionsManager { /// Access to the transaction pool. @@ -688,7 +688,7 @@ pub struct TransactionsManager { Unlike the ETH requests task, but like the network management task's `NetworkHandle`, the transactions task can also be accessed via a shareable "handle" struct, the `TransactionsHandle`: -[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/transactions.rs) +[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/transactions.rs) ```rust,ignore pub struct TransactionsHandle { /// Command channel to the [`TransactionsManager`] @@ -711,7 +711,7 @@ Let's get a view into the transactions task's operation by walking through the ` The `poll` method lays out an order of operations for the transactions task. It begins by draining the `TransactionsManager.network_events`, `TransactionsManager.command_rx`, and `TransactionsManager.transaction_events` streams, in this order. Then, it checks on all the current `TransactionsManager.inflight_requests`, which are requests sent by the node to its peers for full transaction objects. After this, it checks on the status of completed `TransactionsManager.pool_imports` events, which are transactions that are being imported into the node's transaction pool. Finally, it drains the new `TransactionsManager.pending_transactions` events from the transaction pool. 
-[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/transactions.rs) +[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/transactions.rs) ```rust,ignore fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.get_mut(); @@ -786,7 +786,7 @@ The `TransactionsManager.network_events` stream is the first to have all of its The events received in this channel are of type `NetworkEvent`: -[File: crates/net/network/src/manager.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/manager.rs) +[File: crates/net/network/src/manager.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/manager.rs) ```rust,ignore pub enum NetworkEvent { /// Closed the peer session. @@ -822,7 +822,7 @@ Removes the peer given by `NetworkEvent::SessionClosed.peer_id` from the `Transa **`NetworkEvent::SessionEstablished`** Begins by inserting a `Peer` into `TransactionsManager.peers` by `peer_id`, which is a struct of the following form: -[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/transactions.rs) +[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/transactions.rs) ```rust,ignore struct Peer { /// Keeps track of transactions that we know the peer has seen. @@ -838,7 +838,7 @@ The `request_tx` field on the `Peer` is used at the sender end of a channel to s After the `Peer` is added to `TransactionsManager.peers`, the hashes of all of the transactions in the node's transaction pool are sent to the peer in a [`NewPooledTransactionHashes` message](https://github.com/ethereum/devp2p/blob/master/caps/eth.md#newpooledtransactionhashes-0x08). -[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/transactions.rs) +[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/transactions.rs) ```rust,ignore fn on_network_event(&mut self, event: NetworkEvent) { match event { @@ -875,7 +875,7 @@ fn on_network_event(&mut self, event: NetworkEvent) { Next in the `poll` method, `TransactionsCommand`s sent through the `TransactionsManager.command_rx` stream are handled. These are the next to be handled as they are those sent manually via the `TransactionsHandle`, giving them precedence over transactions-related requests picked up from the network. The `TransactionsCommand` enum has the following form: -[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/transactions.rs) +[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/transactions.rs) ```rust,ignore enum TransactionsCommand { PropagateHash(H256), @@ -886,7 +886,7 @@ enum TransactionsCommand { `on_new_transactions` propagates the full transaction object, with the signer attached, to a small random sample of peers using the `propagate_transactions` method. Then, it notifies all other peers of the hash of the new transaction, so that they can request the full transaction object if they don't already have it. 
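The split between "full object" peers and "hash only" peers can be pictured with a small, self-contained helper before looking at the actual implementation below. The square-root sample size here is an assumption borrowed from the devp2p recommendation, not necessarily reth's exact policy:

```rust,ignore
/// Sketch: split an already-shuffled peer list into a small group that
/// receives full `Transactions` broadcasts and the rest, which only get
/// a `NewPooledTransactionHashes` announcement.
fn split_for_propagation<T>(mut peers: Vec<T>) -> (Vec<T>, Vec<T>) {
    let num_full = (peers.len() as f64).sqrt().round() as usize;
    // `split_off` keeps the first `num_full` peers in `peers` and
    // returns the remainder.
    let hash_only = peers.split_off(num_full.min(peers.len()));
    (peers, hash_only)
}
```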
-[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/transactions.rs) +[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/transactions.rs) ```rust,ignore fn on_new_transactions(&mut self, hashes: impl IntoIterator) { trace!(target: "net::tx", "Start propagating transactions"); @@ -946,7 +946,7 @@ fn propagate_transactions( After `TransactionsCommand`s, it's time to take care of transactions-related requests sent by peers in the network, so the `poll` method handles `NetworkTransactionEvent`s received through the `TransactionsManager.transaction_events` stream. `NetworkTransactionEvent` has the following form: -[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/transactions.rs) +[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/transactions.rs) ```rust,ignore pub enum NetworkTransactionEvent { /// Received list of transactions from the given peer. @@ -976,7 +976,7 @@ To understand this a bit better, let's double back and examine what `Transaction `TransactionsManager.pool_imports` is a set of futures representing the transactions which are currently in the process of being imported to the node's transaction pool. This process is asynchronous due to the validation of the transaction that must occur, thus we need to keep a handle on the generated future. -[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/transactions.rs) +[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/transactions.rs) ```rust,ignore fn import_transactions(&mut self, peer_id: PeerId, transactions: Vec) { let mut has_bad_transactions = false; @@ -1026,7 +1026,7 @@ This event is generated from the [`NewPooledTransactionHashes` protocol message] Here, it begins by adding the transaction hashes included in the `NewPooledTransactionHashes` payload to the LRU cache for the `Peer` identified by `peer_id` in `TransactionsManager.peers`. Next, it filters the list of hashes to those that are not already present in the transaction pool, and for each such hash, requests its full transaction object from the peer by sending it a [`GetPooledTransactions` protocol message](https://github.com/ethereum/devp2p/blob/master/caps/eth.md#getpooledtransactions-0x09) through the `Peer.request_tx` channel. If the request was successfully sent, a `GetPooledTxRequest` gets added to `TransactionsManager.inflight_requests` vector: -[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/transactions.rs) +[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/transactions.rs) ```rust,ignore struct GetPooledTxRequest { peer_id: PeerId, @@ -1036,7 +1036,7 @@ struct GetPooledTxRequest { As you can see, this struct also contains a `response` channel from which the peer's response can later be polled. 
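The wiring behind that channel is the usual oneshot pattern. Roughly, and assuming the message and field names quoted in this chapter (the exact shapes may differ in the source), sending the request and registering it as in-flight looks like this; the full `on_new_pooled_transactions` method follows below:

```rust,ignore
// Sketch: send a `GetPooledTransactions` request to the peer and keep
// the receiving half so `poll` can pick up the `PooledTransactions`
// response later.
let (response_tx, response_rx) = tokio::sync::oneshot::channel();
let request = PeerRequest::GetPooledTransactions {
    request: GetPooledTransactions(hashes),
    response: response_tx,
};
if peer.request_tx.try_send(request).is_ok() {
    self.inflight_requests.push(GetPooledTxRequest { peer_id, response: response_rx });
}
```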
-[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/transactions.rs) +[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/transactions.rs) ```rust,ignore fn on_new_pooled_transactions(&mut self, peer_id: PeerId, msg: NewPooledTransactionHashes) { if let Some(peer) = self.peers.get_mut(&peer_id) { @@ -1072,7 +1072,7 @@ This event is generated from the [`GetPooledTransactions` protocol message](http Here, it collects _all_ the transactions in the node's transaction pool, recovers their signers, adds their hashes to the LRU cache of the requesting peer, and sends them to the peer in a [`PooledTransactions` protocol message](https://github.com/ethereum/devp2p/blob/master/caps/eth.md#pooledtransactions-0x0a). This is sent through the `response` channel that's stored as a field of the `NetworkTransaction::GetPooledTransactions` variant itself. -[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/transactions.rs) +[File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/transactions.rs) ```rust,ignore fn on_get_pooled_transactions( &mut self, diff --git a/docs/crates/stages.md b/docs/crates/stages.md index 430579d197b..f9fcc33cd6a 100644 --- a/docs/crates/stages.md +++ b/docs/crates/stages.md @@ -2,7 +2,7 @@ The `stages` lib plays a central role in syncing the node, maintaining state, updating the database and more. The stages involved in the Reth pipeline are the `HeaderStage`, `BodyStage`, `SenderRecoveryStage`, and `ExecutionStage` (note that this list is non-exhaustive, and more pipeline stages will be added in the near future). Each of these stages are queued up and stored within the Reth pipeline. -[File: crates/stages/src/pipeline.rs](https://github.com/paradigmxyz/reth/blob/main/crates/stages/src/pipeline.rs) +[File: crates/stages/src/pipeline/mod.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/stages/src/pipeline/mod.rs) ```rust,ignore pub struct Pipeline { stages: Vec>, @@ -19,7 +19,7 @@ When the node is first started, a new `Pipeline` is initialized and all of the s Each stage within the pipeline implements the `Stage` trait which provides function interfaces to get the stage id, execute the stage and unwind the changes to the database if there was an issue during the stage execution. -[File: crates/stages/src/stage.rs](https://github.com/paradigmxyz/reth/blob/main/crates/stages/src/stage.rs) +[File: crates/stages/src/stage.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/stages/src/stage.rs) ```rust,ignore pub trait Stage: Send + Sync { /// Get the ID of the stage. @@ -53,7 +53,7 @@ To get a better idea of what is happening at each part of the pipeline, lets wal The `HeaderStage` is responsible for syncing the block headers, validating the header integrity and writing the headers to the database. When the `execute()` function is called, the local head of the chain is updated to the most recent block height previously executed by the stage. At this point, the node status is also updated with that block's height, hash and total difficulty. These values are used during any new eth/65 handshakes. 
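As a hedged sketch, that status update amounts to handing the networking layer a new head. The `Head` fields mirror the values mentioned above (height, hash, total difficulty); the surrounding API is illustrative rather than reth's exact interface:

```rust,ignore
// Sketch: advertise the most recently executed block in future
// eth/65 handshakes. `local_head` is assumed to carry the values
// read back from the database.
network.update_status(Head {
    number: local_head.number,
    hash: local_head.hash,
    total_difficulty: local_head.total_difficulty,
    ..Default::default()
});
```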
After updating the head, a stream is established with other peers in the network to sync the missing chain headers between the most recent state stored in the database and the chain tip. The `HeaderStage` contains a `downloader` attribute, which is a type that implements the `HeaderDownloader` trait. A `HeaderDownloader` is a `Stream` that returns batches of headers. -[File: crates/interfaces/src/p2p/headers/downloader.rs](https://github.com/paradigmxyz/reth/blob/main/crates/interfaces/src/p2p/headers/downloader.rs) +[File: crates/interfaces/src/p2p/headers/downloader.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/interfaces/src/p2p/headers/downloader.rs) ```rust,ignore pub trait HeaderDownloader: Send + Sync + Stream> + Unpin { /// Updates the gap to sync which ranges from local head to the sync target @@ -77,7 +77,7 @@ pub trait HeaderDownloader: Send + Sync + Stream> + Unp The `HeaderStage` relies on the downloader stream to return the headers in descending order starting from the chain tip down to the latest block in the database. While other stages in the `Pipeline` start from the most recent block in the database up to the chain tip, the `HeaderStage` works in reverse to avoid [long-range attacks](https://messari.io/report/long-range-attack). When a node downloads headers in ascending order, it will not know if it is being subjected to a long-range attack until it reaches the most recent blocks. To combat this, the `HeaderStage` starts by getting the chain tip from the Consensus Layer, verifies the tip, and then walks backwards by the parent hash. Each value yielded from the stream is a `SealedHeader`. -[File: crates/primitives/src/header.rs](https://github.com/paradigmxyz/reth/blob/main/crates/primitives/src/header.rs) +[File: crates/primitives/src/header.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/primitives/src/header.rs) ```rust,ignore pub struct SealedHeader { /// Locked Header fields. @@ -130,7 +130,7 @@ The new block is then pre-validated, checking that the ommers hash and transacti Following a successful `BodyStage`, the `SenderRecoveryStage` starts to execute. The `SenderRecoveryStage` is responsible for recovering the transaction sender for each of the newly added transactions to the database. At the beginning of the execution function, all of the transactions are first retrieved from the database. Then the `SenderRecoveryStage` goes through each transaction and recovers the signer from the transaction signature and hash. The transaction hash is derived by taking the Keccak 256-bit hash of the RLP encoded transaction bytes. This hash is then passed into the `recover_signer` function. -[File: crates/primitives/src/transaction/signature.rs](https://github.com/paradigmxyz/reth/blob/main/crates/primitives/src/transaction/signature.rs) +[File: crates/primitives/src/transaction/signature.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/primitives/src/transaction/signature.rs) ```rust,ignore pub(crate) fn recover_signer(&self, hash: H256) -> Option
<Address>
{ let mut sig: [u8; 65] = [0; 65]; @@ -157,7 +157,7 @@ Once the transaction signer has been recovered, the signer is then added to the Finally, after all headers, bodies and senders are added to the database, the `ExecutionStage` starts to execute. This stage is responsible for executing all of the transactions and updating the state stored in the database. For every new block header added to the database, the corresponding transactions have their signers attached to them and `reth_blockchain_tree::executor::execute_and_verify_receipt()` is called, pushing the state changes resulting from the execution to a `Vec`. -[File: crates/stages/src/stages/execution.rs](https://github.com/paradigmxyz/reth/blob/main/crates/stages/src/stages/execution.rs) +[File: crates/stages/src/stages/execution.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/stages/src/stages/execution.rs) ```rust,ignore pub fn execute_and_verify_receipt( block: &Block, diff --git a/docs/design/database.md b/docs/design/database.md index eae89bf9d01..45f9d2a139c 100644 --- a/docs/design/database.md +++ b/docs/design/database.md @@ -14,7 +14,7 @@ * We implemented that trait for the following encoding formats: * [Ethereum-specific Compact Encoding](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/codecs/derive/src/compact/mod.rs): A lot of Ethereum datatypes have unnecessary zeros when serialized, or optional (e.g. on empty hashes) which would be nice not to pay in storage costs. * [Erigon](https://github.com/ledgerwatch/erigon/blob/12ee33a492f5d240458822d052820d9998653a63/docs/programmers_guide/db_walkthrough.MD) achieves that by having a `bitfield` set on Table "PlainState which adds a bitfield to Accounts. - * Akula expanded it for other tables and datatypes manually. It also saved some more space by storing the length of certain types (U256, u64) using the modular_bitfield crate, which compacts this information. + * Akula expanded it for other tables and datatypes manually. It also saved some more space by storing the length of certain types (U256, u64) using the [`modular_bitfield`](https://docs.rs/modular-bitfield/latest/modular_bitfield/) crate, which compacts this information. * We generalized it for all types, by writing a derive macro that autogenerates code for implementing the trait. 
It, also generates the interfaces required for fuzzing using ToB/test-fuzz: * [Scale Encoding](https://github.com/paritytech/parity-scale-codec) * [Postcard Encoding](https://github.com/jamesmunns/postcard) diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index 39f6f07592b..c230db75858 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -27,7 +27,13 @@ "type": "grafana", "id": "grafana", "name": "Grafana", - "version": "10.0.2" + "version": "10.1.0" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph (old)", + "version": "" }, { "type": "panel", @@ -159,7 +165,7 @@ "showThresholdLabels": false, "showThresholdMarkers": true }, - "pluginVersion": "10.0.1", + "pluginVersion": "10.1.0", "targets": [ { "datasource": { @@ -226,7 +232,7 @@ "showUnfilled": true, "valueMode": "color" }, - "pluginVersion": "10.0.1", + "pluginVersion": "10.1.0", "targets": [ { "datasource": { @@ -270,6 +276,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -361,6 +368,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -470,6 +478,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -609,7 +618,7 @@ "unit": "percentunit" } }, - "pluginVersion": "10.0.1", + "pluginVersion": "10.1.0", "targets": [ { "datasource": { @@ -727,6 +736,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -1006,7 +1016,7 @@ }, "showHeader": true }, - "pluginVersion": "10.0.1", + "pluginVersion": "10.1.0", "targets": [ { "datasource": { @@ -1026,13 +1036,108 @@ "title": "Overflow pages by table", "type": "table" }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The number of pages on the MDBX freelist", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 42 + }, + "id": 113, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "sum(reth_db_freelist{instance=~\"$instance\"}) by (job)", + "legendFormat": "Pages ({{job}})", + "range": true, + "refId": "A" + } + ], + "title": "Freelist", + "type": "timeseries" + }, { "collapsed": false, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 42 + "y": 50 }, "id": 46, 
"panels": [], @@ -1066,6 +1171,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -1100,7 +1206,7 @@ "h": 8, "w": 24, "x": 0, - "y": 43 + "y": 51 }, "id": 56, "options": { @@ -1173,7 +1279,7 @@ "h": 1, "w": 24, "x": 0, - "y": 51 + "y": 59 }, "id": 6, "panels": [], @@ -1207,6 +1313,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -1244,7 +1351,7 @@ "h": 8, "w": 8, "x": 0, - "y": 52 + "y": 60 }, "id": 18, "options": { @@ -1300,6 +1407,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -1337,7 +1445,7 @@ "h": 8, "w": 8, "x": 8, - "y": 52 + "y": 60 }, "id": 16, "options": { @@ -1418,6 +1526,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -1456,7 +1565,7 @@ "h": 8, "w": 8, "x": 16, - "y": 52 + "y": 60 }, "id": 8, "options": { @@ -1535,7 +1644,7 @@ "h": 8, "w": 8, "x": 0, - "y": 60 + "y": 68 }, "id": 54, "options": { @@ -1718,6 +1827,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -1755,7 +1865,7 @@ "h": 8, "w": 14, "x": 8, - "y": 60 + "y": 68 }, "id": 103, "options": { @@ -1792,7 +1902,7 @@ "h": 1, "w": 24, "x": 0, - "y": 68 + "y": 76 }, "id": 24, "panels": [], @@ -1825,6 +1935,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -1887,7 +1998,7 @@ "h": 8, "w": 12, "x": 0, - "y": 69 + "y": 77 }, "id": 26, "options": { @@ -1980,6 +2091,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -2018,7 +2130,7 @@ "h": 8, "w": 12, "x": 12, - "y": 69 + "y": 77 }, "id": 33, "options": { @@ -2098,6 +2210,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -2135,7 +2248,7 @@ "h": 8, "w": 12, "x": 0, - "y": 77 + "y": 85 }, "id": 36, "options": { @@ -2184,7 +2297,7 @@ "h": 1, "w": 24, "x": 0, - "y": 85 + "y": 93 }, "id": 32, "panels": [], @@ -2218,6 +2331,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -2289,7 +2403,7 @@ "h": 8, "w": 12, "x": 0, - "y": 86 + "y": 94 }, "id": 30, "options": { @@ -2417,6 +2531,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -2452,7 +2567,7 @@ "h": 8, "w": 12, "x": 12, - "y": 86 + "y": 94 }, "id": 28, "options": { @@ -2532,6 +2647,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -2569,7 +2685,7 @@ "h": 8, "w": 12, "x": 0, - "y": 94 + "y": 102 }, "id": 35, "options": { @@ -2637,6 +2753,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -2692,7 +2809,7 @@ "h": 8, "w": 12, "x": 12, - "y": 94 + "y": 102 }, "id": 73, "options": { @@ -2742,7 +2859,7 @@ "h": 1, "w": 24, "x": 0, - "y": 102 + "y": 110 }, "id": 89, "panels": [], @@ -2776,6 +2893,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -2814,7 +2932,7 @@ "h": 8, "w": 12, "x": 0, - "y": 103 + "y": 111 }, "id": 91, "options": 
{ @@ -2894,6 +3012,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -2931,7 +3050,7 @@ "h": 8, "w": 12, "x": 12, - "y": 103 + "y": 111 }, "id": 92, "options": { @@ -3011,6 +3130,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -3066,7 +3186,7 @@ "h": 8, "w": 12, "x": 0, - "y": 111 + "y": 119 }, "id": 102, "options": { @@ -3148,6 +3268,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -3185,7 +3306,7 @@ "h": 8, "w": 12, "x": 12, - "y": 111 + "y": 119 }, "id": 94, "options": { @@ -3242,6 +3363,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -3309,7 +3431,7 @@ "h": 8, "w": 12, "x": 0, - "y": 119 + "y": 127 }, "id": 93, "options": { @@ -3389,6 +3511,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -3452,7 +3575,7 @@ "h": 8, "w": 12, "x": 12, - "y": 119 + "y": 127 }, "id": 95, "options": { @@ -3515,7 +3638,7 @@ "h": 1, "w": 24, "x": 0, - "y": 127 + "y": 135 }, "id": 79, "panels": [], @@ -3549,6 +3672,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -3586,7 +3710,7 @@ "h": 8, "w": 12, "x": 0, - "y": 128 + "y": 136 }, "id": 74, "options": { @@ -3643,6 +3767,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -3680,7 +3805,7 @@ "h": 8, "w": 12, "x": 12, - "y": 128 + "y": 136 }, "id": 80, "options": { @@ -3737,6 +3862,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -3774,7 +3900,7 @@ "h": 8, "w": 12, "x": 0, - "y": 136 + "y": 144 }, "id": 81, "options": { @@ -3812,7 +3938,7 @@ "h": 1, "w": 24, "x": 0, - "y": 144 + "y": 152 }, "id": 87, "panels": [], @@ -3846,6 +3972,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -3883,7 +4010,7 @@ "h": 8, "w": 12, "x": 0, - "y": 145 + "y": 153 }, "id": 83, "options": { @@ -3939,6 +4066,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -3976,7 +4104,7 @@ "h": 8, "w": 12, "x": 12, - "y": 145 + "y": 153 }, "id": 84, "options": { @@ -4044,6 +4172,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -4081,7 +4210,7 @@ "h": 8, "w": 12, "x": 0, - "y": 153 + "y": 161 }, "id": 85, "options": { @@ -4118,7 +4247,7 @@ "h": 1, "w": 24, "x": 0, - "y": 161 + "y": 169 }, "id": 68, "panels": [], @@ -4152,6 +4281,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 3, "pointSize": 5, @@ -4189,7 +4319,7 @@ "h": 8, "w": 12, "x": 0, - "y": 162 + "y": 170 }, "id": 60, "options": { @@ -4245,6 +4375,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 3, "pointSize": 5, @@ -4282,7 +4413,7 @@ "h": 8, "w": 12, "x": 12, - "y": 162 + "y": 170 }, "id": 62, "options": { @@ -4338,6 +4469,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 3, "pointSize": 5, @@ -4375,7 +4507,7 
@@ "h": 8, "w": 12, "x": 0, - "y": 170 + "y": 178 }, "id": 64, "options": { @@ -4412,7 +4544,7 @@ "h": 1, "w": 24, "x": 0, - "y": 178 + "y": 186 }, "id": 97, "panels": [], @@ -4443,6 +4575,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -4481,7 +4614,7 @@ "h": 8, "w": 12, "x": 0, - "y": 179 + "y": 187 }, "id": 98, "options": { @@ -4503,7 +4636,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_jemalloc_active", + "expr": "reth_jemalloc_active{instance=~\"$instance\"}", "instant": false, "legendFormat": "Active", "range": true, @@ -4515,7 +4648,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_jemalloc_allocated", + "expr": "reth_jemalloc_allocated{instance=~\"$instance\"}", "hide": false, "instant": false, "legendFormat": "Allocated", @@ -4528,7 +4661,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_jemalloc_mapped", + "expr": "reth_jemalloc_mapped{instance=~\"$instance\"}", "hide": false, "instant": false, "legendFormat": "Mapped", @@ -4541,7 +4674,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_jemalloc_metadata", + "expr": "reth_jemalloc_metadata{instance=~\"$instance\"}", "hide": false, "instant": false, "legendFormat": "Metadata", @@ -4554,7 +4687,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_jemalloc_resident", + "expr": "reth_jemalloc_resident{instance=~\"$instance\"}", "hide": false, "instant": false, "legendFormat": "Resident", @@ -4567,7 +4700,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "builder", - "expr": "reth_jemalloc_retained", + "expr": "reth_jemalloc_retained{instance=~\"$instance\"}", "hide": false, "instant": false, "legendFormat": "Retained", @@ -4603,6 +4736,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -4641,7 +4775,7 @@ "h": 8, "w": 12, "x": 12, - "y": 179 + "y": 187 }, "id": 101, "options": { @@ -4663,7 +4797,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "reth_process_resident_memory_bytes", + "expr": "reth_process_resident_memory_bytes{instance=~\"$instance\"}", "instant": false, "legendFormat": "Resident", "range": true, @@ -4698,6 +4832,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -4736,7 +4871,7 @@ "h": 8, "w": 12, "x": 0, - "y": 187 + "y": 195 }, "id": 99, "options": { @@ -4757,8 +4892,8 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "editorMode": "code", - "expr": "avg(rate(reth_process_cpu_seconds_total[1m]))", + "editorMode": "builder", + "expr": "avg(rate(reth_process_cpu_seconds_total{instance=~\"$instance\"}[1m]))", "instant": false, "legendFormat": "Process", "range": true, @@ -4793,6 +4928,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -4831,7 +4967,7 @@ "h": 8, "w": 12, "x": 12, - "y": 187 + "y": 195 }, "id": 100, "options": { @@ -4852,8 +4988,8 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "editorMode": "code", - "expr": "reth_process_open_fds", + "editorMode": "builder", + "expr": "reth_process_open_fds{instance=~\"$instance\"}", "instant": false, "legendFormat": "Open", "range": true, @@ -4869,7 +5005,7 @@ "h": 1, "w": 24, "x": 0, - "y": 195 + "y": 203 }, "id": 105, "panels": [], @@ -4900,6 +5036,7 @@ "tooltip": false, "viz": false }, + "insertNulls": 
false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -4939,7 +5076,7 @@ "h": 8, "w": 12, "x": 0, - "y": 196 + "y": 204 }, "id": 106, "options": { @@ -4995,6 +5132,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -5034,7 +5172,7 @@ "h": 8, "w": 12, "x": 12, - "y": 196 + "y": 204 }, "id": 107, "options": { @@ -5072,62 +5210,190 @@ "h": 1, "w": 24, "x": 0, - "y": 204 + "y": 212 }, - "id": 97, + "id": 108, "panels": [], "title": "RPC server", "type": "row" }, { - "title": "Active Requests", - "description": "The number of active requests.", - "type": "graph", + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, + "description": "The number of active requests.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 213 + }, + "hiddenSeries": false, + "id": 109, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.1.0", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, "targets": [ { - "expr": "reth_rpc_server_requests_started - reth_rpc_server_requests_finished", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_rpc_server_requests_started{instance=~\"$instance\"} - reth_rpc_server_requests_finished{instance=~\"$instance\"}", "format": "time_series", "legendFormat": "Active Requests", + "range": true, "refId": "A" } ], - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 205 + "thresholds": [], + "timeRegions": [], + "title": "Active Requests", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "logBase": 1, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false } }, { - "title": "Active Websocket Connections", - "description": "The number of active websocket connections.", - "type": "graph", + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, + "description": "The number of active websocket connections.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 213 + }, + "hiddenSeries": false, + "id": 110, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.1.0", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, "targets": [ { - "expr": "reth_rpc_server_ws_session_opened - reth_rpc_server_ws_session_closed", + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_rpc_server_ws_session_opened{instance=~\"$instance\"} - 
reth_rpc_server_ws_session_closed{instance=~\"$instance\"}", "format": "time_series", "legendFormat": "Active Websocket Connections", + "range": true, "refId": "A" } ], - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 205 + "thresholds": [], + "timeRegions": [], + "title": "Active Websocket Connections", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "logBase": 1, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false } }, { - "title": "Request Latency time", - "type": "heatmap", "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" @@ -5152,9 +5418,9 @@ "h": 8, "w": 12, "x": 0, - "y": 213 + "y": 221 }, - "id": 42, + "id": 111, "maxDataPoints": 25, "options": { "calculate": false, @@ -5196,7 +5462,7 @@ "unit": "percentunit" } }, - "pluginVersion": "10.0.2", + "pluginVersion": "10.1.0", "targets": [ { "datasource": { @@ -5212,11 +5478,11 @@ "range": true, "refId": "A" } - ] + ], + "title": "Request Latency time", + "type": "heatmap" }, { - "title": "Call Latency time", - "type": "heatmap", "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" @@ -5241,9 +5507,9 @@ "h": 8, "w": 12, "x": 12, - "y": 213 + "y": 221 }, - "id": 42, + "id": 112, "maxDataPoints": 25, "options": { "calculate": false, @@ -5285,7 +5551,7 @@ "unit": "percentunit" } }, - "pluginVersion": "10.0.2", + "pluginVersion": "10.1.0", "targets": [ { "datasource": { @@ -5301,7 +5567,9 @@ "range": true, "refId": "A" } - ] + ], + "title": "Call Latency time", + "type": "heatmap" } ], "refresh": "30s", @@ -5343,6 +5611,6 @@ "timezone": "", "title": "reth", "uid": "2k8BXz24x", - "version": 4, + "version": 6, "weekStart": "" } \ No newline at end of file diff --git a/examples/network-txpool.rs b/examples/network-txpool.rs index 08d69e964cf..d9c61636cbe 100644 --- a/examples/network-txpool.rs +++ b/examples/network-txpool.rs @@ -34,8 +34,7 @@ async fn main() -> eyre::Result<()> { let local_key = rng_secret_key(); // Configure the network - let config = - NetworkConfig::::builder(local_key).mainnet_boot_nodes().build(client); + let config = NetworkConfig::builder(local_key).mainnet_boot_nodes().build(client); // create the network instance let (_handle, network, txpool, _) = diff --git a/examples/network.rs b/examples/network.rs index 09a20f3118e..8fe6a6a8ec7 100644 --- a/examples/network.rs +++ b/examples/network.rs @@ -19,8 +19,7 @@ async fn main() -> eyre::Result<()> { let local_key = rng_secret_key(); // Configure the network - let config = - NetworkConfig::::builder(local_key).mainnet_boot_nodes().build(client); + let config = NetworkConfig::builder(local_key).mainnet_boot_nodes().build(client); // create the network instance let network = NetworkManager::new(config).await?;
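Taken together, the two example diffs above drop the explicit type annotation on `NetworkConfig`, since the client type is now inferred from the `build(client)` call. A minimal sketch of the resulting usage, assuming the imports already present in `examples/network.rs`:

```rust,ignore
// Sketch: build and spawn the network with the simplified builder API.
let local_key = rng_secret_key();
let config = NetworkConfig::builder(local_key).mainnet_boot_nodes().build(client);
let network = NetworkManager::new(config).await?;
let handle = network.handle().clone();
tokio::spawn(network); // the manager runs as an endless background future
```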