diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index b68135e4d84..35032a09326 100644 --- a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -40,13 +40,29 @@ jobs: run: make && make install-lcli - name: Start local testnet - run: ./start_local_testnet.sh + run: ./start_local_testnet.sh && sleep 60 working-directory: scripts/local_testnet - name: Print logs - run: ./print_logs.sh + run: ./dump_logs.sh working-directory: scripts/local_testnet - name: Stop local testnet run: ./stop_local_testnet.sh working-directory: scripts/local_testnet + + - name: Clean-up testnet + run: ./clean.sh + working-directory: scripts/local_testnet + + - name: Start local testnet with blinded block production + run: ./start_local_testnet.sh -p && sleep 60 + working-directory: scripts/local_testnet + + - name: Print logs for blinded block testnet + run: ./dump_logs.sh + working-directory: scripts/local_testnet + + - name: Stop local testnet with blinded block production + run: ./stop_local_testnet.sh + working-directory: scripts/local_testnet diff --git a/Cargo.lock b/Cargo.lock index e06b5f55ad3..a93bd7fd5e4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -178,6 +178,27 @@ version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e22d1f4b888c298a027c99dc9048015fac177587de20fc30232a057dfbe24a21" +[[package]] +name = "async-stream" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dad5c83079eae9969be7fadefe640a1c566901f05ff91ab221de4b6f68d9507e" +dependencies = [ + "async-stream-impl", + "futures-core", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10f203db73a71dfa2fb6dd22763990fa26f3d2625a6da2da900d23b87d26be27" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "async-trait" version = "0.1.56" @@ -262,11 +283,56 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +[[package]] +name = "axum" +version = "0.5.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b9496f0c1d1afb7a2af4338bbe1d969cddfead41d87a9fb3aaa6d0bbc7af648" +dependencies = [ + "async-trait", + "axum-core", + "bitflags", + "bytes", + "futures-util", + "http", + "http-body", + "hyper", + "itoa 1.0.2", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite 0.2.9", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tower", + "tower-http", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4f44a0e6200e9d11a1cdc989e4b358f6e3d354fbf48478f345a17f4e43f8635" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "mime", +] + [[package]] name = "backtrace" -version = "0.3.65" +version = "0.3.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11a17d453482a265fd5f8479f2a3f405566e6ca627837aaddb85af8b1ab8ef61" +checksum = "cab84319d616cfb654d03394f38ab7e6f0919e181b1b57e1fd15e7fb4077d9a7" dependencies = [ "addr2line", "cc", @@ -295,6 +361,24 @@ version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3bdca834647821e0b13d9539a8634eb62d3501b6b6c2cec1722786ee6671b851" +[[package]] 
+name = "beacon-api-client" +version = "0.1.0" +source = "git+https://github.com/ralexstokes/beacon-api-client#061c1b1bb1f18bcd7cf23d4cd375f99c78d5a2a5" +dependencies = [ + "ethereum-consensus", + "http", + "itertools", + "reqwest", + "serde", + "serde_json", + "thiserror", + "tokio", + "tracing", + "tracing-subscriber", + "url", +] + [[package]] name = "beacon_chain" version = "0.2.0" @@ -347,6 +431,7 @@ dependencies = [ "tokio", "tree_hash", "types", + "unused_port", ] [[package]] @@ -430,9 +515,9 @@ dependencies = [ [[package]] name = "bitvec" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1489fcb93a5bb47da0462ca93ad252ad6af2145cce58d10d46a83931ba9f016b" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" dependencies = [ "funty 2.0.0", "radium 0.7.0", @@ -591,9 +676,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" +checksum = "f0b3de4a0c5e67e16066a0715723abd91edc2f9001d09c46e1dca929351e130e" dependencies = [ "serde", ] @@ -636,12 +721,9 @@ dependencies = [ [[package]] name = "cast" -version = "0.2.7" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c24dab4283a142afa2fdca129b80ad2c6284e073930f964c3a1293c225ee39a" -dependencies = [ - "rustc_version 0.4.0", -] +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" @@ -666,9 +748,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chacha20" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01b72a433d0cf2aef113ba70f62634c56fddb0f244e6377185c56a7cadbd8f91" +checksum = "5c80e5460aa66fe3b91d40bcbdab953a597b60053e34d684ac6903f863b680a6" dependencies = [ "cfg-if", "cipher", @@ -678,9 +760,9 @@ dependencies = [ [[package]] name = "chacha20poly1305" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b84ed6d1d5f7aa9bdde921a5090e0ca4d934d250ea3b402a5fab3a994e28a2a" +checksum = "a18446b09be63d457bbec447509e85f662f32952b035ce892290396bc0b0cff5" dependencies = [ "aead", "chacha20", @@ -888,9 +970,9 @@ dependencies = [ [[package]] name = "criterion" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1604dafd25fba2fe2d5895a9da139f8dc9b319a5fe5354ca137cbbce4e178d10" +checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f" dependencies = [ "atty", "cast", @@ -914,9 +996,9 @@ dependencies = [ [[package]] name = "criterion-plot" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d00996de9f2f7559f7f4dc286073197f83e92256a59ed395f9aac01fe717da57" +checksum = "2673cc8207403546f45f5fd319a974b1e6983ad1a3ee7e6041650013be041876" dependencies = [ "cast", "itertools", @@ -924,9 +1006,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c02a4d71819009c192cf4872265391563fd6a84c81ff2c0f2a7026ca4c1d85c" +checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" dependencies 
= [ "cfg-if", "crossbeam-utils", @@ -934,9 +1016,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" +checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" dependencies = [ "cfg-if", "crossbeam-epoch", @@ -945,9 +1027,9 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.9" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07db9d94cbd326813772c968ccd25999e5f8ae22f4f8d1b11effa37ef6ce281d" +checksum = "045ebe27666471bb549370b4b0b3e51b07f56325befa4284db65fc89c02511b1" dependencies = [ "autocfg 1.1.0", "cfg-if", @@ -959,9 +1041,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d82ee10ce34d7bc12c2122495e7593a9c41347ecdd64185af4ecf72cb1a7f83" +checksum = "51887d4adc7b564537b15adcfb307936f8075dfcd5f00dde9a9f1d29383682bc" dependencies = [ "cfg-if", "once_cell", @@ -987,9 +1069,9 @@ dependencies = [ [[package]] name = "crypto-common" -version = "0.1.4" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5999502d32b9c48d492abe66392408144895020ec4709e549e840799f3bb74c0" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", "typenum", @@ -1052,7 +1134,7 @@ version = "3.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b37feaa84e6861e00a1f5e5aa8da3ee56d605c9992d33e082786754828e20865" dependencies = [ - "nix 0.24.1", + "nix 0.24.2", "winapi", ] @@ -1840,6 +1922,27 @@ dependencies = [ "tiny-keccak", ] +[[package]] +name = "ethereum-consensus" +version = "0.1.0" +source = "git+https://github.com/ralexstokes/ethereum-consensus#592eb44dc24403cc9d152f4b96683ab551533201" +dependencies = [ + "async-stream", + "blst", + "enr", + "hex", + "integer-sqrt", + "multiaddr 0.14.0", + "rand 0.8.5", + "serde", + "serde_json", + "sha2 0.9.9", + "ssz-rs", + "thiserror", + "tokio", + "tokio-stream", +] + [[package]] name = "ethereum-types" version = "0.12.1" @@ -1912,7 +2015,7 @@ dependencies = [ "serde_json", "thiserror", "tokio", - "tokio-tungstenite 0.17.1", + "tokio-tungstenite 0.17.2", "tracing", "tracing-futures", "url", @@ -1958,6 +2061,7 @@ dependencies = [ "eth2_serde_utils", "eth2_ssz", "eth2_ssz_types", + "ethereum-consensus", "ethers-core", "exit-future", "fork_choice", @@ -1967,6 +2071,7 @@ dependencies = [ "lazy_static", "lighthouse_metrics", "lru", + "mev-build-rs", "parking_lot 0.12.1", "rand 0.8.5", "reqwest", @@ -1975,6 +2080,7 @@ dependencies = [ "serde_json", "slog", "slot_clock", + "ssz-rs", "state_processing", "task_executor", "tempfile", @@ -2016,9 +2122,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf" +checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" dependencies = [ "instant", ] @@ -2338,9 +2444,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.26.1" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" +checksum = "22030e2c5a68ec659fde1e949a745124b48e6fa8b045b7ed5bd1fe4ccc5c4e5d" [[package]] name = "git-version" @@ -2428,9 +2534,12 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.12.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db0d4cf898abf0081f964436dc980e96670a0f36863e4b83aaacdb65c9d7ccc3" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +dependencies = [ + "ahash", +] [[package]] name = "hashlink" @@ -2585,6 +2694,12 @@ dependencies = [ "pin-project-lite 0.2.9", ] +[[package]] +name = "http-range-header" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" + [[package]] name = "http_api" version = "0.1.0" @@ -2620,6 +2735,7 @@ dependencies = [ "tokio-stream", "tree_hash", "types", + "unused_port", "warp", "warp_utils", ] @@ -2665,9 +2781,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.19" +version = "0.14.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42dc3c131584288d375f2d07f822b0cb012d8c6fb899a5b9fdb3cb7eb9b6004f" +checksum = "02c929dc5c39e335a03c405292728118860721b10190d98c2a0f0efd5baafbac" dependencies = [ "bytes", "futures-channel", @@ -2828,7 +2944,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" dependencies = [ "autocfg 1.1.0", - "hashbrown 0.12.1", + "hashbrown 0.12.3", ] [[package]] @@ -2902,9 +3018,9 @@ checksum = "112c678d4050afce233f4f2852bb2eb519230b3cf12f33585275537d7e41578d" [[package]] name = "js-sys" -version = "0.3.58" +version = "0.3.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3fac17f7123a73ca62df411b1bf727ccc805daa070338fda671c86dac1bdc27" +checksum = "258451ab10b34f8af53416d1fdab72c22e805f0c92a1136d59470ec0b11138b2" dependencies = [ "wasm-bindgen", ] @@ -3415,9 +3531,9 @@ dependencies = [ [[package]] name = "libsecp256k1" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0452aac8bab02242429380e9b2f94ea20cea2b37e2c1777a1358799bbe97f37" +checksum = "95b09eff1b35ed3b33b877ced3a691fc7a481919c7e29c53c906226fcf55e2a1" dependencies = [ "arrayref", "base64", @@ -3634,11 +3750,11 @@ dependencies = [ [[package]] name = "lru" -version = "0.7.7" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c84e6fe5655adc6ce00787cf7dcaf8dc4f998a0565d23eafc207a8b08ca3349a" +checksum = "e999beba7b6e8345721bd280141ed958096a2e4abdf74f67ff4ce49b4b54e47a" dependencies = [ - "hashbrown 0.11.2", + "hashbrown 0.12.3", ] [[package]] @@ -3703,6 +3819,12 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" +[[package]] +name = "matchit" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73cbba799671b762df5a175adf59ce145165747bb891505c43d09aefbbf38beb" + [[package]] name = "mdbx-sys" version = "0.11.6-4" @@ -3742,6 +3864,22 @@ dependencies = [ "safe_arith", ] +[[package]] +name = "mev-build-rs" +version = "0.2.0" +source = 
"git+https://github.com/ralexstokes/mev-rs?tag=v0.2.0#921fa3f7c3497839461964a5297dfe4f2cef3136" +dependencies = [ + "async-trait", + "axum", + "beacon-api-client", + "ethereum-consensus", + "serde", + "serde_json", + "ssz-rs", + "thiserror", + "tracing", +] + [[package]] name = "milagro_bls" version = "1.4.2" @@ -4035,9 +4173,9 @@ dependencies = [ [[package]] name = "nix" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f17df307904acd05aa8e32e97bb20f2a0df1728bbc2d771ae8f9a90463441e9" +checksum = "195cdbc1741b8134346d515b3a56a1c94b0912758009cfd53f99ea0f57b065fc" dependencies = [ "bitflags", "cfg-if", @@ -4162,9 +4300,9 @@ dependencies = [ [[package]] name = "object" -version = "0.28.4" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e42c982f2d955fac81dd7e1d0e1426a7d702acd9c98d19ab01083a6a0328c424" +checksum = "21158b2c33aa6d4561f1c0a6ea283ca92bc54802a93b263e910746d679a7eb53" dependencies = [ "memchr", ] @@ -4189,9 +4327,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.40" +version = "0.10.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb81a6430ac911acb25fe5ac8f1d2af1b4ea8a4fdfda0f1ee4292af2e2d8eb0e" +checksum = "618febf65336490dfcf20b73f885f5651a0c89c64c2d4a8c3662585a70bf5bd0" dependencies = [ "bitflags", "cfg-if", @@ -4230,9 +4368,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.74" +version = "0.9.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "835363342df5fba8354c5b453325b110ffd54044e588c539cf2f20a8014e4cb1" +checksum = "e5f9bd0c2710541a3cda73d6f9ac4f1b240de4ae261065d309dbe73d9dceb42f" dependencies = [ "autocfg 1.1.0", "cc", @@ -4293,7 +4431,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9182e4a71cae089267ab03e67c99368db7cd877baf50f931e5d6d4b71e195ac0" dependencies = [ "arrayvec", - "bitvec 1.0.0", + "bitvec 1.0.1", "byte-slice-cast", "impl-trait-for-tuples", "parity-scale-codec-derive 3.1.3", @@ -4404,9 +4542,9 @@ checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "pem" -version = "1.0.2" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9a3b09a20e374558580a4914d3b7d89bd61b954a5a5e1dcbea98753addb1947" +checksum = "03c64931a1a212348ec4f3b4362585eca7159d0d09cbdf4a7f74f02173596fd4" dependencies = [ "base64", ] @@ -4419,10 +4557,11 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "pest" -version = "2.1.3" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f4872ae94d7b90ae48754df22fd42ad52ce740b8f370b03da4835417403e53" +checksum = "69486e2b8c2d2aeb9762db7b4e00b0331156393555cff467f4163ff06821eef8" dependencies = [ + "thiserror", "ucd-trie", ] @@ -4539,9 +4678,9 @@ checksum = "e8d0eef3571242013a0d5dc84861c3ae4a652e56e12adf8bdc26ff5f8cb34c94" [[package]] name = "plotters" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a3fd9ec30b9749ce28cd91f255d569591cdf937fe280c312143e3c4bad6f2a" +checksum = "9428003b84df1496fb9d6eeee9c5f8145cb41ca375eb0dad204328888832811f" dependencies = [ "num-traits", "plotters-backend", @@ -4552,15 +4691,15 @@ dependencies = [ [[package]] name = "plotters-backend" -version = "0.3.2" +version = 
"0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d88417318da0eaf0fdcdb51a0ee6c3bed624333bff8f946733049380be67ac1c" +checksum = "193228616381fecdc1224c62e96946dfbc73ff4384fba576e052ff8c1bea8142" [[package]] name = "plotters-svg" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "521fa9638fa597e1dc53e9412a4f9cefb01187ee1f7413076f9e6749e2885ba9" +checksum = "e0918736323d1baff32ee0eade54984f6f201ad7e97d5cfb5d6ab4a358529615" dependencies = [ "plotters-backend", ] @@ -4662,9 +4801,9 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro2" -version = "1.0.40" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd96a1e8ed2596c337f8eae5f24924ec83f5ad5ab21ea8e455d3566c69fbcaf7" +checksum = "c278e965f1d8cf32d6e0e96de3d3e79712178ae67986d9cf9151f51e95aac89b" dependencies = [ "unicode-ident", ] @@ -5065,9 +5204,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.2.13" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ "bitflags", ] @@ -5330,9 +5469,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.7" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0a5f7c728f5d284929a1cccb5bc19884422bfe6ef4d6c409da2c41838983fcf" +checksum = "24c8ad4f0c00e1eb5bc7614d236a7f1300e3dbd76b68cac8e06fb00b015ad8d8" [[package]] name = "rw-stream-sink" @@ -5562,9 +5701,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.138" +version = "1.0.140" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1578c6245786b9d168c5447eeacfb96856573ca56c9d68fdcf394be134882a47" +checksum = "fc855a42c7967b7c369eb5860f7164ef1f6f81c20c7cc1141f2a604e18723b03" dependencies = [ "serde_derive", ] @@ -5591,9 +5730,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.138" +version = "1.0.140" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "023e9b1467aef8a10fb88f25611870ada9800ef7e22afce356bb0d2387b6f27c" +checksum = "6f2122636b9fe3b81f1cb25099fcf2d3f542cdb1d45940d56c713158884a05da" dependencies = [ "proc-macro2", "quote", @@ -5658,9 +5797,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.8.24" +version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "707d15895415db6628332b737c838b88c598522e4dc70647e59b72312924aebc" +checksum = "578a7433b776b56a35785ed5ce9a7e777ac0598aac5a6dd1b4b18a307c7fc71b" dependencies = [ "indexmap", "ryu", @@ -5804,9 +5943,12 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32" +checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" +dependencies = [ + "autocfg 1.1.0", +] [[package]] name = "slasher" @@ -6063,6 +6205,31 @@ dependencies = [ "der 0.5.1", ] +[[package]] +name = "ssz-rs" +version = "0.8.0" +source = "git+https://github.com/ralexstokes/ssz-rs#bd7cfb5a836e28747e6ce5e570234d14df0b24f7" +dependencies = [ + "bitvec 1.0.1", + "hex", + "lazy_static", + "num-bigint", + "serde", + "sha2 0.9.9", + 
"ssz-rs-derive", + "thiserror", +] + +[[package]] +name = "ssz-rs-derive" +version = "0.8.0" +source = "git+https://github.com/ralexstokes/ssz-rs#bd7cfb5a836e28747e6ce5e570234d14df0b24f7" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "stable_deref_trait" version = "1.2.0" @@ -6211,6 +6378,12 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "sync_wrapper" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20518fe4a4c9acf048008599e464deb21beeae3d3578418951a189c235a7a9a8" + [[package]] name = "synstructure" version = "0.12.6" @@ -6453,10 +6626,11 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.19.2" +version = "1.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c51a52ed6686dd62c320f9b89299e9dfb46f730c7a48e635c19f21d116cb1439" +checksum = "7a8325f63a7d4774dd041e363b2409ed1c5cbbd0f867795e661df066b2b0a581" dependencies = [ + "autocfg 1.1.0", "bytes", "libc", "memchr", @@ -6551,16 +6725,16 @@ dependencies = [ [[package]] name = "tokio-tungstenite" -version = "0.17.1" +version = "0.17.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06cda1232a49558c46f8a504d5b93101d42c0bf7f911f12a105ba48168f821ae" +checksum = "f714dd15bead90401d77e04243611caec13726c2408afd5b31901dfcdcb3b181" dependencies = [ "futures-util", "log", "rustls 0.20.6", "tokio", "tokio-rustls 0.23.4", - "tungstenite 0.17.2", + "tungstenite 0.17.3", "webpki 0.22.0", "webpki-roots", ] @@ -6604,6 +6778,47 @@ dependencies = [ "serde", ] +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "pin-project 1.0.11", + "pin-project-lite 0.2.9", + "tokio", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-http" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c530c8675c1dbf98facee631536fa116b5fb6382d7dd6dc1b118d970eafe3ba" +dependencies = [ + "bitflags", + "bytes", + "futures-core", + "futures-util", + "http", + "http-body", + "http-range-header", + "pin-project-lite 0.2.9", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62" + [[package]] name = "tower-service" version = "0.3.2" @@ -6667,9 +6882,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a713421342a5a666b7577783721d3117f1b69a393df803ee17bb73b1e122a59" +checksum = "60db860322da191b40952ad9affe65ea23e7dd6a5c442c2c42865810c6ab8e6b" dependencies = [ "ansi_term", "matchers", @@ -6798,9 +7013,9 @@ dependencies = [ [[package]] name = "tungstenite" -version = "0.17.2" +version = "0.17.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d96a2dea40e7570482f28eb57afbe42d97551905da6a9400acc5c328d24004f5" +checksum = "e27992fd6a8c29ee7eef28fc78349aa244134e10ad447ce3b9f0ac0ed0fa4ce0" dependencies = [ "base64", "byteorder", @@ -6922,9 +7137,9 @@ checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" [[package]] name = 
"unicode-ident" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bd2fe26506023ed7b5e1e315add59d6f584c621d037f9368fea9cfb988f368c" +checksum = "15c61ba63f9235225a22310255a29b806b907c9b8c964bcbd0a2c70f3f2deea7" [[package]] name = "unicode-normalization" @@ -7210,9 +7425,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.81" +version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c53b543413a17a202f4be280a7e5c62a1c69345f5de525ee64f8cfdbc954994" +checksum = "fc7652e3f6c4706c8d9cd54832c4a4ccb9b5336e2c3bd154d5cccfbf1c1f5f7d" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -7220,13 +7435,13 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.81" +version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5491a68ab4500fa6b4d726bd67408630c3dbe9c4fe7bda16d5c82a1fd8c7340a" +checksum = "662cd44805586bd52971b9586b1df85cdbbd9112e4ef4d8f41559c334dc6ac3f" dependencies = [ "bumpalo", - "lazy_static", "log", + "once_cell", "proc-macro2", "quote", "syn", @@ -7235,9 +7450,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.31" +version = "0.4.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de9a9cec1733468a8c657e57fa2413d2ae2c0129b95e87c5b72b8ace4d13f31f" +checksum = "fa76fb221a1f8acddf5b54ace85912606980ad661ac7a503b4570ffd3a624dad" dependencies = [ "cfg-if", "js-sys", @@ -7247,9 +7462,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.81" +version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c441e177922bc58f1e12c022624b6216378e5febc2f0533e41ba443d505b80aa" +checksum = "b260f13d3012071dfb1512849c033b1925038373aea48ced3012c09df952c602" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -7257,9 +7472,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.81" +version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d94ac45fcf608c1f45ef53e748d35660f168490c10b23704c7779ab8f5c3048" +checksum = "5be8e654bdd9b79216c2929ab90721aa82faf65c48cdf08bdc4e7f51357b80da" dependencies = [ "proc-macro2", "quote", @@ -7270,15 +7485,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.81" +version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a89911bd99e5f3659ec4acf9c4d93b0a90fe4a2a11f15328472058edc5261be" +checksum = "6598dd0bd3c7d51095ff6531a5b23e02acdc81804e30d8f07afb77b7215a140a" [[package]] name = "wasm-bindgen-test" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b30cf2cba841a812f035c40c50f53eb9c56181192a9dd2c71b65e6a87a05ba" +checksum = "513df541345bb9fcc07417775f3d51bbb677daf307d8035c0afafd87dc2e6599" dependencies = [ "console_error_panic_hook", "js-sys", @@ -7290,9 +7505,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-test-macro" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88ad594bf33e73cafcac2ae9062fc119d4f75f9c77e25022f91c9a64bd5b6463" +checksum = "6150d36a03e90a3cf6c12650be10626a9902d70c5270fd47d7a47e5389a10d56" dependencies = [ "proc-macro2", "quote", @@ -7315,9 +7530,9 @@ dependencies = [ [[package]] name = "web-sys" -version = 
"0.3.58" +version = "0.3.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fed94beee57daf8dd7d51f2b15dc2bcde92d7a72304cdf662a4371008b71b90" +checksum = "ed055ab27f941423197eb86b2035720b1a3ce40504df082cac2ecc6ed73335a1" dependencies = [ "js-sys", "wasm-bindgen", @@ -7417,9 +7632,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.22.3" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d8de8415c823c8abd270ad483c6feeac771fad964890779f9a8cb24fbbc1bf" +checksum = "f1c760f0d366a6c24a02ed7816e23e691f5d92291f94d15e836006fd11b04daf" dependencies = [ "webpki 0.22.0", ] @@ -7635,9 +7850,9 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.4.3" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d68d9dcec5f9b43a30d38c49f91dfedfaac384cb8f085faca366c26207dd1619" +checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" dependencies = [ "zeroize_derive", ] diff --git a/account_manager/src/validator/import.rs b/account_manager/src/validator/import.rs index 4c7140df39c..c581866a258 100644 --- a/account_manager/src/validator/import.rs +++ b/account_manager/src/validator/import.rs @@ -280,6 +280,8 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin password_opt, graffiti, suggested_fee_recipient, + None, + None, ) .map_err(|e| format!("Unable to create new validator definition: {:?}", e))?; diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index c8b82e3d28a..092f3064d5c 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -62,6 +62,7 @@ sensitive_url = { path = "../../common/sensitive_url" } superstruct = "0.5.0" hex = "0.4.2" exit-future = "0.2.0" +unused_port = {path = "../../common/unused_port"} [[test]] name = "beacon_chain_tests" diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 2e944f2939e..326d8b6c671 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -53,7 +53,9 @@ use crate::BeaconForkChoiceStore; use crate::BeaconSnapshot; use crate::{metrics, BeaconChainError}; use eth2::types::{EventKind, SseBlock, SyncDuty}; -use execution_layer::{ExecutionLayer, PayloadAttributes, PayloadStatus}; +use execution_layer::{ + BuilderParams, ChainHealth, ExecutionLayer, FailedCondition, PayloadAttributes, PayloadStatus, +}; use fork_choice::{ AttestationFromBlock, ExecutionStatus, ForkChoice, ForkchoiceUpdateParameters, InvalidationOperation, PayloadVerificationStatus, @@ -3315,10 +3317,21 @@ impl BeaconChain { let proposer_index = state.get_beacon_proposer_index(state.slot(), &self.spec)? as u64; - let pubkey_opt = state + let pubkey = state .validators() .get(proposer_index as usize) - .map(|v| v.pubkey); + .map(|v| v.pubkey) + .ok_or(BlockProductionError::BeaconChain( + BeaconChainError::ValidatorIndexUnknown(proposer_index as usize), + ))?; + + let builder_params = BuilderParams { + pubkey, + slot: state.slot(), + chain_health: self + .is_healthy() + .map_err(BlockProductionError::BeaconChain)?, + }; // If required, start the process of loading an execution payload from the EL early. This // allows it to run concurrently with things like attestation packing. 
@@ -3326,7 +3339,7 @@ impl BeaconChain { BeaconState::Base(_) | BeaconState::Altair(_) => None, BeaconState::Merge(_) => { let prepare_payload_handle = - get_execution_payload(self.clone(), &state, proposer_index, pubkey_opt)?; + get_execution_payload(self.clone(), &state, proposer_index, builder_params)?; Some(prepare_payload_handle) } }; @@ -4539,6 +4552,74 @@ impl BeaconChain { .map(|duration| (fork_name, duration)) } + /// This method serves to get a sense of the current chain health. It is used in block proposal + /// to determine whether we should outsource payload production duties. + /// + /// Since we are likely calling this during the slot we are going to propose in, don't take into + /// account the current slot when accounting for skips. + pub fn is_healthy(&self) -> Result { + // Check if the merge has been finalized. + if let Some(finalized_hash) = self + .canonical_head + .cached_head() + .forkchoice_update_parameters() + .finalized_hash + { + if ExecutionBlockHash::zero() == finalized_hash { + return Ok(ChainHealth::PreMerge); + } + } else { + return Ok(ChainHealth::PreMerge); + }; + + if self.config.builder_fallback_disable_checks { + return Ok(ChainHealth::Healthy); + } + + let current_slot = self.slot()?; + + // Check slots at the head of the chain. + let prev_slot = current_slot.saturating_sub(Slot::new(1)); + let head_skips = prev_slot.saturating_sub(self.canonical_head.cached_head().head_slot()); + let head_skips_check = head_skips.as_usize() <= self.config.builder_fallback_skips; + + // Check if finalization is advancing. + let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); + let epochs_since_finalization = current_epoch.saturating_sub( + self.canonical_head + .cached_head() + .finalized_checkpoint() + .epoch, + ); + let finalization_check = epochs_since_finalization.as_usize() + <= self.config.builder_fallback_epochs_since_finalization; + + // Check skip slots in the last `SLOTS_PER_EPOCH`. + let start_slot = current_slot.saturating_sub(T::EthSpec::slots_per_epoch()); + let mut epoch_skips = 0; + for slot in start_slot.as_u64()..current_slot.as_u64() { + if self + .block_root_at_slot_skips_none(Slot::new(slot))? + .is_none() + { + epoch_skips += 1; + } + } + let epoch_skips_check = epoch_skips <= self.config.builder_fallback_skips_per_epoch; + + if !head_skips_check { + Ok(ChainHealth::Unhealthy(FailedCondition::Skips)) + } else if !finalization_check { + Ok(ChainHealth::Unhealthy( + FailedCondition::EpochsSinceFinalization, + )) + } else if !epoch_skips_check { + Ok(ChainHealth::Unhealthy(FailedCondition::SkipsPerEpoch)) + } else { + Ok(ChainHealth::Healthy) + } + } + pub fn dump_as_dot(&self, output: &mut W) { let canonical_head_hash = self.canonical_head.cached_head().head_block_root(); let mut visited: HashSet = HashSet::new(); diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index d5e3d198148..2c43ca53ed3 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -24,6 +24,16 @@ pub struct ChainConfig { /// /// If set to 0 then block proposal will not wait for fork choice at all. pub fork_choice_before_proposal_timeout_ms: u64, + /// Number of skip slots in a row before the BN refuses to use connected builders during payload construction. + pub builder_fallback_skips: usize, + /// Number of skip slots in the past `SLOTS_PER_EPOCH` before the BN refuses to use connected + /// builders during payload construction. 
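// Illustrative sketch (simplified stand-ins, not the real Lighthouse types):
// the three `is_healthy` checks above reduced to plain integers. The
// thresholds mirror the `builder_fallback_*` defaults introduced in
// `ChainConfig::default()` in this diff.

#[derive(Debug, PartialEq)]
enum SketchHealth {
    Healthy,
    Unhealthy(&'static str),
}

fn sketch_is_healthy(
    head_skips: u64, // skips between the previous slot and the current head
    epochs_since_finalization: u64,
    skips_in_last_epoch: u64,
) -> SketchHealth {
    const MAX_HEAD_SKIPS: u64 = 3; // builder_fallback_skips
    const MAX_EPOCHS_SINCE_FINALIZATION: u64 = 3; // builder_fallback_epochs_since_finalization
    const MAX_SKIPS_PER_EPOCH: u64 = 8; // builder_fallback_skips_per_epoch

    if head_skips > MAX_HEAD_SKIPS {
        SketchHealth::Unhealthy("skips")
    } else if epochs_since_finalization > MAX_EPOCHS_SINCE_FINALIZATION {
        SketchHealth::Unhealthy("epochs_since_finalization")
    } else if skips_in_last_epoch > MAX_SKIPS_PER_EPOCH {
        SketchHealth::Unhealthy("skips_per_epoch")
    } else {
        SketchHealth::Healthy
    }
}

fn main() {
    // Four head skips in a row is enough to disable builder proposals.
    assert_eq!(sketch_is_healthy(4, 1, 2), SketchHealth::Unhealthy("skips"));
    assert_eq!(sketch_is_healthy(0, 1, 2), SketchHealth::Healthy);
}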
+ pub builder_fallback_skips_per_epoch: usize, + /// Number of epochs since finalization before the BN refuses to use connected builders during + /// payload construction. + pub builder_fallback_epochs_since_finalization: usize, + /// Whether any chain health checks should be considered when deciding whether to use the builder API. + pub builder_fallback_disable_checks: bool, pub count_unrealized: bool, } @@ -36,6 +46,11 @@ impl Default for ChainConfig { enable_lock_timeouts: true, max_network_size: 10 * 1_048_576, // 10M fork_choice_before_proposal_timeout_ms: DEFAULT_FORK_CHOICE_BEFORE_PROPOSAL_TIMEOUT, + // Builder fallback configs that are set in `clap` will override these. + builder_fallback_skips: 3, + builder_fallback_skips_per_epoch: 8, + builder_fallback_epochs_since_finalization: 3, + builder_fallback_disable_checks: false, count_unrealized: false, } } diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 189cb3fdeae..604fb6bea3b 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -138,6 +138,7 @@ pub enum BeaconChainError { new_slot: Slot, }, AltairForkDisabled, + BuilderMissing, ExecutionLayerMissing, BlockVariantLacksExecutionPayload(Hash256), ExecutionLayerErrorPayloadReconstruction(ExecutionBlockHash, execution_layer::Error), diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 81193563cb4..fade47e1d35 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -11,7 +11,7 @@ use crate::{ BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, BlockProductionError, ExecutionPayloadError, }; -use execution_layer::PayloadStatus; +use execution_layer::{BuilderParams, PayloadStatus}; use fork_choice::{InvalidationOperation, PayloadVerificationStatus}; use proto_array::{Block as ProtoBlock, ExecutionStatus}; use slog::debug; @@ -303,12 +303,11 @@ pub fn get_execution_payload< chain: Arc>, state: &BeaconState, proposer_index: u64, - pubkey: Option, + builder_params: BuilderParams, ) -> Result, BlockProductionError> { // Compute all required values from the `state` now to avoid needing to pass it into a spawned // task. 
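// Illustrative sketch of the "configs set in `clap` will override these" note
// above: defaults live on the config struct and any value parsed from the CLI
// wins. `cli_skips` stands in for a parsed `builder-fallback-skips`-style
// value; the exact flag name is assumed from the `builder-fallback` prefix
// mentioned later in this diff, and the real clap wiring is not shown here.

#[derive(Debug)]
struct BuilderFallbackConfig {
    skips: usize,
    skips_per_epoch: usize,
    epochs_since_finalization: usize,
    disable_checks: bool,
}

impl Default for BuilderFallbackConfig {
    fn default() -> Self {
        // Same defaults as `ChainConfig::default()` above.
        Self {
            skips: 3,
            skips_per_epoch: 8,
            epochs_since_finalization: 3,
            disable_checks: false,
        }
    }
}

fn apply_cli_overrides(
    mut config: BuilderFallbackConfig,
    cli_skips: Option<usize>,
) -> BuilderFallbackConfig {
    if let Some(skips) = cli_skips {
        config.skips = skips;
    }
    config
}

fn main() {
    let config = apply_cli_overrides(BuilderFallbackConfig::default(), Some(5));
    assert_eq!(config.skips, 5);
    assert_eq!(config.skips_per_epoch, 8);
    assert_eq!(config.epochs_since_finalization, 3);
    assert!(!config.disable_checks);
    println!("{config:?}");
}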
let spec = &chain.spec; - let slot = state.slot(); let current_epoch = state.current_epoch(); let is_merge_transition_complete = is_merge_transition_complete(state); let timestamp = compute_timestamp_at_slot(state, spec).map_err(BeaconStateError::from)?; @@ -325,13 +324,12 @@ pub fn get_execution_payload< async move { prepare_execution_payload::( &chain, - slot, is_merge_transition_complete, timestamp, random, proposer_index, - pubkey, latest_execution_payload_header_block_hash, + builder_params, ) .await }, @@ -359,19 +357,18 @@ pub fn get_execution_payload< #[allow(clippy::too_many_arguments)] pub async fn prepare_execution_payload( chain: &Arc>, - slot: Slot, is_merge_transition_complete: bool, timestamp: u64, random: Hash256, proposer_index: u64, - pubkey: Option, latest_execution_payload_header_block_hash: ExecutionBlockHash, + builder_params: BuilderParams, ) -> Result where T: BeaconChainTypes, Payload: ExecPayload + Default, { - let current_epoch = slot.epoch(T::EthSpec::slots_per_epoch()); + let current_epoch = builder_params.slot.epoch(T::EthSpec::slots_per_epoch()); let spec = &chain.spec; let execution_layer = chain .execution_layer @@ -432,9 +429,9 @@ where timestamp, random, proposer_index, - pubkey, - slot, forkchoice_update_params, + builder_params, + &chain.spec, ) .await .map_err(BlockProductionError::GetPayloadFailed)?; diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 9cb734f2a08..57a1da9dc65 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -15,7 +15,7 @@ mod early_attester_cache; mod errors; pub mod eth1_chain; pub mod events; -mod execution_payload; +pub mod execution_payload; pub mod fork_choice_signal; pub mod fork_revert; mod head_tracker; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 1f19465c083..6771861dfde 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -14,7 +14,9 @@ use bls::get_withdrawal_credentials; use execution_layer::test_utils::DEFAULT_JWT_SECRET; use execution_layer::{ auth::JwtKey, - test_utils::{ExecutionBlockGenerator, MockExecutionLayer, DEFAULT_TERMINAL_BLOCK}, + test_utils::{ + ExecutionBlockGenerator, MockExecutionLayer, TestingBuilder, DEFAULT_TERMINAL_BLOCK, + }, ExecutionLayer, }; use fork_choice::CountUnrealized; @@ -154,6 +156,7 @@ pub struct Builder { store_mutator: Option>, execution_layer: Option>, mock_execution_layer: Option>, + mock_builder: Option>, runtime: TestRuntime, log: Logger, } @@ -285,6 +288,7 @@ where store_mutator: None, execution_layer: None, mock_execution_layer: None, + mock_builder: None, runtime, log, } @@ -388,6 +392,38 @@ where self } + pub fn mock_execution_layer_with_builder(mut self, beacon_url: SensitiveUrl) -> Self { + // Get a random unused port + let port = unused_port::unused_tcp_port().unwrap(); + let builder_url = SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap(); + + let spec = self.spec.clone().expect("cannot build without spec"); + let mock_el = MockExecutionLayer::new( + self.runtime.task_executor.clone(), + spec.terminal_total_difficulty, + DEFAULT_TERMINAL_BLOCK, + spec.terminal_block_hash, + spec.terminal_block_hash_activation_epoch, + Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), + Some(builder_url.clone()), + ) + .move_to_terminal_block(); + + let mock_el_url = SensitiveUrl::parse(mock_el.server.url().as_str()).unwrap(); + + self.mock_builder = 
Some(TestingBuilder::new( + mock_el_url, + builder_url, + beacon_url, + spec, + self.runtime.task_executor.clone(), + )); + self.execution_layer = Some(mock_el.el.clone()); + self.mock_execution_layer = Some(mock_el); + + self + } + /// Instruct the mock execution engine to always return a "valid" response to any payload it is /// asked to execute. pub fn mock_execution_layer_all_payloads_valid(self) -> Self { @@ -456,6 +492,7 @@ where shutdown_receiver: Arc::new(Mutex::new(shutdown_receiver)), runtime: self.runtime, mock_execution_layer: self.mock_execution_layer, + mock_builder: self.mock_builder.map(Arc::new), rng: make_rng(), } } @@ -474,6 +511,7 @@ pub struct BeaconChainHarness { pub runtime: TestRuntime, pub mock_execution_layer: Option>, + pub mock_builder: Option>>, pub rng: Mutex, } diff --git a/beacon_node/builder_client/src/lib.rs b/beacon_node/builder_client/src/lib.rs index 500f5aa9ffe..3517d06b15b 100644 --- a/beacon_node/builder_client/src/lib.rs +++ b/beacon_node/builder_client/src/lib.rs @@ -1,4 +1,3 @@ -use eth2::ok_or_error; use eth2::types::builder_bid::SignedBuilderBid; use eth2::types::{ BlindedPayload, EthSpec, ExecPayload, ExecutionBlockHash, ExecutionPayload, @@ -6,23 +5,33 @@ use eth2::types::{ Slot, }; pub use eth2::Error; +use eth2::{ok_or_error, StatusCode}; use reqwest::{IntoUrl, Response}; use sensitive_url::SensitiveUrl; use serde::de::DeserializeOwned; use serde::Serialize; use std::time::Duration; -pub const DEFAULT_GET_HEADER_TIMEOUT_MILLIS: u64 = 500; +pub const DEFAULT_TIMEOUT_MILLIS: u64 = 15000; + +/// This timeout is in accordance with v0.2.0 of the [builder specs](https://github.com/flashbots/mev-boost/pull/20). +pub const DEFAULT_GET_HEADER_TIMEOUT_MILLIS: u64 = 1000; #[derive(Clone)] pub struct Timeouts { get_header: Duration, + post_validators: Duration, + post_blinded_blocks: Duration, + get_builder_status: Duration, } impl Default for Timeouts { fn default() -> Self { Self { get_header: Duration::from_millis(DEFAULT_GET_HEADER_TIMEOUT_MILLIS), + post_validators: Duration::from_millis(DEFAULT_TIMEOUT_MILLIS), + post_blinded_blocks: Duration::from_millis(DEFAULT_TIMEOUT_MILLIS), + get_builder_status: Duration::from_millis(DEFAULT_TIMEOUT_MILLIS), } } } @@ -51,14 +60,6 @@ impl BuilderHttpClient { }) } - async fn get(&self, url: U) -> Result { - self.get_response_with_timeout(url, None) - .await? - .json() - .await - .map_err(Error::Reqwest) - } - async fn get_with_timeout( &self, url: U, @@ -104,14 +105,13 @@ impl BuilderHttpClient { &self, url: U, body: &T, + timeout: Option, ) -> Result { - let response = self - .client - .post(url) - .json(body) - .send() - .await - .map_err(Error::Reqwest)?; + let mut builder = self.client.post(url); + if let Some(timeout) = timeout { + builder = builder.timeout(timeout); + } + let response = builder.json(body).send().await.map_err(Error::Reqwest)?; ok_or_error(response).await } @@ -129,7 +129,8 @@ impl BuilderHttpClient { .push("builder") .push("validators"); - self.post_generic(path, &validator, None).await?; + self.post_generic(path, &validator, Some(self.timeouts.post_validators)) + .await?; Ok(()) } @@ -148,7 +149,11 @@ impl BuilderHttpClient { .push("blinded_blocks"); Ok(self - .post_with_raw_response(path, &blinded_block) + .post_with_raw_response( + path, + &blinded_block, + Some(self.timeouts.post_blinded_blocks), + ) .await? .json() .await?) 
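// Illustrative sketch of the per-endpoint timeout scheme above: `get_header`
// keeps a tight 1s budget (the builder-specs bound referenced in the diff),
// while validator registration, blinded-block submission and status checks use
// the looser 15s default. Plain `std` types only; the real client attaches
// these durations to its HTTP requests.

use std::time::Duration;

#[derive(Debug, Clone, Copy)]
enum BuilderEndpoint {
    GetHeader,
    PostValidators,
    PostBlindedBlocks,
    GetStatus,
}

struct TimeoutsSketch {
    get_header: Duration,
    default: Duration,
}

impl TimeoutsSketch {
    fn new() -> Self {
        Self {
            get_header: Duration::from_millis(1_000), // DEFAULT_GET_HEADER_TIMEOUT_MILLIS
            default: Duration::from_millis(15_000),   // DEFAULT_TIMEOUT_MILLIS
        }
    }

    fn for_endpoint(&self, endpoint: BuilderEndpoint) -> Duration {
        match endpoint {
            BuilderEndpoint::GetHeader => self.get_header,
            BuilderEndpoint::PostValidators
            | BuilderEndpoint::PostBlindedBlocks
            | BuilderEndpoint::GetStatus => self.default,
        }
    }
}

fn main() {
    let timeouts = TimeoutsSketch::new();
    assert_eq!(timeouts.for_endpoint(BuilderEndpoint::GetHeader), Duration::from_secs(1));
    assert_eq!(timeouts.for_endpoint(BuilderEndpoint::PostBlindedBlocks), Duration::from_secs(15));
    assert_eq!(
        timeouts.for_endpoint(BuilderEndpoint::PostValidators),
        timeouts.for_endpoint(BuilderEndpoint::GetStatus)
    );
}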
@@ -160,7 +165,7 @@ impl BuilderHttpClient { slot: Slot, parent_hash: ExecutionBlockHash, pubkey: &PublicKeyBytes, - ) -> Result>, Error> { + ) -> Result>>, Error> { let mut path = self.server.full.clone(); path.path_segments_mut() @@ -173,7 +178,13 @@ impl BuilderHttpClient { .push(format!("{parent_hash:?}").as_str()) .push(pubkey.as_hex_string().as_str()); - self.get_with_timeout(path, self.timeouts.get_header).await + let resp = self.get_with_timeout(path, self.timeouts.get_header).await; + + if matches!(resp, Err(Error::StatusCode(StatusCode::NO_CONTENT))) { + Ok(None) + } else { + resp.map(Some) + } } /// `GET /eth/v1/builder/status` @@ -187,6 +198,7 @@ impl BuilderHttpClient { .push("builder") .push("status"); - self.get(path).await + self.get_with_timeout(path, self.timeouts.get_builder_status) + .await } } diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 26e4ba52ef2..83f9454f8a7 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -40,3 +40,7 @@ lazy_static = "1.4.0" ethers-core = { git = "https://github.com/gakonst/ethers-rs", rev = "02ad93a1cfb7b62eb051c77c61dc4c0218428e4a" } builder_client = { path = "../builder_client" } fork_choice = { path = "../../consensus/fork_choice" } +mev-build-rs = {git = "https://github.com/ralexstokes/mev-rs", tag = "v0.2.0"} +ethereum-consensus = {git = "https://github.com/ralexstokes/ethereum-consensus"} +ssz-rs = {git = "https://github.com/ralexstokes/ssz-rs"} + diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 0316cf3993a..9ed38b61b09 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -1,9 +1,6 @@ use super::*; use serde::{Deserialize, Serialize}; -use types::{ - EthSpec, ExecutionBlockHash, ExecutionPayloadHeader, FixedVector, Transaction, Unsigned, - VariableList, -}; +use types::{EthSpec, ExecutionBlockHash, FixedVector, Transaction, Unsigned, VariableList}; #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -432,59 +429,6 @@ impl From for JsonForkchoiceUpdatedV1Response { } } -#[derive(Debug, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "SCREAMING_SNAKE_CASE")] -pub enum JsonProposeBlindedBlockResponseStatus { - Valid, - Invalid, - Syncing, -} -#[derive(Debug, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -#[serde(bound = "E: EthSpec")] -pub struct JsonProposeBlindedBlockResponse { - pub result: ExecutionPayload, - pub error: Option, -} - -impl From> for ExecutionPayload { - fn from(j: JsonProposeBlindedBlockResponse) -> Self { - let JsonProposeBlindedBlockResponse { result, error: _ } = j; - result - } -} - -impl From for ProposeBlindedBlockResponseStatus { - fn from(j: JsonProposeBlindedBlockResponseStatus) -> Self { - match j { - JsonProposeBlindedBlockResponseStatus::Valid => { - ProposeBlindedBlockResponseStatus::Valid - } - JsonProposeBlindedBlockResponseStatus::Invalid => { - ProposeBlindedBlockResponseStatus::Invalid - } - JsonProposeBlindedBlockResponseStatus::Syncing => { - ProposeBlindedBlockResponseStatus::Syncing - } - } - } -} -impl From for JsonProposeBlindedBlockResponseStatus { - fn from(f: ProposeBlindedBlockResponseStatus) -> Self { - match f { - ProposeBlindedBlockResponseStatus::Valid => { - JsonProposeBlindedBlockResponseStatus::Valid - } - 
ProposeBlindedBlockResponseStatus::Invalid => { - JsonProposeBlindedBlockResponseStatus::Invalid - } - ProposeBlindedBlockResponseStatus::Syncing => { - JsonProposeBlindedBlockResponseStatus::Syncing - } - } - } -} - #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct TransitionConfigurationV1 { diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 5b82018749d..aea952a57db 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -4,6 +4,7 @@ //! This crate only provides useful functionality for "The Merge", it does not provide any of the //! deposit-contract functionality that the `beacon_node/eth1` crate already provides. +use crate::payload_cache::PayloadCache; use auth::{strip_prefix, Auth, JwtKey}; use builder_client::BuilderHttpClient; use engine_api::Error as ApiError; @@ -31,13 +32,14 @@ use tokio::{ time::sleep, }; use types::{ - BlindedPayload, BlockType, ChainSpec, Epoch, ExecPayload, ExecutionBlockHash, + BlindedPayload, BlockType, ChainSpec, Epoch, ExecPayload, ExecutionBlockHash, ForkName, ProposerPreparationData, PublicKeyBytes, SignedBeaconBlock, Slot, }; mod engine_api; mod engines; mod metrics; +pub mod payload_cache; mod payload_status; pub mod test_utils; @@ -69,6 +71,7 @@ pub enum Error { NoPayloadBuilder, ApiError(ApiError), Builder(builder_client::Error), + NoHeaderFromBuilder, EngineError(Box), NotSynced, ShuttingDown, @@ -101,6 +104,26 @@ pub struct Proposer { payload_attributes: PayloadAttributes, } +/// Information from the beacon chain that is necessary for querying the builder API. +pub struct BuilderParams { + pub pubkey: PublicKeyBytes, + pub slot: Slot, + pub chain_health: ChainHealth, +} + +pub enum ChainHealth { + Healthy, + Unhealthy(FailedCondition), + PreMerge, +} + +#[derive(Debug)] +pub enum FailedCondition { + Skips, + SkipsPerEpoch, + EpochsSinceFinalization, +} + struct Inner { engine: Arc, builder: Option, @@ -110,7 +133,7 @@ struct Inner { execution_blocks: Mutex>, proposers: RwLock>, executor: TaskExecutor, - phantom: std::marker::PhantomData, + payload_cache: PayloadCache, log: Logger, } @@ -212,7 +235,7 @@ impl ExecutionLayer { proposers: RwLock::new(HashMap::new()), execution_blocks: Mutex::new(LruCache::new(EXECUTION_BLOCKS_LRU_CACHE_SIZE)), executor, - phantom: std::marker::PhantomData, + payload_cache: PayloadCache::default(), log, }; @@ -231,6 +254,16 @@ impl ExecutionLayer { &self.inner.builder } + /// Cache a full payload, keyed on the `tree_hash_root` of its `transactions` field. + fn cache_payload(&self, payload: &ExecutionPayload) -> Option> { + self.inner.payload_cache.put(payload.clone()) + } + + /// Attempt to retrieve a full payload from the payload cache by the `transactions_root`. 
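// Illustrative sketch of the payload-cache round trip introduced here: the
// locally built full payload is cached under a tree-hash root (the
// `transactions_root` per the doc comment above) and popped later, presumably
// when the blinded block needs its full payload restored; that later use is an
// assumption, since this hunk only shows the cache itself. A `HashMap` stands
// in for the bounded `LruCache` used by the real `PayloadCache`.

use std::collections::HashMap;

type Root = [u8; 32];

#[derive(Debug, Clone, PartialEq)]
struct FullPayloadSketch {
    block_number: u64,
    transactions: Vec<Vec<u8>>,
}

#[derive(Default)]
struct PayloadCacheSketch {
    payloads: HashMap<Root, FullPayloadSketch>,
}

impl PayloadCacheSketch {
    fn put(&mut self, root: Root, payload: FullPayloadSketch) -> Option<FullPayloadSketch> {
        self.payloads.insert(root, payload)
    }

    fn pop(&mut self, root: &Root) -> Option<FullPayloadSketch> {
        self.payloads.remove(root)
    }
}

fn main() {
    let mut cache = PayloadCacheSketch::default();
    let root = [0xaa; 32]; // stand-in for the payload's tree hash root
    let payload = FullPayloadSketch { block_number: 1, transactions: vec![vec![0x01]] };

    // At proposal time: cache the full payload while only the header circulates.
    cache.put(root, payload.clone());

    // Later: recover the full payload by root; a second pop finds nothing.
    assert_eq!(cache.pop(&root), Some(payload));
    assert!(cache.pop(&root).is_none());
}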
+ pub fn get_payload_by_root(&self, root: &Hash256) -> Option> { + self.inner.payload_cache.pop(root) + } + pub fn executor(&self) -> &TaskExecutor { &self.inner.executor } @@ -487,9 +520,9 @@ impl ExecutionLayer { timestamp: u64, prev_randao: Hash256, proposer_index: u64, - pubkey: Option, - slot: Slot, forkchoice_update_params: ForkchoiceUpdateParameters, + builder_params: BuilderParams, + spec: &ChainSpec, ) -> Result { let suggested_fee_recipient = self.get_suggested_fee_recipient(proposer_index).await; @@ -504,9 +537,9 @@ impl ExecutionLayer { timestamp, prev_randao, suggested_fee_recipient, - pubkey, - slot, forkchoice_update_params, + builder_params, + spec, ) .await } @@ -534,36 +567,137 @@ impl ExecutionLayer { timestamp: u64, prev_randao: Hash256, suggested_fee_recipient: Address, - pubkey_opt: Option, - slot: Slot, forkchoice_update_params: ForkchoiceUpdateParameters, + builder_params: BuilderParams, + spec: &ChainSpec, ) -> Result { - //FIXME(sean) fallback logic included in PR #3134 - - // Don't attempt to outsource payload construction until after the merge transition has been - // finalized. We want to be conservative with payload construction until then. - if let (Some(builder), Some(pubkey)) = (self.builder(), pubkey_opt) { - if forkchoice_update_params - .finalized_hash - .map_or(false, |finalized_block_hash| { - finalized_block_hash != ExecutionBlockHash::zero() - }) - { - info!( - self.log(), - "Requesting blinded header from connected builder"; - "slot" => ?slot, - "pubkey" => ?pubkey, - "parent_hash" => ?parent_hash, - ); - return builder - .get_builder_header::(slot, parent_hash, &pubkey) - .await - .map(|d| d.data.message.header) - .map_err(Error::Builder); + if let Some(builder) = self.builder() { + let slot = builder_params.slot; + let pubkey = builder_params.pubkey; + + match builder_params.chain_health { + ChainHealth::Healthy => { + info!( + self.log(), + "Requesting blinded header from connected builder"; + "slot" => ?slot, + "pubkey" => ?pubkey, + "parent_hash" => ?parent_hash, + ); + let (relay_result, local_result) = tokio::join!( + builder.get_builder_header::(slot, parent_hash, &pubkey), + self.get_full_payload_caching( + parent_hash, + timestamp, + prev_randao, + suggested_fee_recipient, + forkchoice_update_params, + ) + ); + + return match (relay_result, local_result) { + (Err(e), Ok(local)) => { + warn!( + self.log(), + "Unable to retrieve a payload from a connected \ + builder, falling back to the local execution client: {e:?}" + ); + Ok(local) + } + (Ok(None), Ok(local)) => { + warn!( + self.log(), + "No payload provided by connected builder. \ + Attempting to propose through local execution engine" + ); + Ok(local) + } + (Ok(Some(relay)), Ok(local)) => { + let is_signature_valid = relay.data.verify_signature(spec); + let header = relay.data.message.header; + + info!( + self.log(), + "Received a payload header from the connected builder"; + "block_hash" => ?header.block_hash(), + ); + + if header.parent_hash() != parent_hash { + warn!( + self.log(), + "Invalid parent hash from connected builder, \ + falling back to local execution engine." + ); + Ok(local) + } else if header.prev_randao() != prev_randao { + warn!( + self.log(), + "Invalid prev randao from connected builder, \ + falling back to local execution engine." + ); + Ok(local) + } else if header.timestamp() != local.timestamp() { + warn!( + self.log(), + "Invalid timestamp from connected builder, \ + falling back to local execution engine." 
+ ); + Ok(local) + } else if header.block_number() != local.block_number() { + warn!( + self.log(), + "Invalid block number from connected builder, \ + falling back to local execution engine." + ); + Ok(local) + } else if !matches!(relay.version, Some(ForkName::Merge)) { + // Once fork information is added to the payload, we will need to + // check that the local and relay payloads match. At this point, if + // we are requesting a payload at all, we have to assume this is + // the Bellatrix fork. + warn!( + self.log(), + "Invalid fork from connected builder, falling \ + back to local execution engine." + ); + Ok(local) + } else if !is_signature_valid { + let pubkey_bytes = relay.data.message.pubkey; + warn!(self.log(), "Invalid signature for pubkey {pubkey_bytes} on \ + bid from connected builder, falling back to local execution engine."); + Ok(local) + } else { + if header.fee_recipient() != suggested_fee_recipient { + info!( + self.log(), + "Fee recipient from connected builder does \ + not match, using it anyways." + ); + } + Ok(header) + } + } + (relay_result, Err(local_error)) => { + warn!(self.log(), "Failure from local execution engine. Attempting to \ + propose through connected builder"; "error" => ?local_error); + relay_result + .map_err(Error::Builder)? + .ok_or(Error::NoHeaderFromBuilder) + .map(|d| d.data.message.header) + } + }; + } + ChainHealth::Unhealthy(condition) => { + info!(self.log(), "Due to poor chain health the local execution engine will be used \ + for payload construction. To adjust chain health conditions \ + Use `builder-fallback` prefixed flags"; + "failed_condition" => ?condition) + } + // Intentional no-op, so we never attempt builder API proposals pre-merge. + ChainHealth::PreMerge => (), } } - self.get_full_payload::( + self.get_full_payload_caching( parent_hash, timestamp, prev_randao, @@ -593,6 +727,26 @@ impl ExecutionLayer { .await } + /// Get a full payload and cache its result in the execution layer's payload cache. + async fn get_full_payload_caching>( + &self, + parent_hash: ExecutionBlockHash, + timestamp: u64, + prev_randao: Hash256, + suggested_fee_recipient: Address, + forkchoice_update_params: ForkchoiceUpdateParameters, + ) -> Result { + self.get_full_payload_with( + parent_hash, + timestamp, + prev_randao, + suggested_fee_recipient, + forkchoice_update_params, + Self::cache_payload, + ) + .await + } + async fn get_full_payload_with>( &self, parent_hash: ExecutionBlockHash, diff --git a/beacon_node/execution_layer/src/payload_cache.rs b/beacon_node/execution_layer/src/payload_cache.rs new file mode 100644 index 00000000000..60a8f2a95c5 --- /dev/null +++ b/beacon_node/execution_layer/src/payload_cache.rs @@ -0,0 +1,33 @@ +use lru::LruCache; +use parking_lot::Mutex; +use tree_hash::TreeHash; +use types::{EthSpec, ExecutionPayload, Hash256}; + +pub const DEFAULT_PAYLOAD_CACHE_SIZE: usize = 10; + +/// A cache mapping execution payloads by tree hash roots. 
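// Illustrative sketch of the relay-vs-local decision above: the builder's
// header is only used when it is consistent with the locally built payload and
// the bid checks out; otherwise the node falls back to the local execution
// engine. Field names mirror the checks in `get_blinded_payload`; the structs
// are simplified stand-ins, the signature and fork checks are folded into one
// boolean, and (as in the diff) a mismatched fee recipient is only logged, not
// rejected.

struct HeaderFields {
    parent_hash: [u8; 32],
    prev_randao: [u8; 32],
    timestamp: u64,
    block_number: u64,
}

enum PayloadSource {
    Builder,
    LocalEngine(&'static str),
}

fn choose_payload_source(
    relay: &HeaderFields,
    local: &HeaderFields,
    expected_parent_hash: [u8; 32],
    expected_prev_randao: [u8; 32],
    relay_bid_valid: bool, // bid signature + fork checks from the diff
) -> PayloadSource {
    if relay.parent_hash != expected_parent_hash {
        PayloadSource::LocalEngine("parent hash mismatch")
    } else if relay.prev_randao != expected_prev_randao {
        PayloadSource::LocalEngine("prev randao mismatch")
    } else if relay.timestamp != local.timestamp {
        PayloadSource::LocalEngine("timestamp mismatch")
    } else if relay.block_number != local.block_number {
        PayloadSource::LocalEngine("block number mismatch")
    } else if !relay_bid_valid {
        PayloadSource::LocalEngine("invalid bid signature or fork")
    } else {
        PayloadSource::Builder
    }
}

fn main() {
    let local = HeaderFields { parent_hash: [1; 32], prev_randao: [2; 32], timestamp: 12, block_number: 7 };
    // Same consensus fields but a different block number: the relay header is rejected.
    let relay = HeaderFields { parent_hash: [1; 32], prev_randao: [2; 32], timestamp: 12, block_number: 8 };
    match choose_payload_source(&relay, &local, [1; 32], [2; 32], true) {
        PayloadSource::Builder => println!("using builder header"),
        PayloadSource::LocalEngine(reason) => println!("falling back to local engine: {reason}"),
    }
}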
+pub struct PayloadCache { + payloads: Mutex>>, +} + +#[derive(Hash, PartialEq, Eq)] +struct PayloadCacheId(Hash256); + +impl Default for PayloadCache { + fn default() -> Self { + PayloadCache { + payloads: Mutex::new(LruCache::new(DEFAULT_PAYLOAD_CACHE_SIZE)), + } + } +} + +impl PayloadCache { + pub fn put(&self, payload: ExecutionPayload) -> Option> { + let root = payload.tree_hash_root(); + self.payloads.lock().put(PayloadCacheId(root), payload) + } + + pub fn pop(&self, root: &Hash256) -> Option> { + self.payloads.lock().pop(&PayloadCacheId(*root)) + } +} diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs new file mode 100644 index 00000000000..6b565cb3d87 --- /dev/null +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -0,0 +1,383 @@ +use crate::test_utils::DEFAULT_JWT_SECRET; +use crate::{Config, ExecutionLayer, PayloadAttributes}; +use async_trait::async_trait; +use eth2::types::{BlockId, StateId, ValidatorId}; +use eth2::{BeaconNodeHttpClient, Timeouts}; +use ethereum_consensus::crypto::{SecretKey, Signature}; +use ethereum_consensus::primitives::BlsPublicKey; +pub use ethereum_consensus::state_transition::Context; +use fork_choice::ForkchoiceUpdateParameters; +use mev_build_rs::{ + sign_builder_message, verify_signed_builder_message, BidRequest, BlindedBlockProviderError, + BlindedBlockProviderServer, BuilderBid, ExecutionPayload as ServerPayload, + ExecutionPayloadHeader as ServerPayloadHeader, SignedBlindedBeaconBlock, SignedBuilderBid, + SignedValidatorRegistration, +}; +use parking_lot::RwLock; +use sensitive_url::SensitiveUrl; +use ssz::{Decode, Encode}; +use ssz_rs::{Merkleized, SimpleSerialize}; +use std::collections::HashMap; +use std::fmt::Debug; +use std::net::Ipv4Addr; +use std::sync::Arc; +use std::time::Duration; +use task_executor::TaskExecutor; +use tempfile::NamedTempFile; +use tree_hash::TreeHash; +use types::{ + Address, BeaconState, BlindedPayload, ChainSpec, EthSpec, ExecPayload, Hash256, Slot, Uint256, +}; + +#[derive(Clone)] +pub enum Operation { + FeeRecipient(Address), + GasLimit(usize), + Value(usize), + ParentHash(Hash256), + PrevRandao(Hash256), + BlockNumber(usize), + Timestamp(usize), +} + +impl Operation { + fn apply(self, bid: &mut BuilderBid) -> Result<(), BlindedBlockProviderError> { + match self { + Operation::FeeRecipient(fee_recipient) => { + bid.header.fee_recipient = to_ssz_rs(&fee_recipient)? 
+ } + Operation::GasLimit(gas_limit) => bid.header.gas_limit = gas_limit as u64, + Operation::Value(value) => bid.value = to_ssz_rs(&Uint256::from(value))?, + Operation::ParentHash(parent_hash) => bid.header.parent_hash = to_ssz_rs(&parent_hash)?, + Operation::PrevRandao(prev_randao) => bid.header.prev_randao = to_ssz_rs(&prev_randao)?, + Operation::BlockNumber(block_number) => bid.header.block_number = block_number as u64, + Operation::Timestamp(timestamp) => bid.header.timestamp = timestamp as u64, + } + Ok(()) + } +} + +pub struct TestingBuilder { + server: BlindedBlockProviderServer>, + pub builder: MockBuilder, +} + +impl TestingBuilder { + pub fn new( + mock_el_url: SensitiveUrl, + builder_url: SensitiveUrl, + beacon_url: SensitiveUrl, + spec: ChainSpec, + executor: TaskExecutor, + ) -> Self { + let file = NamedTempFile::new().unwrap(); + let path = file.path().into(); + std::fs::write(&path, hex::encode(DEFAULT_JWT_SECRET)).unwrap(); + + // This EL should not talk to a builder + let config = Config { + execution_endpoints: vec![mock_el_url], + secret_files: vec![path], + suggested_fee_recipient: None, + ..Default::default() + }; + + let el = + ExecutionLayer::from_config(config, executor.clone(), executor.log().clone()).unwrap(); + + // This should probably be done for all fields, we only update ones we are testing with so far. + let mut context = Context::for_mainnet(); + context.terminal_total_difficulty = to_ssz_rs(&spec.terminal_total_difficulty).unwrap(); + context.terminal_block_hash = to_ssz_rs(&spec.terminal_block_hash).unwrap(); + context.terminal_block_hash_activation_epoch = + to_ssz_rs(&spec.terminal_block_hash_activation_epoch).unwrap(); + + let builder = MockBuilder::new( + el, + BeaconNodeHttpClient::new(beacon_url, Timeouts::set_all(Duration::from_secs(1))), + spec, + context, + ); + let port = builder_url.full.port().unwrap(); + let host: Ipv4Addr = builder_url + .full + .host_str() + .unwrap() + .to_string() + .parse() + .unwrap(); + let server = BlindedBlockProviderServer::new(host, port, builder.clone()); + Self { server, builder } + } + + pub async fn run(&self) { + self.server.run().await + } +} + +#[derive(Clone)] +pub struct MockBuilder { + el: ExecutionLayer, + beacon_client: BeaconNodeHttpClient, + spec: ChainSpec, + context: Arc, + val_registration_cache: Arc>>, + builder_sk: SecretKey, + operations: Arc>>, + invalidate_signatures: Arc>, +} + +impl MockBuilder { + pub fn new( + el: ExecutionLayer, + beacon_client: BeaconNodeHttpClient, + spec: ChainSpec, + context: Context, + ) -> Self { + let sk = SecretKey::random(&mut rand::thread_rng()).unwrap(); + Self { + el, + beacon_client, + // Should keep spec and context consistent somehow + spec, + context: Arc::new(context), + val_registration_cache: Arc::new(RwLock::new(HashMap::new())), + builder_sk: sk, + operations: Arc::new(RwLock::new(vec![])), + invalidate_signatures: Arc::new(RwLock::new(false)), + } + } + + pub fn add_operation(&self, op: Operation) { + self.operations.write().push(op); + } + + pub fn invalid_signatures(&self) { + *self.invalidate_signatures.write() = true; + } + + pub fn valid_signatures(&mut self) { + *self.invalidate_signatures.write() = false; + } + + fn apply_operations(&self, bid: &mut BuilderBid) -> Result<(), BlindedBlockProviderError> { + let mut guard = self.operations.write(); + while let Some(op) = guard.pop() { + op.apply(bid)?; + } + Ok(()) + } +} + +#[async_trait] +impl mev_build_rs::BlindedBlockProvider for MockBuilder { + async fn register_validators( + &self, + 
registrations: &mut [SignedValidatorRegistration], + ) -> Result<(), BlindedBlockProviderError> { + for registration in registrations { + let pubkey = registration.message.public_key.clone(); + let message = &mut registration.message; + verify_signed_builder_message( + message, + ®istration.signature, + &pubkey, + &self.context, + )?; + self.val_registration_cache.write().insert( + registration.message.public_key.clone(), + registration.clone(), + ); + } + + Ok(()) + } + + async fn fetch_best_bid( + &self, + bid_request: &BidRequest, + ) -> Result { + let slot = Slot::new(bid_request.slot); + let signed_cached_data = self + .val_registration_cache + .read() + .get(&bid_request.public_key) + .ok_or_else(|| convert_err("missing registration"))? + .clone(); + let cached_data = signed_cached_data.message; + + let head = self + .beacon_client + .get_beacon_blocks::(BlockId::Head) + .await + .map_err(convert_err)? + .ok_or_else(|| convert_err("missing head block"))?; + + let block = head.data.message_merge().map_err(convert_err)?; + let head_block_root = block.tree_hash_root(); + let head_execution_hash = block.body.execution_payload.execution_payload.block_hash; + if head_execution_hash != from_ssz_rs(&bid_request.parent_hash)? { + return Err(BlindedBlockProviderError::Custom(format!( + "head mismatch: {} {}", + head_execution_hash, bid_request.parent_hash + ))); + } + + let finalized_execution_hash = self + .beacon_client + .get_beacon_blocks::(BlockId::Finalized) + .await + .map_err(convert_err)? + .ok_or_else(|| convert_err("missing finalized block"))? + .data + .message_merge() + .map_err(convert_err)? + .body + .execution_payload + .execution_payload + .block_hash; + + let justified_execution_hash = self + .beacon_client + .get_beacon_blocks::(BlockId::Justified) + .await + .map_err(convert_err)? + .ok_or_else(|| convert_err("missing finalized block"))? + .data + .message_merge() + .map_err(convert_err)? + .body + .execution_payload + .execution_payload + .block_hash; + + let val_index = self + .beacon_client + .get_beacon_states_validator_id( + StateId::Head, + &ValidatorId::PublicKey(from_ssz_rs(&cached_data.public_key)?), + ) + .await + .map_err(convert_err)? + .ok_or_else(|| convert_err("missing validator from state"))? + .data + .index; + let fee_recipient = from_ssz_rs(&cached_data.fee_recipient)?; + let slots_since_genesis = slot.as_u64() - self.spec.genesis_slot.as_u64(); + + let genesis_time = self + .beacon_client + .get_beacon_genesis() + .await + .map_err(convert_err)? + .data + .genesis_time; + let timestamp = (slots_since_genesis * self.spec.seconds_per_slot) + genesis_time; + + let head_state: BeaconState = self + .beacon_client + .get_debug_beacon_states(StateId::Head) + .await + .map_err(convert_err)? + .ok_or_else(|| BlindedBlockProviderError::Custom("missing head state".to_string()))? 
+ .data; + let prev_randao = head_state + .get_randao_mix(head_state.current_epoch()) + .map_err(convert_err)?; + + let payload_attributes = PayloadAttributes { + timestamp, + prev_randao: *prev_randao, + suggested_fee_recipient: fee_recipient, + }; + + self.el + .insert_proposer(slot, head_block_root, val_index, payload_attributes) + .await; + + let forkchoice_update_params = ForkchoiceUpdateParameters { + head_root: Hash256::zero(), + head_hash: None, + justified_hash: Some(justified_execution_hash), + finalized_hash: Some(finalized_execution_hash), + }; + + let payload = self + .el + .get_full_payload_caching::>( + head_execution_hash, + timestamp, + *prev_randao, + fee_recipient, + forkchoice_update_params, + ) + .await + .map_err(convert_err)? + .to_execution_payload_header(); + + let json_payload = serde_json::to_string(&payload).map_err(convert_err)?; + let mut header: ServerPayloadHeader = + serde_json::from_str(json_payload.as_str()).map_err(convert_err)?; + + header.gas_limit = cached_data.gas_limit; + + let mut message = BuilderBid { + header, + value: ssz_rs::U256::default(), + public_key: self.builder_sk.public_key(), + }; + + self.apply_operations(&mut message)?; + + let mut signature = + sign_builder_message(&mut message, &self.builder_sk, self.context.as_ref())?; + + if *self.invalidate_signatures.read() { + signature = Signature::default(); + } + + let signed_bid = SignedBuilderBid { message, signature }; + Ok(signed_bid) + } + + async fn open_bid( + &self, + signed_block: &mut SignedBlindedBeaconBlock, + ) -> Result { + let payload = self + .el + .get_payload_by_root(&from_ssz_rs( + &signed_block + .message + .body + .execution_payload_header + .hash_tree_root() + .map_err(convert_err)?, + )?) + .ok_or_else(|| convert_err("missing payload for tx root"))?; + + let json_payload = serde_json::to_string(&payload).map_err(convert_err)?; + serde_json::from_str(json_payload.as_str()).map_err(convert_err) + } +} + +pub fn from_ssz_rs( + ssz_rs_data: &T, +) -> Result { + U::from_ssz_bytes( + ssz_rs::serialize(ssz_rs_data) + .map_err(convert_err)? 
+ .as_ref(), + ) + .map_err(convert_err) +} + +pub fn to_ssz_rs( + ssz_data: &T, +) -> Result { + ssz_rs::deserialize::(&ssz_data.as_ssz_bytes()).map_err(convert_err) +} + +fn convert_err(e: E) -> BlindedBlockProviderError { + BlindedBlockProviderError::Custom(format!("{e:?}")) +} diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 8a5c26fe8d8..cab2367cd08 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -7,6 +7,7 @@ use crate::{ use sensitive_url::SensitiveUrl; use task_executor::TaskExecutor; use tempfile::NamedTempFile; +use tree_hash::TreeHash; use types::{Address, ChainSpec, Epoch, EthSpec, FullPayload, Hash256, Uint256}; pub struct MockExecutionLayer { @@ -124,6 +125,11 @@ impl MockExecutionLayer { .unwrap(); let validator_index = 0; + let builder_params = BuilderParams { + pubkey: PublicKeyBytes::empty(), + slot, + chain_health: ChainHealth::Healthy, + }; let payload = self .el .get_payload::>( @@ -131,9 +137,9 @@ impl MockExecutionLayer { timestamp, prev_randao, validator_index, - None, - slot, forkchoice_update_params, + builder_params, + &self.spec, ) .await .unwrap() @@ -144,6 +150,43 @@ impl MockExecutionLayer { assert_eq!(payload.timestamp, timestamp); assert_eq!(payload.prev_randao, prev_randao); + // Ensure the payload cache is empty. + assert!(self + .el + .get_payload_by_root(&payload.tree_hash_root()) + .is_none()); + let builder_params = BuilderParams { + pubkey: PublicKeyBytes::empty(), + slot, + chain_health: ChainHealth::Healthy, + }; + let payload_header = self + .el + .get_payload::>( + parent_hash, + timestamp, + prev_randao, + validator_index, + forkchoice_update_params, + builder_params, + &self.spec, + ) + .await + .unwrap() + .execution_payload_header; + assert_eq!(payload_header.block_hash, block_hash); + assert_eq!(payload_header.parent_hash, parent_hash); + assert_eq!(payload_header.block_number, block_number); + assert_eq!(payload_header.timestamp, timestamp); + assert_eq!(payload_header.prev_randao, prev_randao); + + // Ensure the payload cache has the correct payload. + assert_eq!( + self.el + .get_payload_by_root(&payload_header.tree_hash_root()), + Some(payload.clone()) + ); + let status = self.el.notify_new_payload(&payload).await.unwrap(); assert_eq!(status, PayloadStatus::Valid); diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 970c619a565..24631539515 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -22,6 +22,7 @@ use types::{EthSpec, ExecutionBlockHash, Uint256}; use warp::{http::StatusCode, Filter, Rejection}; pub use execution_block_generator::{generate_pow_block, Block, ExecutionBlockGenerator}; +pub use mock_builder::{Context as MockBuilderContext, MockBuilder, Operation, TestingBuilder}; pub use mock_execution_layer::MockExecutionLayer; pub const DEFAULT_TERMINAL_DIFFICULTY: u64 = 6400; @@ -30,6 +31,7 @@ pub const DEFAULT_JWT_SECRET: [u8; 32] = [42; 32]; mod execution_block_generator; mod handle_rpc; +mod mock_builder; mod mock_execution_layer; /// Configuration for the MockExecutionLayer. 
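The `PayloadCache` introduced above is the glue between the blinded and full block flows: when the execution layer builds a payload locally it stores the full `ExecutionPayload` keyed by its tree hash root, and `get_payload_by_root` later recovers it when the signed blinded block comes back for publishing (this is exactly what the `MockExecutionLayer` assertions below exercise). The following is a minimal, self-contained sketch of that put/pop pattern using only std types; it is not the Lighthouse implementation, which uses an `LruCache` and the real `ExecutionPayload`/`Hash256` types.

```rust
use std::collections::HashMap;
use std::sync::Mutex;

// Stand-ins for the real Lighthouse types (assumptions for this sketch):
// `Root` plays the role of the payload's tree hash root, `Payload` the full body.
type Root = [u8; 32];

#[derive(Clone, Debug, PartialEq)]
struct Payload {
    block_hash: Root,
    transactions: Vec<Vec<u8>>,
}

/// Maps payloads by root so a blinded proposal can later be re-inflated.
#[derive(Default)]
struct PayloadCache {
    payloads: Mutex<HashMap<Root, Payload>>,
}

impl PayloadCache {
    /// Called when the node builds a payload locally while serving a blinded header.
    fn put(&self, root: Root, payload: Payload) -> Option<Payload> {
        self.payloads.lock().unwrap().insert(root, payload)
    }

    /// Called when the signed blinded block returns and the full block must be rebuilt.
    fn pop(&self, root: &Root) -> Option<Payload> {
        self.payloads.lock().unwrap().remove(root)
    }
}

fn main() {
    let cache = PayloadCache::default();
    let root = [1u8; 32];
    let payload = Payload {
        block_hash: [2u8; 32],
        transactions: vec![vec![0xde, 0xad]],
    };

    // Production path: cache the payload alongside the header handed to the signer.
    cache.put(root, payload.clone());

    // Publish path: the blinded block carries only the header, so fetch the body by root.
    assert_eq!(cache.pop(&root), Some(payload));
    // A second lookup misses; the HTTP API tests use exactly this hit/miss behaviour to
    // detect whether the local (fallback) payload or the builder's payload was used.
    assert!(cache.pop(&root).is_none());
}
```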
diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 5cc703aa1ab..fedd66c5404 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -32,15 +32,16 @@ parking_lot = "0.12.0" safe_arith = {path = "../../consensus/safe_arith"} task_executor = { path = "../../common/task_executor" } lru = "0.7.7" +tree_hash = "0.4.1" [dev-dependencies] store = { path = "../store" } environment = { path = "../../lighthouse/environment" } -tree_hash = "0.4.1" sensitive_url = { path = "../../common/sensitive_url" } logging = { path = "../../common/logging" } serde_json = "1.0.58" proto_array = { path = "../../consensus/proto_array" } +unused_port = {path = "../../common/unused_port"} [[test]] name = "bn_http_api_tests" diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index c2503f392fa..a1b23c7f035 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -13,17 +13,16 @@ mod block_rewards; mod database; mod metrics; mod proposer_duties; +mod publish_blocks; mod state_id; mod sync_committees; mod validator_inclusion; mod version; use beacon_chain::{ - attestation_verification::VerifiedAttestation, - observed_operations::ObservationOutcome, - validator_monitor::{get_block_delay_ms, timestamp_now}, - AttestationError as AttnError, BeaconChain, BeaconChainError, BeaconChainTypes, - CountUnrealized, ProduceBlockVerification, WhenSlotSkipped, + attestation_verification::VerifiedAttestation, observed_operations::ObservationOutcome, + validator_monitor::timestamp_now, AttestationError as AttnError, BeaconChain, BeaconChainError, + BeaconChainTypes, ProduceBlockVerification, WhenSlotSkipped, }; pub use block_id::BlockId; use eth2::types::{self as api_types, EndpointVersion, ValidatorId}; @@ -45,12 +44,11 @@ use std::sync::Arc; use tokio::sync::mpsc::UnboundedSender; use tokio_stream::{wrappers::BroadcastStream, StreamExt}; use types::{ - Attestation, AttesterSlashing, BeaconBlockBodyMerge, BeaconBlockMerge, BeaconStateError, - BlindedPayload, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, FullPayload, - ProposerPreparationData, ProposerSlashing, RelativeEpoch, Signature, SignedAggregateAndProof, - SignedBeaconBlock, SignedBeaconBlockMerge, SignedBlindedBeaconBlock, - SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, - SyncCommitteeMessage, SyncContributionData, + Attestation, AttesterSlashing, BeaconStateError, BlindedPayload, CommitteeCache, + ConfigAndPreset, Epoch, EthSpec, ForkName, FullPayload, ProposerPreparationData, + ProposerSlashing, RelativeEpoch, Signature, SignedAggregateAndProof, SignedBeaconBlock, + SignedBlindedBeaconBlock, SignedContributionAndProof, SignedValidatorRegistrationData, + SignedVoluntaryExit, Slot, SyncCommitteeMessage, SyncContributionData, }; use version::{ add_consensus_version_header, execution_optimistic_fork_versioned_response, @@ -1025,81 +1023,9 @@ pub fn serve( chain: Arc>, network_tx: UnboundedSender>, log: Logger| async move { - let seen_timestamp = timestamp_now(); - - // Send the block, regardless of whether or not it is valid. The API - // specification is very clear that this is the desired behaviour. - publish_pubsub_message(&network_tx, PubsubMessage::BeaconBlock(block.clone()))?; - - // Determine the delay after the start of the slot, register it with metrics. 
- let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); - metrics::observe_duration(&metrics::HTTP_API_BLOCK_BROADCAST_DELAY_TIMES, delay); - - match chain - .process_block(block.clone(), CountUnrealized::True) + publish_blocks::publish_block(block, chain, &network_tx, log) .await - { - Ok(root) => { - info!( - log, - "Valid block from HTTP API"; - "block_delay" => ?delay, - "root" => format!("{}", root), - "proposer_index" => block.message().proposer_index(), - "slot" => block.slot(), - ); - - // Notify the validator monitor. - chain.validator_monitor.read().register_api_block( - seen_timestamp, - block.message(), - root, - &chain.slot_clock, - ); - - // Update the head since it's likely this block will become the new - // head. - chain.recompute_head_at_current_slot().await; - - // Perform some logging to inform users if their blocks are being produced - // late. - // - // Check to see the thresholds are non-zero to avoid logging errors with small - // slot times (e.g., during testing) - let crit_threshold = chain.slot_clock.unagg_attestation_production_delay(); - let error_threshold = crit_threshold / 2; - if delay >= crit_threshold { - crit!( - log, - "Block was broadcast too late"; - "msg" => "system may be overloaded, block likely to be orphaned", - "delay_ms" => delay.as_millis(), - "slot" => block.slot(), - "root" => ?root, - ) - } else if delay >= error_threshold { - error!( - log, - "Block broadcast was delayed"; - "msg" => "system may be overloaded, block may be orphaned", - "delay_ms" => delay.as_millis(), - "slot" => block.slot(), - "root" => ?root, - ) - } - - Ok(warp::reply::json(&())) - } - Err(e) => { - let msg = format!("{:?}", e); - error!( - log, - "Invalid block provided to HTTP API"; - "reason" => &msg - ); - Err(warp_utils::reject::broadcast_without_import(msg)) - } - } + .map(|()| warp::reply()) }, ); @@ -1117,87 +1043,13 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .and_then( - |block: Arc>>, + |block: SignedBeaconBlock>, chain: Arc>, network_tx: UnboundedSender>, - _log: Logger| async move { - if let Some(el) = chain.execution_layer.as_ref() { - //FIXME(sean): we may not always receive the payload in this response because it - // should be the relay's job to propogate the block. However, since this block is - // already signed and sent this might be ok (so long as the relay validates - // the block before revealing the payload). - - //FIXME(sean) additionally, this endpoint should serve blocks prior to Bellatrix, and should - // be able to support the normal block proposal flow, because at some point full block endpoints - // will be deprecated from the beacon API. This will entail creating full blocks in - // `validator/blinded_blocks`, caching their payloads, and transforming them into blinded - // blocks. We will access the payload of those blocks here. This flow should happen if the - // execution layer has no payload builders or if we have not yet finalized post-merge transition. 
- let payload = el.propose_blinded_beacon_block(&block).await.map_err(|e| { - warp_utils::reject::custom_server_error(format!("proposal failed: {:?}", e)) - })?; - let new_block = SignedBeaconBlock::Merge(SignedBeaconBlockMerge { - message: BeaconBlockMerge { - slot: block.message().slot(), - proposer_index: block.message().proposer_index(), - parent_root: block.message().parent_root(), - state_root: block.message().state_root(), - body: BeaconBlockBodyMerge { - randao_reveal: block.message().body().randao_reveal().clone(), - eth1_data: block.message().body().eth1_data().clone(), - graffiti: *block.message().body().graffiti(), - proposer_slashings: block - .message() - .body() - .proposer_slashings() - .clone(), - attester_slashings: block - .message() - .body() - .attester_slashings() - .clone(), - attestations: block.message().body().attestations().clone(), - deposits: block.message().body().deposits().clone(), - voluntary_exits: block.message().body().voluntary_exits().clone(), - sync_aggregate: block - .message() - .body() - .sync_aggregate() - .unwrap() - .clone(), - execution_payload: payload.into(), - }, - }, - signature: block.signature().clone(), - }); - let new_block = Arc::new(new_block); - - // Send the block, regardless of whether or not it is valid. The API - // specification is very clear that this is the desired behaviour. - publish_pubsub_message( - &network_tx, - PubsubMessage::BeaconBlock(new_block.clone()), - )?; - - match chain.process_block(new_block, CountUnrealized::True).await { - Ok(_) => { - // Update the head since it's likely this block will become the new - // head. - chain.recompute_head_at_current_slot().await; - - Ok(warp::reply::json(&())) - } - Err(e) => { - let msg = format!("{:?}", e); - - Err(warp_utils::reject::broadcast_without_import(msg)) - } - } - } else { - Err(warp_utils::reject::custom_server_error( - "no execution layer found".to_string(), - )) - } + log: Logger| async move { + publish_blocks::publish_blinded_block(block, chain, &network_tx, log) + .await + .map(|()| warp::reply()) }, ); @@ -2593,19 +2445,13 @@ pub fn serve( }) .collect::>(); - debug!( - log, - "Resolved validator request pubkeys"; - "count" => preparation_data.len() - ); - // Update the prepare beacon proposer cache based on this request. execution_layer .update_proposer_preparation(current_epoch, &preparation_data) .await; // Call prepare beacon proposer blocking with the latest update in order to make - // sure we have a local payload to fall back to in the event of the blined block + // sure we have a local payload to fall back to in the event of the blinded block // flow failing. chain .prepare_beacon_proposer(current_slot) @@ -2617,9 +2463,37 @@ pub fn serve( )) })?; - //TODO(sean): In the MEV-boost PR, add a call here to send the update request to the builder + let builder = execution_layer + .builder() + .as_ref() + .ok_or(BeaconChainError::BuilderMissing) + .map_err(warp_utils::reject::beacon_chain_error)?; + + info!( + log, + "Forwarding register validator request to connected builder"; + "count" => register_val_data.len(), + ); - Ok::<_, warp::Rejection>(warp::reply::json(&())) + builder + .post_builder_validators(®ister_val_data) + .await + .map(|resp| warp::reply::json(&resp)) + .map_err(|e| { + error!(log, "Error from connected relay"; "error" => ?e); + // Forward the HTTP status code if we are able to, otherwise fall back + // to a server error. 
+ if let eth2::Error::ServerMessage(message) = e { + if message.code == StatusCode::BAD_REQUEST.as_u16() { + return warp_utils::reject::custom_bad_request(message.message); + } else { + // According to the spec this response should only be a 400 or 500, + // so we fall back to a 500 here. + return warp_utils::reject::custom_server_error(message.message); + } + } + warp_utils::reject::custom_server_error(format!("{e:?}")) + }) }, ); // POST validator/sync_committee_subscriptions diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs new file mode 100644 index 00000000000..b282e6f490e --- /dev/null +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -0,0 +1,155 @@ +use crate::metrics; +use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now}; +use beacon_chain::{BeaconChain, BeaconChainTypes, CountUnrealized}; +use lighthouse_network::PubsubMessage; +use network::NetworkMessage; +use slog::{crit, error, info, Logger}; +use slot_clock::SlotClock; +use std::sync::Arc; +use tokio::sync::mpsc::UnboundedSender; +use tree_hash::TreeHash; +use types::{ + BlindedPayload, ExecPayload, ExecutionBlockHash, ExecutionPayload, FullPayload, + SignedBeaconBlock, +}; +use warp::Rejection; + +/// Handles a request from the HTTP API for full blocks. +pub async fn publish_block( + block: Arc>, + chain: Arc>, + network_tx: &UnboundedSender>, + log: Logger, +) -> Result<(), Rejection> { + let seen_timestamp = timestamp_now(); + + // Send the block, regardless of whether or not it is valid. The API + // specification is very clear that this is the desired behaviour. + crate::publish_pubsub_message(network_tx, PubsubMessage::BeaconBlock(block.clone()))?; + + // Determine the delay after the start of the slot, register it with metrics. + let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); + metrics::observe_duration(&metrics::HTTP_API_BLOCK_BROADCAST_DELAY_TIMES, delay); + + match chain + .process_block(block.clone(), CountUnrealized::True) + .await + { + Ok(root) => { + info!( + log, + "Valid block from HTTP API"; + "block_delay" => ?delay, + "root" => format!("{}", root), + "proposer_index" => block.message().proposer_index(), + "slot" => block.slot(), + ); + + // Notify the validator monitor. + chain.validator_monitor.read().register_api_block( + seen_timestamp, + block.message(), + root, + &chain.slot_clock, + ); + + // Update the head since it's likely this block will become the new + // head. + chain.recompute_head_at_current_slot().await; + + // Perform some logging to inform users if their blocks are being produced + // late. 
+ // + // Check to see the thresholds are non-zero to avoid logging errors with small + // slot times (e.g., during testing) + let crit_threshold = chain.slot_clock.unagg_attestation_production_delay(); + let error_threshold = crit_threshold / 2; + if delay >= crit_threshold { + crit!( + log, + "Block was broadcast too late"; + "msg" => "system may be overloaded, block likely to be orphaned", + "delay_ms" => delay.as_millis(), + "slot" => block.slot(), + "root" => ?root, + ) + } else if delay >= error_threshold { + error!( + log, + "Block broadcast was delayed"; + "msg" => "system may be overloaded, block may be orphaned", + "delay_ms" => delay.as_millis(), + "slot" => block.slot(), + "root" => ?root, + ) + } + + Ok(()) + } + Err(e) => { + let msg = format!("{:?}", e); + error!( + log, + "Invalid block provided to HTTP API"; + "reason" => &msg + ); + Err(warp_utils::reject::broadcast_without_import(msg)) + } + } +} + +/// Handles a request from the HTTP API for blinded blocks. This converts blinded blocks into full +/// blocks before publishing. +pub async fn publish_blinded_block( + block: SignedBeaconBlock>, + chain: Arc>, + network_tx: &UnboundedSender>, + log: Logger, +) -> Result<(), Rejection> { + let full_block = reconstruct_block(chain.clone(), block, log.clone()).await?; + publish_block::(Arc::new(full_block), chain, network_tx, log).await +} + +/// Deconstruct the given blinded block, and construct a full block. This attempts to use the +/// execution layer's payload cache, and if that misses, attempts a blind block proposal to retrieve +/// the full payload. +async fn reconstruct_block( + chain: Arc>, + block: SignedBeaconBlock>, + log: Logger, +) -> Result>, Rejection> { + let full_payload = if let Ok(payload_header) = block.message().body().execution_payload() { + let el = chain.execution_layer.as_ref().ok_or_else(|| { + warp_utils::reject::custom_server_error("Missing execution layer".to_string()) + })?; + + // If the execution block hash is zero, use an empty payload. + let full_payload = if payload_header.block_hash() == ExecutionBlockHash::zero() { + ExecutionPayload::default() + // If we already have an execution payload with this transactions root cached, use it. + } else if let Some(cached_payload) = + el.get_payload_by_root(&payload_header.tree_hash_root()) + { + info!(log, "Reconstructing a full block using a local payload"; "block_hash" => ?cached_payload.block_hash); + cached_payload + // Otherwise, this means we are attempting a blind block proposal. + } else { + let full_payload = el.propose_blinded_beacon_block(&block).await.map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Blind block proposal failed: {:?}", + e + )) + })?; + info!(log, "Successfully published a block to the builder network"; "block_hash" => ?full_payload.block_hash); + full_payload + }; + + Some(full_payload) + } else { + None + }; + + block.try_into_full_block(full_payload).ok_or_else(|| { + warp_utils::reject::custom_server_error("Unable to add payload to block".to_string()) + }) +} diff --git a/beacon_node/http_api/tests/common.rs b/beacon_node/http_api/tests/common.rs index 06466c43bb4..8f9856991fe 100644 --- a/beacon_node/http_api/tests/common.rs +++ b/beacon_node/http_api/tests/common.rs @@ -86,6 +86,16 @@ impl InteractiveTester { pub async fn create_api_server( chain: Arc>, log: Logger, +) -> ApiServer> { + // Get a random unused port. 
+ let port = unused_port::unused_tcp_port().unwrap(); + create_api_server_on_port(chain, log, port).await +} + +pub async fn create_api_server_on_port( + chain: Arc>, + log: Logger, + port: u16, ) -> ApiServer> { let (network_tx, network_rx) = mpsc::unbounded_channel(); @@ -129,7 +139,7 @@ pub async fn create_api_server( config: Config { enabled: true, listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), - listen_port: 0, + listen_port: port, allow_origin: None, serve_legacy_spec: true, tls_config: None, diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index b4c29cae42c..38c06848cf1 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1,4 +1,4 @@ -use crate::common::{create_api_server, ApiServer}; +use crate::common::{create_api_server, create_api_server_on_port, ApiServer}; use beacon_chain::test_utils::RelativeSyncCommittee; use beacon_chain::{ test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, @@ -11,6 +11,8 @@ use eth2::{ types::{BlockId as CoreBlockId, StateId as CoreStateId, *}, BeaconNodeHttpClient, Error, StatusCode, Timeouts, }; +use execution_layer::test_utils::Operation; +use execution_layer::test_utils::TestingBuilder; use futures::stream::{Stream, StreamExt}; use futures::FutureExt; use http_api::{BlockId, StateId}; @@ -66,6 +68,7 @@ struct ApiTester { network_rx: mpsc::UnboundedReceiver>, local_enr: Enr, external_peer_id: PeerId, + mock_builder: Option>>, } impl ApiTester { @@ -90,12 +93,16 @@ impl ApiTester { } pub async fn new_from_spec(spec: ChainSpec) -> Self { + // Get a random unused port + let port = unused_port::unused_tcp_port().unwrap(); + let beacon_url = SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap(); + let harness = Arc::new( BeaconChainHarness::builder(MainnetEthSpec) .spec(spec.clone()) .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() - .mock_execution_layer() + .mock_execution_layer_with_builder(beacon_url.clone()) .build(), ); @@ -205,25 +212,28 @@ impl ApiTester { let ApiServer { server, - listening_socket, + listening_socket: _, shutdown_tx, network_rx, local_enr, external_peer_id, - } = create_api_server(chain.clone(), log).await; + } = create_api_server_on_port(chain.clone(), log, port).await; harness.runtime.task_executor.spawn(server, "api_server"); let client = BeaconNodeHttpClient::new( - SensitiveUrl::parse(&format!( - "http://{}:{}", - listening_socket.ip(), - listening_socket.port() - )) - .unwrap(), + beacon_url, Timeouts::set_all(Duration::from_secs(SECONDS_PER_SLOT)), ); + let builder_ref = harness.mock_builder.as_ref().unwrap().clone(); + harness.runtime.task_executor.spawn( + async move { builder_ref.run().await }, + "mock_builder_server", + ); + + let mock_builder = harness.mock_builder.clone(); + Self { harness, chain, @@ -239,6 +249,7 @@ impl ApiTester { network_rx, local_enr, external_peer_id, + mock_builder, } } @@ -321,6 +332,7 @@ impl ApiTester { network_rx, local_enr, external_peer_id, + mock_builder: None, } } @@ -328,6 +340,13 @@ impl ApiTester { &self.harness.validator_keypairs } + pub async fn new_mev_tester() -> Self { + Self::new_with_hard_forks(true, true) + .await + .test_post_validator_register_validator() + .await + } + fn skip_slots(self, count: u64) -> Self { for _ in 0..count { self.chain @@ -2005,6 +2024,175 @@ impl ApiTester { self } + pub async fn test_blinded_block_production>(&self) { + let fork = self.chain.canonical_head.cached_head().head_fork(); + let 
genesis_validators_root = self.chain.genesis_validators_root; + + for _ in 0..E::slots_per_epoch() * 3 { + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let proposer_pubkey_bytes = self + .client + .get_validator_duties_proposer(epoch) + .await + .unwrap() + .data + .into_iter() + .find(|duty| duty.slot == slot) + .map(|duty| duty.pubkey) + .unwrap(); + let proposer_pubkey = (&proposer_pubkey_bytes).try_into().unwrap(); + + let sk = self + .validator_keypairs() + .iter() + .find(|kp| kp.pk == proposer_pubkey) + .map(|kp| kp.sk.clone()) + .unwrap(); + + let randao_reveal = { + let domain = self.chain.spec.get_domain( + epoch, + Domain::Randao, + &fork, + genesis_validators_root, + ); + let message = epoch.signing_root(domain); + sk.sign(message).into() + }; + + let block = self + .client + .get_validator_blinded_blocks::(slot, &randao_reveal, None) + .await + .unwrap() + .data; + + let signed_block = block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); + + self.client + .post_beacon_blinded_blocks(&signed_block) + .await + .unwrap(); + + // This converts the generic `Payload` to a concrete type for comparison. + let head_block = SignedBeaconBlock::from(signed_block.clone()); + assert_eq!(head_block, signed_block); + + self.chain.slot_clock.set_slot(slot.as_u64() + 1); + } + } + + pub async fn test_blinded_block_production_no_verify_randao>( + self, + ) -> Self { + for _ in 0..E::slots_per_epoch() { + let slot = self.chain.slot().unwrap(); + + let block = self + .client + .get_validator_blinded_blocks_with_verify_randao::( + slot, + None, + None, + Some(false), + ) + .await + .unwrap() + .data; + assert_eq!(block.slot(), slot); + self.chain.slot_clock.set_slot(slot.as_u64() + 1); + } + + self + } + + pub async fn test_blinded_block_production_verify_randao_invalid>( + self, + ) -> Self { + let fork = self.chain.canonical_head.cached_head().head_fork(); + let genesis_validators_root = self.chain.genesis_validators_root; + + for _ in 0..E::slots_per_epoch() { + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let proposer_pubkey_bytes = self + .client + .get_validator_duties_proposer(epoch) + .await + .unwrap() + .data + .into_iter() + .find(|duty| duty.slot == slot) + .map(|duty| duty.pubkey) + .unwrap(); + let proposer_pubkey = (&proposer_pubkey_bytes).try_into().unwrap(); + + let sk = self + .validator_keypairs() + .iter() + .find(|kp| kp.pk == proposer_pubkey) + .map(|kp| kp.sk.clone()) + .unwrap(); + + let bad_randao_reveal = { + let domain = self.chain.spec.get_domain( + epoch, + Domain::Randao, + &fork, + genesis_validators_root, + ); + let message = (epoch + 1).signing_root(domain); + sk.sign(message).into() + }; + + // Check failure with no `verify_randao` passed. + self.client + .get_validator_blinded_blocks::(slot, &bad_randao_reveal, None) + .await + .unwrap_err(); + + // Check failure with `verify_randao=true`. + self.client + .get_validator_blinded_blocks_with_verify_randao::( + slot, + Some(&bad_randao_reveal), + None, + Some(true), + ) + .await + .unwrap_err(); + + // Check failure with no randao reveal provided. + self.client + .get_validator_blinded_blocks_with_verify_randao::( + slot, None, None, None, + ) + .await + .unwrap_err(); + + // Check success with `verify_randao=false`. 
+ let block = self + .client + .get_validator_blinded_blocks_with_verify_randao::( + slot, + Some(&bad_randao_reveal), + None, + Some(false), + ) + .await + .unwrap() + .data; + + assert_eq!(block.slot(), slot); + self.chain.slot_clock.set_slot(slot.as_u64() + 1); + } + + self + } + pub async fn test_get_validator_attestation_data(self) -> Self { let mut state = self.chain.head_beacon_state_cloned(); let slot = state.slot(); @@ -2203,7 +2391,14 @@ impl ApiTester { let mut registrations = vec![]; let mut fee_recipients = vec![]; - let fork = self.chain.head_snapshot().beacon_state.fork(); + let genesis_epoch = self.chain.spec.genesis_slot.epoch(E::slots_per_epoch()); + let fork = Fork { + current_version: self.chain.spec.genesis_fork_version, + previous_version: self.chain.spec.genesis_fork_version, + epoch: genesis_epoch, + }; + + let expected_gas_limit = 11_111_111; for (val_index, keypair) in self.validator_keypairs().iter().enumerate() { let pubkey = keypair.pk.compress(); @@ -2211,12 +2406,13 @@ impl ApiTester { let data = ValidatorRegistrationData { fee_recipient, - gas_limit: 0, + gas_limit: expected_gas_limit, timestamp: 0, pubkey, }; + let domain = self.chain.spec.get_domain( - Epoch::new(0), + genesis_epoch, Domain::ApplicationMask(ApplicationDomain::Builder), &fork, Hash256::zero(), @@ -2224,11 +2420,13 @@ impl ApiTester { let message = data.signing_root(domain); let signature = keypair.sk.sign(message); - fee_recipients.push(fee_recipient); - registrations.push(SignedValidatorRegistrationData { + let signed = SignedValidatorRegistrationData { message: data, signature, - }); + }; + + fee_recipients.push(fee_recipient); + registrations.push(signed); } self.client @@ -2258,64 +2456,652 @@ impl ApiTester { self } - #[cfg(target_os = "linux")] - pub async fn test_get_lighthouse_health(self) -> Self { - self.client.get_lighthouse_health().await.unwrap(); + // Helper function for tests that require a valid RANDAO signature. 
+ async fn get_test_randao(&self, slot: Slot, epoch: Epoch) -> (u64, SignatureBytes) { + let fork = self.chain.canonical_head.cached_head().head_fork(); + let genesis_validators_root = self.chain.genesis_validators_root; - self - } + let (proposer_pubkey_bytes, proposer_index) = self + .client + .get_validator_duties_proposer(epoch) + .await + .unwrap() + .data + .into_iter() + .find(|duty| duty.slot == slot) + .map(|duty| (duty.pubkey, duty.validator_index)) + .unwrap(); + let proposer_pubkey = (&proposer_pubkey_bytes).try_into().unwrap(); - #[cfg(not(target_os = "linux"))] - pub async fn test_get_lighthouse_health(self) -> Self { - self.client.get_lighthouse_health().await.unwrap_err(); + let sk = self + .validator_keypairs() + .iter() + .find(|kp| kp.pk == proposer_pubkey) + .map(|kp| kp.sk.clone()) + .unwrap(); - self + let randao_reveal = { + let domain = + self.chain + .spec + .get_domain(epoch, Domain::Randao, &fork, genesis_validators_root); + let message = epoch.signing_root(domain); + sk.sign(message).into() + }; + (proposer_index, randao_reveal) } - pub async fn test_get_lighthouse_syncing(self) -> Self { - self.client.get_lighthouse_syncing().await.unwrap(); + pub async fn test_payload_respects_registration(self) -> Self { + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); - self - } + let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; - pub async fn test_get_lighthouse_proto_array(self) -> Self { - self.client.get_lighthouse_proto_array().await.unwrap(); + let payload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + assert_eq!( + payload.execution_payload_header.fee_recipient, + expected_fee_recipient + ); + assert_eq!(payload.execution_payload_header.gas_limit, 11_111_111); + // If this cache is empty, it indicates fallback was not used, so the payload came from the + // mock builder. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none()); self } - pub async fn test_get_lighthouse_validator_inclusion_global(self) -> Self { - let epoch = self.chain.epoch().unwrap() - 1; - self.client - .get_lighthouse_validator_inclusion_global(epoch) - .await - .unwrap(); + pub async fn test_payload_accepts_mutated_gas_limit(self) -> Self { + // Mutate gas limit. 
+ self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::GasLimit(30_000_000)); - self - } + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); - pub async fn test_get_lighthouse_validator_inclusion(self) -> Self { - let epoch = self.chain.epoch().unwrap() - 1; - self.client - .get_lighthouse_validator_inclusion(epoch, ValidatorId::Index(0)) - .await - .unwrap(); + let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; - self - } + let payload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); - pub async fn test_get_lighthouse_eth1_syncing(self) -> Self { - self.client.get_lighthouse_eth1_syncing().await.unwrap(); + let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + assert_eq!( + payload.execution_payload_header.fee_recipient, + expected_fee_recipient + ); + assert_eq!(payload.execution_payload_header.gas_limit, 30_000_000); + // This cache should not be populated because fallback should not have been used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none()); self } - pub async fn test_get_lighthouse_eth1_block_cache(self) -> Self { - let blocks = self.client.get_lighthouse_eth1_block_cache().await.unwrap(); + pub async fn test_payload_accepts_changed_fee_recipient(self) -> Self { + let test_fee_recipient = "0x4242424242424242424242424242424242424242" + .parse::
() + .unwrap(); - assert!(blocks.data.is_empty()); + // Mutate fee recipient. + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::FeeRecipient(test_fee_recipient)); - self + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + assert_eq!( + payload.execution_payload_header.fee_recipient, + test_fee_recipient + ); + + // This cache should not be populated because fallback should not have been used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none()); + self + } + + pub async fn test_payload_rejects_invalid_parent_hash(self) -> Self { + let invalid_parent_hash = + "0x4242424242424242424242424242424242424242424242424242424242424242" + .parse::() + .unwrap(); + + // Mutate parent hash. + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::ParentHash(invalid_parent_hash)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + let expected_parent_hash = self + .chain + .head_snapshot() + .beacon_state + .latest_execution_payload_header() + .unwrap() + .block_hash; + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + assert_eq!( + payload.execution_payload_header.parent_hash, + expected_parent_hash + ); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + + pub async fn test_payload_rejects_invalid_prev_randao(self) -> Self { + let invalid_prev_randao = + "0x4242424242424242424242424242424242424242424242424242424242424242" + .parse::() + .unwrap(); + + // Mutate prev randao. + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::PrevRandao(invalid_prev_randao)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + let expected_prev_randao = self + .chain + .canonical_head + .cached_head() + .head_random() + .unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + assert_eq!( + payload.execution_payload_header.prev_randao, + expected_prev_randao + ); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + + pub async fn test_payload_rejects_invalid_block_number(self) -> Self { + let invalid_block_number = 2; + + // Mutate block number. 
+ self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::BlockNumber(invalid_block_number)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + let expected_block_number = self + .chain + .head_snapshot() + .beacon_state + .latest_execution_payload_header() + .unwrap() + .block_number + + 1; + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + assert_eq!( + payload.execution_payload_header.block_number, + expected_block_number + ); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + + pub async fn test_payload_rejects_invalid_timestamp(self) -> Self { + let invalid_timestamp = 2; + + // Mutate timestamp. + self.mock_builder + .as_ref() + .unwrap() + .builder + .add_operation(Operation::Timestamp(invalid_timestamp)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + let min_expected_timestamp = self + .chain + .head_snapshot() + .beacon_state + .latest_execution_payload_header() + .unwrap() + .timestamp; + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + assert!(payload.execution_payload_header.timestamp > min_expected_timestamp); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + + pub async fn test_payload_rejects_invalid_signature(self) -> Self { + self.mock_builder + .as_ref() + .unwrap() + .builder + .invalid_signatures(); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + + pub async fn test_builder_chain_health_skips(self) -> Self { + let slot = self.chain.slot().unwrap(); + + // Since we are proposing this slot, start the count from the previous slot. + let prev_slot = slot - Slot::new(1); + let head_slot = self.chain.canonical_head.cached_head().head_slot(); + let epoch = self.chain.epoch().unwrap(); + + // Inclusive here to make sure we advance one slot past the threshold. 
+ for _ in (prev_slot - head_slot).as_usize()..=self.chain.config.builder_fallback_skips { + self.harness.advance_slot(); + } + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload = self + .client + .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + self + } + + pub async fn test_builder_chain_health_skips_per_epoch(self) -> Self { + // Fill an epoch with `builder_fallback_skips_per_epoch` skip slots. + for i in 0..E::slots_per_epoch() { + if i == 0 || i as usize > self.chain.config.builder_fallback_skips_per_epoch { + self.harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + } + self.harness.advance_slot(); + } + + let next_slot = self.chain.slot().unwrap(); + + let (_, randao_reveal) = self + .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) + .await; + + let payload = self + .client + .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + // This cache should not be populated because fallback should not have been used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none()); + + // Without proposing, advance into the next slot, this should make us cross the threshold + // number of skips, causing us to use the fallback. + self.harness.advance_slot(); + let next_slot = self.chain.slot().unwrap(); + + let (_, randao_reveal) = self + .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) + .await; + + let payload = self + .client + .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + + self + } + + pub async fn test_builder_chain_health_epochs_since_finalization(self) -> Self { + let skips = E::slots_per_epoch() + * self.chain.config.builder_fallback_epochs_since_finalization as u64; + + for _ in 0..skips { + self.harness.advance_slot(); + } + + // Fill the next epoch with blocks, should be enough to justify, not finalize. + for _ in 0..E::slots_per_epoch() { + self.harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + self.harness.advance_slot(); + } + + let next_slot = self.chain.slot().unwrap(); + + let (_, randao_reveal) = self + .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) + .await; + + let payload = self + .client + .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + + // Fill another epoch with blocks, should be enough to finalize. 
(Sneaky plus 1 because this + // scenario starts at an epoch boundary). + for _ in 0..E::slots_per_epoch() + 1 { + self.harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + self.harness.advance_slot(); + } + + let next_slot = self.chain.slot().unwrap(); + + let (_, randao_reveal) = self + .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) + .await; + + let payload = self + .client + .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .clone(); + + // This cache should not be populated because fallback should not have been used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_none()); + + self + } + + #[cfg(target_os = "linux")] + pub async fn test_get_lighthouse_health(self) -> Self { + self.client.get_lighthouse_health().await.unwrap(); + + self + } + + #[cfg(not(target_os = "linux"))] + pub async fn test_get_lighthouse_health(self) -> Self { + self.client.get_lighthouse_health().await.unwrap_err(); + + self + } + + pub async fn test_get_lighthouse_syncing(self) -> Self { + self.client.get_lighthouse_syncing().await.unwrap(); + + self + } + + pub async fn test_get_lighthouse_proto_array(self) -> Self { + self.client.get_lighthouse_proto_array().await.unwrap(); + + self + } + + pub async fn test_get_lighthouse_validator_inclusion_global(self) -> Self { + let epoch = self.chain.epoch().unwrap() - 1; + self.client + .get_lighthouse_validator_inclusion_global(epoch) + .await + .unwrap(); + + self + } + + pub async fn test_get_lighthouse_validator_inclusion(self) -> Self { + let epoch = self.chain.epoch().unwrap() - 1; + self.client + .get_lighthouse_validator_inclusion(epoch, ValidatorId::Index(0)) + .await + .unwrap(); + + self + } + + pub async fn test_get_lighthouse_eth1_syncing(self) -> Self { + self.client.get_lighthouse_eth1_syncing().await.unwrap(); + + self + } + + pub async fn test_get_lighthouse_eth1_block_cache(self) -> Self { + let blocks = self.client.get_lighthouse_eth1_block_cache().await.unwrap(); + + assert!(blocks.data.is_empty()); + + self } pub async fn test_get_lighthouse_eth1_deposit_cache(self) -> Self { @@ -2976,6 +3762,72 @@ async fn block_production_verify_randao_invalid() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_full_payload_premerge() { + ApiTester::new() + .await + .test_blinded_block_production::>() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_with_skip_slots_full_payload_premerge() { + ApiTester::new() + .await + .skip_slots(E::slots_per_epoch() * 2) + .test_blinded_block_production::>() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_no_verify_randao_full_payload_premerge() { + ApiTester::new() + .await + .test_blinded_block_production_no_verify_randao::>() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_verify_randao_invalid_full_payload_premerge() { + ApiTester::new() + .await + .test_blinded_block_production_verify_randao_invalid::>() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_blinded_payload_premerge() { + ApiTester::new() + .await + .test_blinded_block_production::>() + .await; +} + 
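The `test_builder_chain_health_*` cases above all exercise the same gate: before asking a builder for a header, the proposer checks recent skip slots and the distance since finalization, and falls back to the local execution engine if any threshold is breached. A rough, self-contained sketch of that decision is shown below; the field and function names are illustrative rather than the real `ChainConfig` API, the exact comparison operators are an assumption, and the real `ChainHealth` enum also has a `PreMerge` variant that skips builders unconditionally.

```rust
/// Illustrative thresholds mirroring the `builder-fallback-*` flags added in this PR.
struct FallbackConfig {
    builder_fallback_skips: u64,                     // CLI default: 3
    builder_fallback_skips_per_epoch: u64,           // CLI default: 8
    builder_fallback_epochs_since_finalization: u64, // CLI default: 3
    builder_fallback_disable_checks: bool,
}

#[derive(Debug, PartialEq)]
enum ChainHealth {
    Healthy,
    Unhealthy(&'static str),
}

/// Decide whether a connected builder may be queried for this proposal.
fn chain_health(
    cfg: &FallbackConfig,
    consecutive_skips: u64,
    skips_in_last_epoch: u64,
    epochs_since_finalization: u64,
) -> ChainHealth {
    if cfg.builder_fallback_disable_checks {
        return ChainHealth::Healthy;
    }
    if consecutive_skips > cfg.builder_fallback_skips {
        ChainHealth::Unhealthy("too many consecutive skip slots")
    } else if skips_in_last_epoch > cfg.builder_fallback_skips_per_epoch {
        ChainHealth::Unhealthy("too many skip slots this epoch")
    } else if epochs_since_finalization > cfg.builder_fallback_epochs_since_finalization {
        ChainHealth::Unhealthy("finalization is lagging")
    } else {
        ChainHealth::Healthy
    }
}

fn main() {
    let cfg = FallbackConfig {
        builder_fallback_skips: 3,
        builder_fallback_skips_per_epoch: 8,
        builder_fallback_epochs_since_finalization: 3,
        builder_fallback_disable_checks: false,
    };
    // Healthy chain: the builder is queried (and raced against the local payload).
    assert_eq!(chain_health(&cfg, 1, 2, 1), ChainHealth::Healthy);
    // Too many consecutive skip slots: propose with the local execution engine instead.
    assert!(matches!(chain_health(&cfg, 4, 4, 1), ChainHealth::Unhealthy(_)));
}
```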
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_with_skip_slots_blinded_payload_premerge() { + ApiTester::new() + .await + .skip_slots(E::slots_per_epoch() * 2) + .test_blinded_block_production::>() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_no_verify_randao_blinded_payload_premerge() { + ApiTester::new() + .await + .test_blinded_block_production_no_verify_randao::>() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn blinded_block_production_verify_randao_invalid_blinded_payload_premerge() { + ApiTester::new() + .await + .test_blinded_block_production_verify_randao_invalid::>() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_validator_attestation_data() { ApiTester::new() @@ -3060,6 +3912,94 @@ async fn post_validator_register_validator() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn post_validator_register_valid() { + ApiTester::new_mev_tester() + .await + .test_payload_respects_registration() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn post_validator_register_gas_limit_mutation() { + ApiTester::new_mev_tester() + .await + .test_payload_accepts_mutated_gas_limit() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn post_validator_register_fee_recipient_mutation() { + ApiTester::new_mev_tester() + .await + .test_payload_accepts_changed_fee_recipient() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_blinded_block_invalid_parent_hash() { + ApiTester::new_mev_tester() + .await + .test_payload_rejects_invalid_parent_hash() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_blinded_block_invalid_prev_randao() { + ApiTester::new_mev_tester() + .await + .test_payload_rejects_invalid_prev_randao() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_blinded_block_invalid_block_number() { + ApiTester::new_mev_tester() + .await + .test_payload_rejects_invalid_block_number() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_blinded_block_invalid_timestamp() { + ApiTester::new_mev_tester() + .await + .test_payload_rejects_invalid_timestamp() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_blinded_block_invalid_signature() { + ApiTester::new_mev_tester() + .await + .test_payload_rejects_invalid_signature() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_chain_health_skips() { + ApiTester::new_mev_tester() + .await + .test_builder_chain_health_skips() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_chain_health_skips_per_epoch() { + ApiTester::new_mev_tester() + .await + .test_builder_chain_health_skips_per_epoch() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_chain_health_epochs_since_finalization() { + ApiTester::new_mev_tester() + .await + .test_builder_chain_health_epochs_since_finalization() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn lighthouse_endpoints() { ApiTester::new() diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index b36f154ae83..3b78d8f684f 100644 --- a/beacon_node/src/cli.rs +++ 
b/beacon_node/src/cli.rs @@ -708,6 +708,46 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value("250") .takes_value(true) ) + .arg( + Arg::with_name("builder-fallback-skips") + .long("builder-fallback-skips") + .help("If this node is proposing a block and has seen this number of skip slots \ + on the canonical chain in a row, it will NOT query any connected builders, \ + and will use the local execution engine for payload construction.") + .default_value("3") + .takes_value(true) + ) + .arg( + Arg::with_name("builder-fallback-skips-per-epoch") + .long("builder-fallback-skips-per-epoch") + .help("If this node is proposing a block and has seen this number of skip slots \ + on the canonical chain in the past `SLOTS_PER_EPOCH`, it will NOT query \ + any connected builders, and will use the local execution engine for \ + payload construction.") + .default_value("8") + .takes_value(true) + ) + .arg( + Arg::with_name("builder-fallback-epochs-since-finalization") + .long("builder-fallback-epochs-since-finalization") + .help("If this node is proposing a block and the chain has not finalized within \ + this number of epochs, it will NOT query any connected builders, \ + and will use the local execution engine for payload construction. Setting \ + this value to anything less than 2 will cause the node to NEVER query \ + connected builders. Setting it to 2 will cause this condition to be hit \ + if there are skips slots at the start of an epoch, right before this node \ + is set to propose.") + .default_value("3") + .takes_value(true) + ) + .arg( + Arg::with_name("builder-fallback-disable-checks") + .long("builder-fallback-disable-checks") + .help("This flag disables all checks related to chain health. This means the builder \ + API will always be used for payload construction, regardless of recent chain \ + conditions.") + .takes_value(false) + ) .arg( Arg::with_name("count-unrealized") .long("count-unrealized") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index fb0cbe0c920..584a961958b 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -634,6 +634,20 @@ pub fn get_config( client_config.chain.count_unrealized = true; } + /* + * Builder fallback configs. 
+     */
+    client_config.chain.builder_fallback_skips =
+        clap_utils::parse_required(cli_args, "builder-fallback-skips")?;
+    client_config.chain.builder_fallback_skips_per_epoch =
+        clap_utils::parse_required(cli_args, "builder-fallback-skips-per-epoch")?;
+    client_config
+        .chain
+        .builder_fallback_epochs_since_finalization =
+        clap_utils::parse_required(cli_args, "builder-fallback-epochs-since-finalization")?;
+    client_config.chain.builder_fallback_disable_checks =
+        cli_args.is_present("builder-fallback-disable-checks");
+
     Ok(client_config)
 }
diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md
index c3e99d7a861..d05677465b5 100644
--- a/book/src/SUMMARY.md
+++ b/book/src/SUMMARY.md
@@ -18,21 +18,21 @@
 * [Create a validator](./validator-create.md)
 * [Key recovery](./key-recovery.md)
 * [Validator Management](./validator-management.md)
- * [Importing from the Staking Launchpad](./validator-import-launchpad.md)
+ * [Importing from the Staking Launchpad](./validator-import-launchpad.md)
 * [Slashing Protection](./slashing-protection.md)
 * [Voluntary Exits](./voluntary-exit.md)
 * [Validator Monitoring](./validator-monitoring.md)
 * [Doppelganger Protection](./validator-doppelganger.md)
 * [Suggested Fee Recipient](./suggested-fee-recipient.md)
 * [APIs](./api.md)
- * [Beacon Node API](./api-bn.md)
- * [/lighthouse](./api-lighthouse.md)
- * [Validator Inclusion APIs](./validator-inclusion.md)
- * [Validator Client API](./api-vc.md)
- * [Endpoints](./api-vc-endpoints.md)
- * [Authorization Header](./api-vc-auth-header.md)
- * [Signature Header](./api-vc-sig-header.md)
- * [Prometheus Metrics](./advanced_metrics.md)
+ * [Beacon Node API](./api-bn.md)
+ * [/lighthouse](./api-lighthouse.md)
+ * [Validator Inclusion APIs](./validator-inclusion.md)
+ * [Validator Client API](./api-vc.md)
+ * [Endpoints](./api-vc-endpoints.md)
+ * [Authorization Header](./api-vc-auth-header.md)
+ * [Signature Header](./api-vc-sig-header.md)
+ * [Prometheus Metrics](./advanced_metrics.md)
 * [Advanced Usage](./advanced.md)
 * [Checkpoint Sync](./checkpoint-sync.md)
 * [Custom Data Directories](./advanced-datadir.md)
@@ -45,6 +45,7 @@
 * [Redundancy](./redundancy.md)
 * [Pre-Releases](./advanced-pre-releases.md)
 * [Release Candidates](./advanced-release-candidates.md)
+ * [MEV and Lighthouse](./builders.md)
 * [Contributing](./contributing.md)
- * [Development Environment](./setup.md)
+ * [Development Environment](./setup.md)
 * [FAQs](./faq.md)
diff --git a/book/src/builders.md b/book/src/builders.md
new file mode 100644
index 00000000000..78a80899cc5
--- /dev/null
+++ b/book/src/builders.md
@@ -0,0 +1,144 @@
+# MEV and Lighthouse
+
+Lighthouse is able to interact with servers that implement the [builder
+API](https://github.com/ethereum/builder-specs), allowing it to produce blocks without having
+knowledge of the transactions included in the block. This enables Lighthouse to outsource the job of
+transaction gathering/ordering within a block to parties specialized in this particular task. For
+economic reasons, these parties will refuse to reveal the list of transactions to the validator
+before the validator has committed to (i.e. signed) the block. A primer on MEV can be found
+[here](https://ethereum.org/en/developers/docs/mev/).
+
+Using the builder API is not known to introduce additional slashing risks; however, it does introduce
+a liveness risk (i.e. a risk to the chain's ability to produce valid blocks), because your node will be
+signing blocks without executing the transactions within the block.
+Therefore, your node won't know whether the transactions are valid and it may sign a block that the
+network rejects. This would lead to a missed proposal and the opportunity cost of lost block rewards.
+
+## How to connect to a builder
+
+The beacon node and validator client each require a new flag for Lighthouse to be fully compatible
+with builder API servers.
+
+```
+lighthouse bn --builder https://mainnet-builder.test
+```
+The `--builder` flag will cause the beacon node to query the provided URL during block production for a block
+payload with stubbed-out transactions. If this request fails, Lighthouse will fall back to the local
+execution engine and produce a block using transactions gathered and verified locally.
+
+The beacon node will *only* query for this type of block (a "blinded" block) when a validator specifically requests it.
+Otherwise, it will continue to serve full blocks as normal. To configure the validator client to query for
+blinded blocks, use the following flag:
+
+```
+lighthouse vc --builder-proposals
+```
+With the `--builder-proposals` flag, the validator client will ask for blinded blocks for all validators it manages.
+To configure whether an individual validator queries for blinded blocks, see
+[this section](#validator-client-configuration).
+
+## Multiple builders
+
+Lighthouse currently only supports a connection to a single builder. If you'd like to connect to multiple builders or
+relays, run one of the following services and configure Lighthouse to use it with the `--builder` flag:
+
+* [`mev-boost`][mev-boost]
+* [`mev-rs`][mev-rs]
+
+## Validator Client Configuration
+
+In the validator client you can configure the gas limit, the fee recipient and whether to use the builder API, either
+on a per-validator basis or for all validators managed by the validator client. The CLI flags for each of these serve
+as default values for all managed validators. To manage the values per-validator, either update the
+`validator_definitions.yml` file or use the HTTP requests described below.
+
+Both the gas limit and fee recipient will be passed along as suggestions to connected builders. A discrepancy in either
+will *not* keep you from proposing a block with the builder. This is because the bounds on the gas limit are calculated
+from prior execution blocks, so it should be managed by an execution engine, even if it is external. Depending on the
+connected relay, payment to the proposer might be in the form of a transaction within the block to the fee recipient,
+so a discrepancy in fee recipient does not necessarily indicate that something is afoot. If you know the relay you are
+connected to *should* only create blocks with a `fee_recipient` field matching the one suggested, you can use
+the [strict fee recipient](suggested-fee-recipient.md#strict-fee-recipient) flag.
+
+### Enable/Disable builder proposals and set Gas Limit
+Use the [lighthouse API](api-vc-endpoints.md) to configure these fields per-validator.
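+
+As a quick, illustrative sketch of what that looks like, the request can be sent with `curl` (the full endpoint
+specification follows below). The host, port, API token location and `Bearer` scheme shown here are assumptions for
+this example only; use whatever your setup and the [Authorization Header](./api-vc-auth-header.md) page specify.
+
+```
+# Illustrative only: enable builder proposals and set a gas limit for one validator.
+API_TOKEN="$(cat "$HOME/.lighthouse/mainnet/validators/api-token.txt")"  # assumed token location
+curl -X PATCH "http://localhost:5062/lighthouse/validators/0xb0148e6348264131bf47bcd1829590e870c836dc893050fd0dadc7a28949f9d0a72f2805d027521b45441101f0cc1cde" \
+  -H "Authorization: Bearer $API_TOKEN" \
+  -H "Content-Type: application/json" \
+  -d '{"builder_proposals": true, "gas_limit": 3000000001}'
+```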
+
+#### `PATCH /lighthouse/validators/:voting_pubkey`
+
+
+#### HTTP Specification
+
+| Property          | Specification                              |
+|-------------------|--------------------------------------------|
+| Path              | `/lighthouse/validators/:voting_pubkey`    |
+| Method            | PATCH                                      |
+| Required Headers  | [`Authorization`](./api-vc-auth-header.md) |
+| Typical Responses | 200, 400                                   |
+
+#### Example Path
+
+```
+localhost:5062/lighthouse/validators/0xb0148e6348264131bf47bcd1829590e870c836dc893050fd0dadc7a28949f9d0a72f2805d027521b45441101f0cc1cde
+```
+
+#### Example Request Body
+Each field is optional.
+```json
+{
+    "builder_proposals": true,
+    "gas_limit": 3000000001
+}
+```
+
+#### Example Response Body
+
+```json
+null
+```
+### Fee Recipient
+
+Refer to the [suggested fee recipient](suggested-fee-recipient.md) documentation.
+
+### Validator definitions example
+```
+---
+- enabled: true
+  voting_public_key: "0x87a580d31d7bc69069b55f5a01995a610dd391a26dc9e36e81057a17211983a79266800ab8531f21f1083d7d84085007"
+  type: local_keystore
+  voting_keystore_path: /home/paul/.lighthouse/validators/0x87a580d31d7bc69069b55f5a01995a610dd391a26dc9e36e81057a17211983a79266800ab8531f21f1083d7d84085007/voting-keystore.json
+  voting_keystore_password_path: /home/paul/.lighthouse/secrets/0x87a580d31d7bc69069b55f5a01995a610dd391a26dc9e36e81057a17211983a79266800ab8531f21f1083d7d84085007
+  suggested_fee_recipient: "0x6cc8dcbca744a6e4ffedb98e1d0df903b10abd21"
+  gas_limit: 3000000001
+  builder_proposals: true
+- enabled: false
+  voting_public_key: "0xa5566f9ec3c6e1fdf362634ebec9ef7aceb0e460e5079714808388e5d48f4ae1e12897fed1bea951c17fa389d511e477"
+  type: local_keystore
+  voting_keystore_path: /home/paul/.lighthouse/validators/0xa5566f9ec3c6e1fdf362634ebec9ef7aceb0e460e5079714808388e5d48f4ae1e12897fed1bea951c17fa389d511e477/voting-keystore.json
+  voting_keystore_password: myStrongpa55word123&$
+  suggested_fee_recipient: "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d"
+  gas_limit: 333333333
+  builder_proposals: true
+```
+
+## Circuit breaker conditions
+
+By outsourcing payload construction and signing blocks without verifying transactions, we are creating a new risk to
+liveness. If most of the network is using a small set of relays and one of them is buggy, a string of missed proposals
+could happen quickly. This is not only bad for the network in general; if you have a proposal coming up, you might not
+realize that it is likely to be missed until it's too late. So we've implemented some "chain health"
+checks to try to avoid scenarios like this.
+
+By default, Lighthouse is strict with these conditions, but we encourage users to learn about and adjust them.
+
+- `--builder-fallback-skips` - If we've seen this number of skip slots on the canonical chain in a row prior to proposing, we will NOT query
+  any connected builders, and will use the local execution engine for payload construction.
+- `--builder-fallback-skips-per-epoch` - If we've seen this number of skip slots on the canonical chain in the past `SLOTS_PER_EPOCH`, we will NOT
+  query any connected builders, and will use the local execution engine for payload construction.
+- `--builder-fallback-epochs-since-finalization` - If we're proposing and the chain has not finalized within
+  this number of epochs, we will NOT query any connected builders, and will use the local execution engine for payload
+  construction. Setting this value to anything less than 2 will cause the node to NEVER query connected builders.
Setting + it to 2 will cause this condition to be hit if there are skips slots at the start of an epoch, right before this node + is set to propose. +- `--builder-fallback-disable-checks` - This flag disables all checks related to chain health. This means the builder + API will always be used for payload construction, regardless of recent chain conditions. + +[mev-rs]: https://github.com/ralexstokes/mev-rs +[mev-boost]: https://github.com/flashbots/mev-boost diff --git a/book/src/suggested-fee-recipient.md b/book/src/suggested-fee-recipient.md index c401abfb7ad..a584be306f3 100644 --- a/book/src/suggested-fee-recipient.md +++ b/book/src/suggested-fee-recipient.md @@ -30,6 +30,15 @@ Assuming trustworthy nodes, the priority for the four methods is: 1. `--suggested-fee-recipient` provided to the VC. 1. `--suggested-fee-recipient` provided to the BN. +## Strict Fee Recipient + +If the flag `--strict-fee-recipient` is set in the validator client, Lighthouse will refuse to sign any block whose +`fee_recipient` does not match the `suggested_fee_recipient` sent by this validator. This applies to both the normal +block proposal flow and block proposals through the builder API. Proposals through the builder API are more likely +to have a discrepancy in `fee_recipient` so you should be aware of how your connected relay sends proposer payments before +using this flag. If this flag is used, a fee recipient mismatch in the builder API flow will result in a fallback to the +local execution engine for payload construction, where a strict fee recipient check will still be applied. + ### 1. Setting the fee recipient in the `validator_definitions.yml` Users can set the fee recipient in `validator_definitions.yml` with the `suggested_fee_recipient` @@ -62,15 +71,6 @@ validators where a `suggested_fee_recipient` is not loaded from another method. The `--suggested-fee-recipient` can be provided to the BN to act as a default value when the validator client does not transmit a `suggested_fee_recipient` to the BN. -## Strict Fee Recipient - -If the flag `--strict-fee-recipient` is set in the validator client, Lighthouse will refuse to sign any block whose -`fee_recipient` does not match the `suggested_fee_recipient` sent by this validator. This applies to both the normal -block proposal flow, as well as block proposals through the builder API. Proposals through the builder API are more likely -to have a discrepancy in `fee_recipient` so you should be aware of how your connected relay sends proposer payments before -using this flag. If this flag is used, a fee recipient mismatch in the builder API flow will result in a fallback to the -local execution engine for payload construction, where a strict fee recipient check will still be applied. - ## Setting the fee recipient dynamically using the keymanager API When the [validator client API](api-vc.md) is enabled, the diff --git a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs index e68737e2595..66e3b735473 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -109,6 +109,12 @@ pub struct ValidatorDefinition { #[serde(skip_serializing_if = "Option::is_none")] pub suggested_fee_recipient: Option
, #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub gas_limit: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub builder_proposals: Option, + #[serde(default)] pub description: String, #[serde(flatten)] pub signing_definition: SigningDefinition, @@ -126,6 +132,8 @@ impl ValidatorDefinition { voting_keystore_password: Option, graffiti: Option, suggested_fee_recipient: Option
, + gas_limit: Option, + builder_proposals: Option, ) -> Result { let voting_keystore_path = voting_keystore_path.as_ref().into(); let keystore = @@ -138,6 +146,8 @@ impl ValidatorDefinition { description: keystore.description().unwrap_or("").to_string(), graffiti, suggested_fee_recipient, + gas_limit, + builder_proposals, signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path, voting_keystore_password_path: None, @@ -284,6 +294,8 @@ impl ValidatorDefinitions { description: keystore.description().unwrap_or("").to_string(), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path, voting_keystore_password_path, @@ -526,4 +538,84 @@ mod tests { Some(Address::from_str("0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d").unwrap()) ); } + + #[test] + fn gas_limit_checks() { + let no_gas_limit = r#"--- + description: "" + enabled: true + type: local_keystore + suggested_fee_recipient: "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d" + voting_keystore_path: "" + voting_public_key: "0xaf3c7ddab7e293834710fca2d39d068f884455ede270e0d0293dc818e4f2f0f975355067e8437955cb29aec674e5c9e7" + "#; + let def: ValidatorDefinition = serde_yaml::from_str(no_gas_limit).unwrap(); + assert!(def.gas_limit.is_none()); + + let invalid_gas_limit = r#"--- + description: "" + enabled: true + type: local_keystore + suggested_fee_recipient: "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d" + gas_limit: "banana" + voting_keystore_path: "" + voting_public_key: "0xaf3c7ddab7e293834710fca2d39d068f884455ede270e0d0293dc818e4f2f0f975355067e8437955cb29aec674e5c9e7" + "#; + + let def: Result = serde_yaml::from_str(invalid_gas_limit); + assert!(def.is_err()); + + let valid_gas_limit = r#"--- + description: "" + enabled: true + type: local_keystore + suggested_fee_recipient: "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d" + gas_limit: 35000000 + voting_keystore_path: "" + voting_public_key: "0xaf3c7ddab7e293834710fca2d39d068f884455ede270e0d0293dc818e4f2f0f975355067e8437955cb29aec674e5c9e7" + "#; + + let def: ValidatorDefinition = serde_yaml::from_str(valid_gas_limit).unwrap(); + assert_eq!(def.gas_limit, Some(35000000)); + } + + #[test] + fn builder_proposals_checks() { + let no_builder_proposals = r#"--- + description: "" + enabled: true + type: local_keystore + suggested_fee_recipient: "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d" + voting_keystore_path: "" + voting_public_key: "0xaf3c7ddab7e293834710fca2d39d068f884455ede270e0d0293dc818e4f2f0f975355067e8437955cb29aec674e5c9e7" + "#; + let def: ValidatorDefinition = serde_yaml::from_str(no_builder_proposals).unwrap(); + assert!(def.builder_proposals.is_none()); + + let invalid_builder_proposals = r#"--- + description: "" + enabled: true + type: local_keystore + suggested_fee_recipient: "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d" + builder_proposals: "banana" + voting_keystore_path: "" + voting_public_key: "0xaf3c7ddab7e293834710fca2d39d068f884455ede270e0d0293dc818e4f2f0f975355067e8437955cb29aec674e5c9e7" + "#; + + let def: Result = serde_yaml::from_str(invalid_builder_proposals); + assert!(def.is_err()); + + let valid_builder_proposals = r#"--- + description: "" + enabled: true + type: local_keystore + suggested_fee_recipient: "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d" + builder_proposals: true + voting_keystore_path: "" + voting_public_key: "0xaf3c7ddab7e293834710fca2d39d068f884455ede270e0d0293dc818e4f2f0f975355067e8437955cb29aec674e5c9e7" + "#; + + let def: 
ValidatorDefinition = serde_yaml::from_str(valid_builder_proposals).unwrap(); + assert_eq!(def.builder_proposals, Some(true)); + } } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 1025959165b..8cd138e9807 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -1280,7 +1280,7 @@ impl BeaconNodeHttpClient { .await } - /// `GET v2/validator/blocks/{slot}` + /// `GET v1/validator/blinded_blocks/{slot}` pub async fn get_validator_blinded_blocks_with_verify_randao< T: EthSpec, Payload: ExecPayload, @@ -1291,7 +1291,7 @@ impl BeaconNodeHttpClient { graffiti: Option<&Graffiti>, verify_randao: Option, ) -> Result>, Error> { - let mut path = self.eth_path(V2)?; + let mut path = self.eth_path(V1)?; path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index d678ca34b77..abed4fe5e77 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -462,7 +462,9 @@ impl ValidatorClientHttpClient { pub async fn patch_lighthouse_validators( &self, voting_pubkey: &PublicKeyBytes, - enabled: bool, + enabled: Option, + gas_limit: Option, + builder_proposals: Option, ) -> Result<(), Error> { let mut path = self.server.full.clone(); @@ -472,7 +474,15 @@ impl ValidatorClientHttpClient { .push("validators") .push(&voting_pubkey.to_string()); - self.patch(path, &ValidatorPatchRequest { enabled }).await + self.patch( + path, + &ValidatorPatchRequest { + enabled, + gas_limit, + builder_proposals, + }, + ) + .await } fn make_keystores_url(&self) -> Result { diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs index 3e1c13dcf87..d829c97cc7e 100644 --- a/common/eth2/src/lighthouse_vc/types.rs +++ b/common/eth2/src/lighthouse_vc/types.rs @@ -26,6 +26,12 @@ pub struct ValidatorRequest { #[serde(default)] #[serde(skip_serializing_if = "Option::is_none")] pub suggested_fee_recipient: Option
, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub gas_limit: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub builder_proposals: Option, #[serde(with = "eth2_serde_utils::quoted_u64")] pub deposit_gwei: u64, } @@ -49,6 +55,12 @@ pub struct CreatedValidator { #[serde(default)] #[serde(skip_serializing_if = "Option::is_none")] pub suggested_fee_recipient: Option
, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub gas_limit: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub builder_proposals: Option, pub eth1_deposit_tx_data: String, #[serde(with = "eth2_serde_utils::quoted_u64")] pub deposit_gwei: u64, @@ -62,7 +74,15 @@ pub struct PostValidatorsResponseData { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ValidatorPatchRequest { - pub enabled: bool, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub enabled: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub gas_limit: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub builder_proposals: Option, } #[derive(Clone, PartialEq, Serialize, Deserialize)] @@ -70,8 +90,18 @@ pub struct KeystoreValidatorsPostRequest { pub password: ZeroizeString, pub enable: bool, pub keystore: Keystore, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] pub graffiti: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] pub suggested_fee_recipient: Option
, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub gas_limit: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub builder_proposals: Option, } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] @@ -84,6 +114,12 @@ pub struct Web3SignerValidatorRequest { #[serde(default)] #[serde(skip_serializing_if = "Option::is_none")] pub suggested_fee_recipient: Option
, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub gas_limit: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub builder_proposals: Option, pub voting_public_key: PublicKey, pub url: String, #[serde(default)] diff --git a/consensus/types/src/builder_bid.rs b/consensus/types/src/builder_bid.rs index 1726f2ad077..047bceae7e2 100644 --- a/consensus/types/src/builder_bid.rs +++ b/consensus/types/src/builder_bid.rs @@ -1,13 +1,14 @@ -use crate::{EthSpec, ExecPayload, ExecutionPayloadHeader, Uint256}; -use bls::blst_implementations::PublicKeyBytes; +use crate::{ChainSpec, EthSpec, ExecPayload, ExecutionPayloadHeader, SignedRoot, Uint256}; +use bls::PublicKeyBytes; use bls::Signature; use serde::{Deserialize as De, Deserializer, Serialize as Ser, Serializer}; use serde_derive::{Deserialize, Serialize}; use serde_with::{serde_as, DeserializeAs, SerializeAs}; use std::marker::PhantomData; +use tree_hash_derive::TreeHash; #[serde_as] -#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] +#[derive(PartialEq, Debug, Serialize, Deserialize, TreeHash, Clone)] #[serde(bound = "E: EthSpec, Payload: ExecPayload")] pub struct BuilderBid> { #[serde_as(as = "BlindedPayloadAsHeader")] @@ -16,9 +17,12 @@ pub struct BuilderBid> { pub value: Uint256, pub pubkey: PublicKeyBytes, #[serde(skip)] + #[tree_hash(skip_hashing)] _phantom_data: PhantomData, } +impl> SignedRoot for BuilderBid {} + /// Validator registration, for use in interacting with servers implementing the builder API. #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] #[serde(bound = "E: EthSpec, Payload: ExecPayload")] @@ -50,3 +54,17 @@ impl<'de, E: EthSpec, Payload: ExecPayload> DeserializeAs<'de, Payload> .map_err(|_| serde::de::Error::custom("unable to convert payload header to payload")) } } + +impl> SignedBuilderBid { + pub fn verify_signature(&self, spec: &ChainSpec) -> bool { + self.message + .pubkey + .decompress() + .map(|pubkey| { + let domain = spec.get_builder_domain(); + let message = self.message.signing_root(domain); + self.signature.verify(&pubkey, message) + }) + .unwrap_or(false) + } +} diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 8a69505a51f..3668d0524cd 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -1355,4 +1355,12 @@ mod yaml_tests { ) ); } + + #[test] + fn test_domain_builder() { + assert_eq!( + int_to_bytes4(ApplicationDomain::Builder.get_domain_constant()), + [0, 0, 0, 1] + ); + } } diff --git a/consensus/types/src/execution_block_hash.rs b/consensus/types/src/execution_block_hash.rs index dbfe2181592..978bd4c69a0 100644 --- a/consensus/types/src/execution_block_hash.rs +++ b/consensus/types/src/execution_block_hash.rs @@ -1,12 +1,14 @@ use crate::test_utils::TestRandom; use crate::Hash256; +use derivative::Derivative; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; use std::fmt; #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] -#[derive(Default, Debug, Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Hash)] +#[derive(Default, Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Hash, Derivative)] +#[derivative(Debug = "transparent")] #[serde(transparent)] pub struct ExecutionBlockHash(Hash256); diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index 4a8552d2490..114ca02ecff 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ 
-9,6 +9,7 @@ use std::hash::Hash; use test_random_derive::TestRandom; use tree_hash::TreeHash; +#[derive(Debug)] pub enum BlockType { Blinded, Full, @@ -18,6 +19,7 @@ pub trait ExecPayload: Debug + Clone + Encode + + Debug + Decode + TestRandom + TreeHash @@ -45,6 +47,7 @@ pub trait ExecPayload: fn timestamp(&self) -> u64; fn block_hash(&self) -> ExecutionBlockHash; fn fee_recipient(&self) -> Address; + fn gas_limit(&self) -> u64; } impl ExecPayload for FullPayload { @@ -79,6 +82,10 @@ impl ExecPayload for FullPayload { fn fee_recipient(&self) -> Address { self.execution_payload.fee_recipient } + + fn gas_limit(&self) -> u64 { + self.execution_payload.gas_limit + } } impl ExecPayload for BlindedPayload { @@ -113,6 +120,10 @@ impl ExecPayload for BlindedPayload { fn fee_recipient(&self) -> Address { self.execution_payload_header.fee_recipient } + + fn gas_limit(&self) -> u64 { + self.execution_payload_header.gas_limit + } } #[derive(Debug, Clone, TestRandom, Serialize, Deserialize, Derivative)] diff --git a/lighthouse/tests/account_manager.rs b/lighthouse/tests/account_manager.rs index 06b0303c69e..696830a0d1d 100644 --- a/lighthouse/tests/account_manager.rs +++ b/lighthouse/tests/account_manager.rs @@ -494,6 +494,8 @@ fn validator_import_launchpad() { description: "".into(), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, voting_public_key: keystore.public_key().unwrap(), signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path, @@ -614,6 +616,8 @@ fn validator_import_launchpad_no_password_then_add_password() { description: "".into(), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, voting_public_key: keystore.public_key().unwrap(), signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path, @@ -638,6 +642,8 @@ fn validator_import_launchpad_no_password_then_add_password() { description: "".into(), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, voting_public_key: keystore.public_key().unwrap(), signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path: dst_keystore_dir.join(KEYSTORE_NAME), @@ -738,6 +744,8 @@ fn validator_import_launchpad_password_file() { voting_public_key: keystore.public_key().unwrap(), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path, voting_keystore_password_path: None, diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index a9f8900d0cf..1f6855cba4c 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -394,25 +394,36 @@ fn merge_fee_recipient_flag() { fn run_payload_builder_flag_test(flag: &str, builders: &str) { use sensitive_url::SensitiveUrl; - let dir = TempDir::new().expect("Unable to create temporary directory"); let all_builders: Vec<_> = builders .split(",") .map(|builder| SensitiveUrl::parse(builder).expect("valid builder url")) .collect(); - CommandLineTest::new() - .flag("execution-endpoint", Some("http://meow.cats")) + run_payload_builder_flag_test_with_config(flag, builders, None, None, |config| { + let config = config.execution_layer.as_ref().unwrap(); + // Only first provided endpoint is parsed as we don't support + // redundancy. 
+ assert_eq!(config.builder_url, all_builders.get(0).cloned()); + }) +} +fn run_payload_builder_flag_test_with_config( + flag: &str, + builders: &str, + additional_flag: Option<&str>, + additional_flag_value: Option<&str>, + f: F, +) { + let dir = TempDir::new().expect("Unable to create temporary directory"); + let mut test = CommandLineTest::new(); + test.flag("execution-endpoint", Some("http://meow.cats")) .flag( "execution-jwt", dir.path().join("jwt-file").as_os_str().to_str(), ) - .flag(flag, Some(builders)) - .run_with_zero_port() - .with_config(|config| { - let config = config.execution_layer.as_ref().unwrap(); - // Only first provided endpoint is parsed as we don't support - // redundancy. - assert_eq!(config.builder_url, all_builders.get(0).cloned()); - }); + .flag(flag, Some(builders)); + if let Some(additional_flag_name) = additional_flag { + test.flag(additional_flag_name, additional_flag_value); + } + test.run_with_zero_port().with_config(f); } #[test] @@ -420,7 +431,46 @@ fn payload_builder_flags() { run_payload_builder_flag_test("builder", "http://meow.cats"); run_payload_builder_flag_test("payload-builder", "http://meow.cats"); run_payload_builder_flag_test("payload-builders", "http://meow.cats,http://woof.dogs"); - run_payload_builder_flag_test("payload-builders", "http://meow.cats,http://woof.dogs"); +} + +#[test] +fn builder_fallback_flags() { + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + Some("builder-fallback-skips"), + Some("7"), + |config| { + assert_eq!(config.chain.builder_fallback_skips, 7); + }, + ); + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + Some("builder-fallback-skips-per-epoch"), + Some("11"), + |config| { + assert_eq!(config.chain.builder_fallback_skips_per_epoch, 11); + }, + ); + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + Some("builder-fallback-epochs-since-finalization"), + Some("4"), + |config| { + assert_eq!(config.chain.builder_fallback_epochs_since_finalization, 4); + }, + ); + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + Some("builder-fallback-disable-checks"), + None, + |config| { + assert_eq!(config.chain.builder_fallback_disable_checks, true); + }, + ); } fn run_jwt_optional_flags_test(jwt_flag: &str, jwt_id_flag: &str, jwt_version_flag: &str) { diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index 98b159e996b..21dc4d78726 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -389,6 +389,48 @@ fn no_doppelganger_protection_flag() { .with_config(|config| assert!(!config.enable_doppelganger_protection)); } #[test] +fn no_gas_limit_flag() { + CommandLineTest::new() + .run() + .with_config(|config| assert!(config.gas_limit.is_none())); +} +#[test] +fn gas_limit_flag() { + CommandLineTest::new() + .flag("gas-limit", Some("600")) + .flag("builder-proposals", None) + .run() + .with_config(|config| assert_eq!(config.gas_limit, Some(600))); +} +#[test] +fn no_builder_proposals_flag() { + CommandLineTest::new() + .run() + .with_config(|config| assert!(!config.builder_proposals)); +} +#[test] +fn builder_proposals_flag() { + CommandLineTest::new() + .flag("builder-proposals", None) + .run() + .with_config(|config| assert!(config.builder_proposals)); +} +#[test] +fn no_builder_registration_timestamp_override_flag() { + CommandLineTest::new() + .run() + .with_config(|config| 
assert!(config.builder_registration_timestamp_override.is_none())); +} +#[test] +fn builder_registration_timestamp_override_flag() { + CommandLineTest::new() + .flag("builder-registration-timestamp-override", Some("100")) + .run() + .with_config(|config| { + assert_eq!(config.builder_registration_timestamp_override, Some(100)) + }); +} +#[test] fn strict_fee_recipient_flag() { CommandLineTest::new() .flag("strict-fee-recipient", None) diff --git a/scripts/local_testnet/print_logs.sh b/scripts/local_testnet/dump_logs.sh similarity index 83% rename from scripts/local_testnet/print_logs.sh rename to scripts/local_testnet/dump_logs.sh index 2a9e7822a6f..dc5f4edd38a 100755 --- a/scripts/local_testnet/print_logs.sh +++ b/scripts/local_testnet/dump_logs.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Print the tail of all the logs output from local testnet +# Print all the logs output from local testnet set -Eeuo pipefail @@ -12,6 +12,6 @@ do echo "=============================================================================" echo "$f" echo "=============================================================================" - tail "$f" + cat "$f" echo "" done diff --git a/scripts/local_testnet/start_local_testnet.sh b/scripts/local_testnet/start_local_testnet.sh index 33c1d642e73..dcc0a5382a9 100755 --- a/scripts/local_testnet/start_local_testnet.sh +++ b/scripts/local_testnet/start_local_testnet.sh @@ -10,12 +10,14 @@ ulimit -n 65536 # VC_COUNT is defaulted in vars.env DEBUG_LEVEL=${DEBUG_LEVEL:-info} +BUILDER_PROPOSALS= # Get options -while getopts "v:d:h" flag; do +while getopts "v:d:ph" flag; do case "${flag}" in v) VC_COUNT=${OPTARG};; d) DEBUG_LEVEL=${OPTARG};; + p) BUILDER_PROPOSALS="-p";; h) validators=$(( $VALIDATOR_COUNT / $BN_COUNT )) echo "Start local testnet, defaults: 1 eth1 node, $BN_COUNT beacon nodes," @@ -26,6 +28,7 @@ while getopts "v:d:h" flag; do echo "Options:" echo " -v: VC_COUNT default: $VC_COUNT" echo " -d: DEBUG_LEVEL default: info" + echo " -p: enable private tx proposals" echo " -h: this help" exit ;; @@ -116,7 +119,7 @@ done # Start requested number of validator clients for (( vc=1; vc<=$VC_COUNT; vc++ )); do - execute_command_add_PID validator_node_$vc.log ./validator_client.sh $DATADIR/node_$vc http://localhost:$((BN_http_port_base + $vc)) $DEBUG_LEVEL + execute_command_add_PID validator_node_$vc.log ./validator_client.sh $BUILDER_PROPOSALS -d $DEBUG_LEVEL $DATADIR/node_$vc http://localhost:$((BN_http_port_base + $vc)) done echo "Started!" 
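
As a usage sketch for the local testnet script above (option values chosen only for illustration), the new `-p` switch
can be combined with the existing `-v` and `-d` options; it is forwarded to each validator client, where it enables
`--builder-proposals` (see the `validator_client.sh` change below):

```
# Hypothetical invocation: two validator clients, debug logging, builder (blinded) proposals enabled.
./start_local_testnet.sh -p -v 2 -d debug
# Print the collected logs once the testnet is running.
./dump_logs.sh
```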
diff --git a/scripts/local_testnet/validator_client.sh b/scripts/local_testnet/validator_client.sh index 5aa75dfe2d0..975a2a6753c 100755 --- a/scripts/local_testnet/validator_client.sh +++ b/scripts/local_testnet/validator_client.sh @@ -10,13 +10,24 @@ set -Eeuo pipefail source ./vars.env -DEBUG_LEVEL=${3:-info} +DEBUG_LEVEL=info + +BUILDER_PROPOSALS= + +# Get options +while getopts "pd:" flag; do + case "${flag}" in + p) BUILDER_PROPOSALS="--builder-proposals";; + d) DEBUG_LEVEL=${OPTARG};; + esac +done exec lighthouse \ --debug-level $DEBUG_LEVEL \ vc \ - --datadir $1 \ + $BUILDER_PROPOSALS \ + --datadir ${@:$OPTIND:1} \ --testnet-dir $TESTNET_DIR \ --init-slashing-protection \ - --beacon-nodes $2 \ + --beacon-nodes ${@:$OPTIND+1:1} \ $VC_ARGS diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index f86148312fe..798dae083be 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -17,8 +17,9 @@ use state_processing::per_block_processing::{ use std::fmt::Debug; use std::path::Path; use types::{ - Attestation, AttesterSlashing, BeaconBlock, BeaconState, ChainSpec, Deposit, EthSpec, ForkName, - FullPayload, ProposerSlashing, SignedVoluntaryExit, SyncAggregate, + Attestation, AttesterSlashing, BeaconBlock, BeaconState, BlindedPayload, ChainSpec, Deposit, + EthSpec, ExecutionPayload, ForkName, FullPayload, ProposerSlashing, SignedVoluntaryExit, + SyncAggregate, }; #[derive(Debug, Clone, Default, Deserialize)] @@ -255,6 +256,40 @@ impl Operation for FullPayload { } } } +impl Operation for BlindedPayload { + fn handler_name() -> String { + "execution_payload".into() + } + + fn filename() -> String { + "execution_payload.ssz_snappy".into() + } + + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name != ForkName::Base && fork_name != ForkName::Altair + } + + fn decode(path: &Path, _spec: &ChainSpec) -> Result { + ssz_decode_file::>(path).map(Into::into) + } + + fn apply_to( + &self, + state: &mut BeaconState, + spec: &ChainSpec, + extra: &Operations, + ) -> Result<(), BlockProcessingError> { + let valid = extra + .execution_metadata + .as_ref() + .map_or(false, |e| e.execution_valid); + if valid { + process_execution_payload(state, self, spec) + } else { + Err(BlockProcessingError::ExecutionInvalid) + } + } +} impl> LoadCase for Operations { fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { diff --git a/testing/ef_tests/src/type_name.rs b/testing/ef_tests/src/type_name.rs index 540fe6903ea..c075e89b3fe 100644 --- a/testing/ef_tests/src/type_name.rs +++ b/testing/ef_tests/src/type_name.rs @@ -56,6 +56,7 @@ type_name!(Eth1Data); type_name_generic!(ExecutionPayload); type_name_generic!(FullPayload, "ExecutionPayload"); type_name_generic!(ExecutionPayloadHeader); +type_name_generic!(BlindedPayload, "ExecutionPayloadHeader"); type_name!(Fork); type_name!(ForkData); type_name_generic!(HistoricalBatch); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 91345fb669a..31abbd1591a 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -71,11 +71,17 @@ fn operations_sync_aggregate() { } #[test] -fn operations_execution_payload() { +fn operations_execution_payload_full() { OperationsHandler::>::default().run(); OperationsHandler::>::default().run(); } +#[test] +fn operations_execution_payload_blinded() { + OperationsHandler::>::default().run(); + OperationsHandler::>::default().run(); +} + #[test] fn sanity_blocks() { 
SanityBlocksHandler::::default().run(); diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 7126268c373..0aa960bc414 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -3,7 +3,9 @@ use crate::execution_engine::{ }; use crate::transactions::transactions; use ethers_providers::Middleware; -use execution_layer::{ExecutionLayer, PayloadAttributes, PayloadStatus}; +use execution_layer::{ + BuilderParams, ChainHealth, ExecutionLayer, PayloadAttributes, PayloadStatus, +}; use fork_choice::ForkchoiceUpdateParameters; use reqwest::{header::CONTENT_TYPE, Client}; use sensitive_url::SensitiveUrl; @@ -14,7 +16,7 @@ use task_executor::TaskExecutor; use tokio::time::sleep; use types::{ Address, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, FullPayload, Hash256, - MainnetEthSpec, Slot, Uint256, + MainnetEthSpec, PublicKeyBytes, Slot, Uint256, }; const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(20); @@ -305,6 +307,11 @@ impl TestRig { // in CI. sleep(Duration::from_secs(3)).await; + let builder_params = BuilderParams { + pubkey: PublicKeyBytes::empty(), + slot: Slot::new(0), + chain_health: ChainHealth::Healthy, + }; let valid_payload = self .ee_a .execution_layer @@ -313,9 +320,9 @@ impl TestRig { timestamp, prev_randao, proposer_index, - None, - Slot::new(0), forkchoice_update_params, + builder_params, + &self.spec, ) .await .unwrap() @@ -413,6 +420,11 @@ impl TestRig { let timestamp = valid_payload.timestamp + 1; let prev_randao = Hash256::zero(); let proposer_index = 0; + let builder_params = BuilderParams { + pubkey: PublicKeyBytes::empty(), + slot: Slot::new(0), + chain_health: ChainHealth::Healthy, + }; let second_payload = self .ee_a .execution_layer @@ -421,9 +433,9 @@ impl TestRig { timestamp, prev_randao, proposer_index, - None, - Slot::new(0), forkchoice_update_params, + builder_params, + &self.spec, ) .await .unwrap() diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index bdee18026b4..4f9a574f847 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -302,6 +302,7 @@ mod tests { let slot_clock = TestingSlotClock::new(Slot::new(0), Duration::from_secs(0), Duration::from_secs(1)); + let config = validator_client::Config::default(); let validator_store = ValidatorStore::<_, E>::new( initialized_validators, @@ -310,7 +311,7 @@ mod tests { spec, None, slot_clock, - None, + &config, executor, log.clone(), ); @@ -359,6 +360,8 @@ mod tests { voting_public_key: validator_pubkey.clone(), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, description: String::default(), signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path: signer_rig.keystore_path.clone(), @@ -375,6 +378,8 @@ mod tests { voting_public_key: validator_pubkey.clone(), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, description: String::default(), signing_definition: SigningDefinition::Web3Signer(Web3SignerDefinition { url: signer_rig.url.to_string(), @@ -450,8 +455,6 @@ mod tests { } } - //TODO: remove this once the consensys web3signer includes the `validator_registration` method - #[allow(dead_code)] fn get_validator_registration(pubkey: PublicKeyBytes) -> ValidatorRegistrationData { let fee_recipient = Address::repeat_byte(42); ValidatorRegistrationData { @@ -513,16 
+516,17 @@ mod tests { .await .unwrap() }) - //TODO: uncomment this once the consensys web3signer includes the `validator_registration` method - // - // .await - // .assert_signatures_match("validator_registration", |pubkey, validator_store| async move { - // let val_reg_data = get_validator_registration(pubkey); - // validator_store - // .sign_validator_registration_data(val_reg_data) - // .await - // .unwrap() - // }) + .await + .assert_signatures_match( + "validator_registration", + |pubkey, validator_store| async move { + let val_reg_data = get_validator_registration(pubkey); + validator_store + .sign_validator_registration_data(val_reg_data) + .await + .unwrap() + }, + ) .await; } @@ -599,16 +603,17 @@ mod tests { .unwrap() }, ) - //TODO: uncomment this once the consensys web3signer includes the `validator_registration` method - // - // .await - // .assert_signatures_match("validator_registration", |pubkey, validator_store| async move { - // let val_reg_data = get_validator_registration(pubkey); - // validator_store - // .sign_validator_registration_data(val_reg_data) - // .await - // .unwrap() - // }) + .await + .assert_signatures_match( + "validator_registration", + |pubkey, validator_store| async move { + let val_reg_data = get_validator_registration(pubkey); + validator_store + .sign_validator_registration_data(val_reg_data) + .await + .unwrap() + }, + ) .await; } diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 649f2406451..d47546eb0d4 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -11,9 +11,7 @@ use slot_clock::SlotClock; use std::ops::Deref; use std::sync::Arc; use tokio::sync::mpsc; -use types::{ - BlindedPayload, BlockType, Epoch, EthSpec, ExecPayload, FullPayload, PublicKeyBytes, Slot, -}; +use types::{BlindedPayload, BlockType, EthSpec, ExecPayload, FullPayload, PublicKeyBytes, Slot}; #[derive(Debug)] pub enum BlockError { @@ -44,7 +42,6 @@ pub struct BlockServiceBuilder { context: Option>, graffiti: Option, graffiti_file: Option, - private_tx_proposals: bool, strict_fee_recipient: bool, } @@ -57,7 +54,6 @@ impl BlockServiceBuilder { context: None, graffiti: None, graffiti_file: None, - private_tx_proposals: false, strict_fee_recipient: false, } } @@ -92,11 +88,6 @@ impl BlockServiceBuilder { self } - pub fn private_tx_proposals(mut self, private_tx_proposals: bool) -> Self { - self.private_tx_proposals = private_tx_proposals; - self - } - pub fn strict_fee_recipient(mut self, strict_fee_recipient: bool) -> Self { self.strict_fee_recipient = strict_fee_recipient; self @@ -119,7 +110,6 @@ impl BlockServiceBuilder { .ok_or("Cannot build BlockService without runtime_context")?, graffiti: self.graffiti, graffiti_file: self.graffiti_file, - private_tx_proposals: self.private_tx_proposals, strict_fee_recipient: self.strict_fee_recipient, }), }) @@ -134,7 +124,6 @@ pub struct Inner { context: RuntimeContext, graffiti: Option, graffiti_file: Option, - private_tx_proposals: bool, strict_fee_recipient: bool, } @@ -244,32 +233,29 @@ impl BlockService { ) } - let private_tx_proposals = self.private_tx_proposals; - let merge_slot = self - .context - .eth2_config - .spec - .bellatrix_fork_epoch - .unwrap_or_else(Epoch::max_value) - .start_slot(E::slots_per_epoch()); for validator_pubkey in proposers { + let builder_proposals = self + .validator_store + .get_builder_proposals(&validator_pubkey); let service = self.clone(); let log = log.clone(); self.inner.context.executor.spawn( async move 
{ - let publish_result = if private_tx_proposals && slot >= merge_slot { + let publish_result = if builder_proposals { let mut result = service.clone() .publish_block::>(slot, validator_pubkey) .await; match result.as_ref() { Err(BlockError::Recoverable(e)) => { - error!(log, "Error whilst producing a blinded block, attempting to publish full block"; "error" => ?e); + error!(log, "Error whilst producing a blinded block, attempting to \ + publish full block"; "error" => ?e); result = service .publish_block::>(slot, validator_pubkey) .await; }, Err(BlockError::Irrecoverable(e)) => { - error!(log, "Error whilst producing a blinded block, cannot fallback because block was signed"; "error" => ?e); + error!(log, "Error whilst producing a blinded block, cannot fallback \ + because the block was signed"; "error" => ?e); }, _ => {}, }; @@ -344,12 +330,12 @@ impl BlockService { let block = self .beacon_nodes .first_success(RequireSynced::No, |beacon_node| async move { - let get_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BEACON_BLOCK_HTTP_GET], - ); let block = match Payload::block_type() { BlockType::Full => { + let _get_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BEACON_BLOCK_HTTP_GET], + ); beacon_node .get_validator_blocks::( slot, @@ -366,6 +352,10 @@ impl BlockService { .data } BlockType::Blinded => { + let _get_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BLINDED_BEACON_BLOCK_HTTP_GET], + ); beacon_node .get_validator_blinded_blocks::( slot, @@ -382,7 +372,6 @@ impl BlockService { .data } }; - drop(get_timer); // Ensure the correctness of the execution payload's fee recipient. if strict_fee_recipient { @@ -415,43 +404,51 @@ impl BlockService { // Publish block with first available beacon node. self.beacon_nodes .first_success(RequireSynced::No, |beacon_node| async { - let _post_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BEACON_BLOCK_HTTP_POST], - ); - match Payload::block_type() { - BlockType::Full => beacon_node - .post_beacon_blocks(&signed_block) - .await - .map_err(|e| { - BlockError::Irrecoverable(format!( - "Error from beacon node when publishing block: {:?}", - e - )) - })?, - BlockType::Blinded => beacon_node - .post_beacon_blinded_blocks(&signed_block) - .await - .map_err(|e| { - BlockError::Irrecoverable(format!( - "Error from beacon node when publishing block: {:?}", - e - )) - })?, + BlockType::Full => { + let _post_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BEACON_BLOCK_HTTP_POST], + ); + beacon_node + .post_beacon_blocks(&signed_block) + .await + .map_err(|e| { + BlockError::Irrecoverable(format!( + "Error from beacon node when publishing block: {:?}", + e + )) + })? + } + BlockType::Blinded => { + let _post_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BLINDED_BEACON_BLOCK_HTTP_POST], + ); + beacon_node + .post_beacon_blinded_blocks(&signed_block) + .await + .map_err(|e| { + BlockError::Irrecoverable(format!( + "Error from beacon node when publishing block: {:?}", + e + )) + })? 
+ } } - - info!( - log, - "Successfully published block"; - "deposits" => signed_block.message().body().deposits().len(), - "attestations" => signed_block.message().body().attestations().len(), - "graffiti" => ?graffiti.map(|g| g.as_utf8_lossy()), - "slot" => signed_block.slot().as_u64(), - ); Ok::<_, BlockError>(()) }) .await?; + + info!( + log, + "Successfully published block"; + "block_type" => ?Payload::block_type(), + "deposits" => signed_block.message().body().deposits().len(), + "attestations" => signed_block.message().body().attestations().len(), + "graffiti" => ?graffiti.map(|g| g.as_utf8_lossy()), + "slot" => signed_block.slot().as_u64(), + ); Ok(()) } } diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index 1f8b7b08baf..ceca31aa75a 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -251,8 +251,9 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(false), ) .arg( - Arg::with_name("private-tx-proposals") - .long("private-tx-proposals") + Arg::with_name("builder-proposals") + .long("builder-proposals") + .alias("private-tx-proposals") .help("If this flag is set, Lighthouse will query the Beacon Node for only block \ headers during proposals and will sign over headers. Useful for outsourcing \ execution payload construction during proposals.") @@ -271,4 +272,22 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { for payload construction, where a strict fee recipient check will still be applied.") .takes_value(false), ) + .arg( + Arg::with_name("builder-registration-timestamp-override") + .long("builder-registration-timestamp-override") + .alias("builder-registration-timestamp-override") + .help("This flag takes a unix timestamp value that will be used to override the \ + timestamp used in the builder api registration") + .takes_value(true), + ) + .arg( + Arg::with_name("gas-limit") + .long("gas-limit") + .value_name("INTEGER") + .takes_value(true) + .help("The gas limit to be used in all builder proposals for all validators managed \ + by this validator client. Note this will not necessarily be used if the gas limit \ + set here moves too far from the previous block's gas limit. [default: 30,000,000]") + .requires("builder-proposals"), + ) } diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 725414b1b9d..42c91927ca8 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -52,7 +52,12 @@ pub struct Config { /// If true, enable functionality that monitors the network for attestations or proposals from /// any of the validators managed by this client before starting up. pub enable_doppelganger_protection: bool, - pub private_tx_proposals: bool, + /// Enable use of the blinded block endpoints during proposals. + pub builder_proposals: bool, + /// Overrides the timestamp field in builder api ValidatorRegistrationV1 + pub builder_registration_timestamp_override: Option, + /// Fallback gas limit. + pub gas_limit: Option, /// A list of custom certificates that the validator client will additionally use when /// connecting to a beacon node over SSL/TLS. 
pub beacon_nodes_tls_certs: Option>, @@ -91,7 +96,9 @@ impl Default for Config { monitoring_api: None, enable_doppelganger_protection: false, beacon_nodes_tls_certs: None, - private_tx_proposals: false, + builder_proposals: false, + builder_registration_timestamp_override: None, + gas_limit: None, strict_fee_recipient: false, } } @@ -300,8 +307,27 @@ impl Config { config.enable_doppelganger_protection = true; } - if cli_args.is_present("private-tx-proposals") { - config.private_tx_proposals = true; + if cli_args.is_present("builder-proposals") { + config.builder_proposals = true; + } + + config.gas_limit = cli_args + .value_of("gas-limit") + .map(|gas_limit| { + gas_limit + .parse::() + .map_err(|_| "gas-limit is not a valid u64.") + }) + .transpose()?; + + if let Some(registration_timestamp_override) = + cli_args.value_of("builder-registration-timestamp-override") + { + config.builder_registration_timestamp_override = Some( + registration_timestamp_override + .parse::() + .map_err(|_| "builder-registration-timestamp-override is not a valid u64.")?, + ); } if cli_args.is_present("strict-fee-recipient") { diff --git a/validator_client/src/http_api/create_validator.rs b/validator_client/src/http_api/create_validator.rs index db59c25f758..a32ccce6279 100644 --- a/validator_client/src/http_api/create_validator.rs +++ b/validator_client/src/http_api/create_validator.rs @@ -140,6 +140,8 @@ pub async fn create_validators_mnemonic, T: 'static + SlotClock, request.enable, request.graffiti.clone(), request.suggested_fee_recipient, + request.gas_limit, + request.builder_proposals, ) .await .map_err(|e| { @@ -154,6 +156,8 @@ pub async fn create_validators_mnemonic, T: 'static + SlotClock, description: request.description.clone(), graffiti: request.graffiti.clone(), suggested_fee_recipient: request.suggested_fee_recipient, + gas_limit: request.gas_limit, + builder_proposals: request.builder_proposals, voting_pubkey, eth1_deposit_tx_data: eth2_serde_utils::hex::encode(ð1_deposit_data.rlp), deposit_gwei: request.deposit_gwei, diff --git a/validator_client/src/http_api/keystores.rs b/validator_client/src/http_api/keystores.rs index f88aacfca8d..29af8d02056 100644 --- a/validator_client/src/http_api/keystores.rs +++ b/validator_client/src/http_api/keystores.rs @@ -205,6 +205,8 @@ fn import_single_keystore( true, None, None, + None, + None, )) .map_err(|e| format!("failed to initialize validator: {:?}", e))?; diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index 07e7b1e13f0..a5d8d0e71c3 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -413,6 +413,8 @@ pub fn serve( let voting_password = body.password.clone(); let graffiti = body.graffiti.clone(); let suggested_fee_recipient = body.suggested_fee_recipient; + let gas_limit = body.gas_limit; + let builder_proposals = body.builder_proposals; let validator_def = { if let Some(handle) = task_executor.handle() { @@ -423,6 +425,8 @@ pub fn serve( body.enable, graffiti, suggested_fee_recipient, + gas_limit, + builder_proposals, )) .map_err(|e| { warp_utils::reject::custom_server_error(format!( @@ -469,6 +473,8 @@ pub fn serve( voting_public_key: web3signer.voting_public_key, graffiti: web3signer.graffiti, suggested_fee_recipient: web3signer.suggested_fee_recipient, + gas_limit: web3signer.gas_limit, + builder_proposals: web3signer.builder_proposals, description: web3signer.description, signing_definition: SigningDefinition::Web3Signer( Web3SignerDefinition { @@ -515,18 +521,32 @@ 
pub fn serve( let initialized_validators_rw_lock = validator_store.initialized_validators(); let mut initialized_validators = initialized_validators_rw_lock.write(); - match initialized_validators.is_enabled(&validator_pubkey) { - None => Err(warp_utils::reject::custom_not_found(format!( + match ( + initialized_validators.is_enabled(&validator_pubkey), + initialized_validators.validator(&validator_pubkey.compress()), + ) { + (None, _) => Err(warp_utils::reject::custom_not_found(format!( "no validator for {:?}", validator_pubkey ))), - Some(enabled) if enabled == body.enabled => Ok(()), - Some(_) => { + (Some(is_enabled), Some(initialized_validator)) + if Some(is_enabled) == body.enabled + && initialized_validator.get_gas_limit() == body.gas_limit + && initialized_validator.get_builder_proposals() + == body.builder_proposals => + { + Ok(()) + } + (Some(_), _) => { if let Some(handle) = task_executor.handle() { handle .block_on( - initialized_validators - .set_validator_status(&validator_pubkey, body.enabled), + initialized_validators.set_validator_definition_fields( + &validator_pubkey, + body.enabled, + body.gas_limit, + body.builder_proposals, + ), ) .map_err(|e| { warp_utils::reject::custom_server_error(format!( diff --git a/validator_client/src/http_api/remotekeys.rs b/validator_client/src/http_api/remotekeys.rs index 57b7527e2b9..991dfb8bf73 100644 --- a/validator_client/src/http_api/remotekeys.rs +++ b/validator_client/src/http_api/remotekeys.rs @@ -123,6 +123,8 @@ fn import_single_remotekey( voting_public_key: pubkey, graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, description: String::from("Added by remotekey API"), signing_definition: SigningDefinition::Web3Signer(Web3SignerDefinition { url, diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs index 7ee05634172..e67a82634c8 100644 --- a/validator_client/src/http_api/tests.rs +++ b/validator_client/src/http_api/tests.rs @@ -83,6 +83,7 @@ impl ApiTester { let mut config = Config::default(); config.validator_dir = validator_dir.path().into(); config.secrets_dir = secrets_dir.path().into(); + config.fee_recipient = Some(TEST_DEFAULT_FEE_RECIPIENT); let spec = E::default_spec(); @@ -103,7 +104,7 @@ impl ApiTester { spec, Some(Arc::new(DoppelgangerService::new(log.clone()))), slot_clock, - Some(TEST_DEFAULT_FEE_RECIPIENT), + &config, executor.clone(), log.clone(), )); @@ -270,6 +271,8 @@ impl ApiTester { description: format!("boi #{}", i), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, deposit_gwei: E::default_spec().max_effective_balance, }) .collect::>(); @@ -401,6 +404,8 @@ impl ApiTester { keystore, graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, }; self.client @@ -419,6 +424,8 @@ impl ApiTester { keystore, graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, }; let response = self @@ -455,6 +462,8 @@ impl ApiTester { description: format!("{}", i), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, voting_public_key: kp.pk, url: format!("http://signer_{}.com/", i), root_certificate_path: None, @@ -484,7 +493,7 @@ impl ApiTester { let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; self.client - .patch_lighthouse_validators(&validator.voting_pubkey, enabled) + .patch_lighthouse_validators(&validator.voting_pubkey, Some(enabled), None, None) .await 
.unwrap(); @@ -521,6 +530,56 @@ impl ApiTester { self } + + pub async fn set_gas_limit(self, index: usize, gas_limit: u64) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + + self.client + .patch_lighthouse_validators(&validator.voting_pubkey, None, Some(gas_limit), None) + .await + .unwrap(); + + self + } + + pub async fn assert_gas_limit(self, index: usize, gas_limit: u64) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + + assert_eq!( + self.validator_store.get_gas_limit(&validator.voting_pubkey), + gas_limit + ); + + self + } + + pub async fn set_builder_proposals(self, index: usize, builder_proposals: bool) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + + self.client + .patch_lighthouse_validators( + &validator.voting_pubkey, + None, + None, + Some(builder_proposals), + ) + .await + .unwrap(); + + self + } + + pub async fn assert_builder_proposals(self, index: usize, builder_proposals: bool) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + + assert_eq!( + self.validator_store + .get_builder_proposals(&validator.voting_pubkey), + builder_proposals + ); + + self + } } struct HdValidatorScenario { @@ -583,6 +642,8 @@ fn routes_with_invalid_auth() { description: <_>::default(), graffiti: <_>::default(), suggested_fee_recipient: <_>::default(), + gas_limit: <_>::default(), + builder_proposals: <_>::default(), deposit_gwei: <_>::default(), }]) .await @@ -612,13 +673,15 @@ fn routes_with_invalid_auth() { keystore, graffiti: <_>::default(), suggested_fee_recipient: <_>::default(), + gas_limit: <_>::default(), + builder_proposals: <_>::default(), }) .await }) .await .test_with_invalid_auth(|client| async move { client - .patch_lighthouse_validators(&PublicKeyBytes::empty(), false) + .patch_lighthouse_validators(&PublicKeyBytes::empty(), Some(false), None, None) .await }) .await @@ -735,6 +798,74 @@ fn validator_enabling() { }); } +#[test] +fn validator_gas_limit() { + let runtime = build_runtime(); + let weak_runtime = Arc::downgrade(&runtime); + runtime.block_on(async { + ApiTester::new(weak_runtime) + .await + .create_hd_validators(HdValidatorScenario { + count: 2, + specify_mnemonic: false, + key_derivation_path_offset: 0, + disabled: vec![], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2) + .set_gas_limit(0, 500) + .await + .assert_gas_limit(0, 500) + .await + // Update gas limit while validator is disabled. 
+ .set_validator_enabled(0, false) + .await + .assert_enabled_validators_count(1) + .assert_validators_count(2) + .set_gas_limit(0, 1000) + .await + .set_validator_enabled(0, true) + .await + .assert_enabled_validators_count(2) + .assert_gas_limit(0, 1000) + .await + }); +} + +#[test] +fn validator_builder_proposals() { + let runtime = build_runtime(); + let weak_runtime = Arc::downgrade(&runtime); + runtime.block_on(async { + ApiTester::new(weak_runtime) + .await + .create_hd_validators(HdValidatorScenario { + count: 2, + specify_mnemonic: false, + key_derivation_path_offset: 0, + disabled: vec![], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2) + .set_builder_proposals(0, true) + .await + // Test setting builder proposals while the validator is disabled + .set_validator_enabled(0, false) + .await + .assert_enabled_validators_count(1) + .assert_validators_count(2) + .set_builder_proposals(0, false) + .await + .set_validator_enabled(0, true) + .await + .assert_enabled_validators_count(2) + .assert_builder_proposals(0, false) + .await + }); +} + #[test] fn keystore_validator_creation() { let runtime = build_runtime(); diff --git a/validator_client/src/http_api/tests/keystores.rs b/validator_client/src/http_api/tests/keystores.rs index 530993ee053..c3b5f0bb901 100644 --- a/validator_client/src/http_api/tests/keystores.rs +++ b/validator_client/src/http_api/tests/keystores.rs @@ -39,6 +39,8 @@ fn web3signer_validator_with_pubkey(pubkey: PublicKey) -> Web3SignerValidatorReq description: "".into(), graffiti: None, suggested_fee_recipient: None, + gas_limit: None, + builder_proposals: None, voting_public_key: pubkey, url: web3_signer_url(), root_certificate_path: None, @@ -465,7 +467,7 @@ fn import_and_delete_conflicting_web3_signer_keystores() { for pubkey in &pubkeys { tester .client - .patch_lighthouse_validators(pubkey, false) + .patch_lighthouse_validators(pubkey, Some(false), None, None) .await .unwrap(); } diff --git a/validator_client/src/http_metrics/metrics.rs b/validator_client/src/http_metrics/metrics.rs index 836aab4c1fb..146d008a575 100644 --- a/validator_client/src/http_metrics/metrics.rs +++ b/validator_client/src/http_metrics/metrics.rs @@ -11,7 +11,9 @@ pub const UNREGISTERED: &str = "unregistered"; pub const FULL_UPDATE: &str = "full_update"; pub const BEACON_BLOCK: &str = "beacon_block"; pub const BEACON_BLOCK_HTTP_GET: &str = "beacon_block_http_get"; +pub const BLINDED_BEACON_BLOCK_HTTP_GET: &str = "blinded_beacon_block_http_get"; pub const BEACON_BLOCK_HTTP_POST: &str = "beacon_block_http_post"; +pub const BLINDED_BEACON_BLOCK_HTTP_POST: &str = "blinded_beacon_block_http_post"; pub const ATTESTATIONS: &str = "attestations"; pub const ATTESTATIONS_HTTP_GET: &str = "attestations_http_get"; pub const ATTESTATIONS_HTTP_POST: &str = "attestations_http_post"; diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index 8069bfcab8a..66a621eb77e 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -110,6 +110,8 @@ pub struct InitializedValidator { signing_method: Arc, graffiti: Option, suggested_fee_recipient: Option
<Address>,
+    gas_limit: Option<u64>,
+    builder_proposals: Option<bool>,
     /// The validators index in `state.validators`, to be updated by an external service.
     index: Option<u64>,
 }
@@ -129,6 +131,22 @@ impl InitializedValidator {
             SigningMethod::Web3Signer { .. } => None,
         }
     }
+
+    pub fn get_suggested_fee_recipient(&self) -> Option<Address> {
+        self.suggested_fee_recipient
+    }
+
+    pub fn get_gas_limit(&self) -> Option<u64> {
+        self.gas_limit
+    }
+
+    pub fn get_builder_proposals(&self) -> Option<bool> {
+        self.builder_proposals
+    }
+
+    pub fn get_index(&self) -> Option<u64> {
+        self.index
+    }
 }
 
 fn open_keystore(path: &Path) -> Result<Keystore, Error> {
@@ -292,6 +310,8 @@ impl InitializedValidator {
             signing_method: Arc::new(signing_method),
             graffiti: def.graffiti.map(Into::into),
             suggested_fee_recipient: def.suggested_fee_recipient,
+            gas_limit: def.gas_limit,
+            builder_proposals: def.builder_proposals,
             index: None,
         })
     }
@@ -622,7 +642,28 @@ impl InitializedValidators {
             .and_then(|v| v.suggested_fee_recipient)
     }
 
-    /// Sets the `InitializedValidator` and `ValidatorDefinition` `enabled` values.
+    /// Returns the `gas_limit` for a given public key specified in the
+    /// `ValidatorDefinitions`.
+    pub fn gas_limit(&self, public_key: &PublicKeyBytes) -> Option<u64> {
+        self.validators.get(public_key).and_then(|v| v.gas_limit)
+    }
+
+    /// Returns the `builder_proposals` for a given public key specified in the
+    /// `ValidatorDefinitions`.
+    pub fn builder_proposals(&self, public_key: &PublicKeyBytes) -> Option<bool> {
+        self.validators
+            .get(public_key)
+            .and_then(|v| v.builder_proposals)
+    }
+
+    /// Returns an `Option` of a reference to an `InitializedValidator` for a given public key specified in the
+    /// `ValidatorDefinitions`.
+    pub fn validator(&self, public_key: &PublicKeyBytes) -> Option<&InitializedValidator> {
+        self.validators.get(public_key)
+    }
+
+    /// Sets the `InitializedValidator` and `ValidatorDefinition` `enabled`, `gas_limit`, and `builder_proposals`
+    /// values.
     ///
     /// ## Notes
     ///
@@ -630,11 +671,17 @@ impl InitializedValidators {
     /// disk. A newly enabled validator will be added to `self.validators`, whilst a newly disabled
     /// validator will be removed from `self.validators`.
     ///
+    /// If a `gas_limit` is included in the call to this function, it will also be updated and saved
+    /// to disk. If `gas_limit` is `None` the `gas_limit` *will not* be unset in `ValidatorDefinition`
+    /// or `InitializedValidator`. The same logic applies to `builder_proposals`.
+    ///
     /// Saves the `ValidatorDefinitions` to file, even if no definitions were changed.
-    pub async fn set_validator_status(
+    pub async fn set_validator_definition_fields(
         &mut self,
         voting_public_key: &PublicKey,
-        enabled: bool,
+        enabled: Option<bool>,
+        gas_limit: Option<u64>,
+        builder_proposals: Option<bool>,
     ) -> Result<(), Error> {
         if let Some(def) = self
             .definitions
             .iter_mut()
             .find(|def| def.voting_public_key == *voting_public_key)
         {
-            def.enabled = enabled;
+            // Don't overwrite fields if they are not set in this request.
+            if let Some(enabled) = enabled {
+                def.enabled = enabled;
+            }
+            if let Some(gas_limit) = gas_limit {
+                def.gas_limit = Some(gas_limit);
+            }
+            if let Some(builder_proposals) = builder_proposals {
+                def.builder_proposals = Some(builder_proposals);
+            }
         }
 
         self.update_validators().await?;
 
+        if let Some(val) = self
+            .validators
+            .get_mut(&PublicKeyBytes::from(voting_public_key))
+        {
+            // Don't overwrite fields if they are not set in this request.
+ if let Some(gas_limit) = gas_limit { + val.gas_limit = Some(gas_limit); + } + if let Some(builder_proposals) = builder_proposals { + val.builder_proposals = Some(builder_proposals); + } + } + self.definitions .save(&self.validators_dir) .map_err(Error::UnableToSaveDefinitions)?; diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index f10142d6143..bb7b296d23e 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -362,7 +362,7 @@ impl ProductionValidatorClient { context.eth2_config.spec.clone(), doppelganger_service.clone(), slot_clock.clone(), - config.fee_recipient, + &config, context.executor.clone(), log.clone(), )); @@ -413,7 +413,6 @@ impl ProductionValidatorClient { .runtime_context(context.service_context("block".into())) .graffiti(config.graffiti) .graffiti_file(config.graffiti_file.clone()) - .private_tx_proposals(config.private_tx_proposals) .strict_fee_recipient(config.strict_fee_recipient) .build()?; @@ -430,6 +429,7 @@ impl ProductionValidatorClient { .validator_store(validator_store.clone()) .beacon_nodes(beacon_nodes.clone()) .runtime_context(context.service_context("preparation".into())) + .builder_registration_timestamp_override(config.builder_registration_timestamp_override) .build()?; let sync_committee_service = SyncCommitteeService::new( @@ -487,10 +487,7 @@ impl ProductionValidatorClient { self.preparation_service .clone() - .start_update_service( - self.config.private_tx_proposals, - &self.context.eth2_config.spec, - ) + .start_update_service(&self.context.eth2_config.spec) .map_err(|e| format!("Unable to start preparation service: {}", e))?; if let Some(doppelganger_service) = self.doppelganger_service.clone() { diff --git a/validator_client/src/preparation_service.rs b/validator_client/src/preparation_service.rs index 01dfc0ca04c..b138d3e4eeb 100644 --- a/validator_client/src/preparation_service.rs +++ b/validator_client/src/preparation_service.rs @@ -22,12 +22,16 @@ const PROPOSER_PREPARATION_LOOKAHEAD_EPOCHS: u64 = 2; /// Number of epochs to wait before re-submitting validator registration. const EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION: u64 = 1; +/// The number of validator registrations to include per request to the beacon node. +const VALIDATOR_REGISTRATION_BATCH_SIZE: usize = 500; + /// Builds an `PreparationService`. 
pub struct PreparationServiceBuilder { validator_store: Option>>, slot_clock: Option, beacon_nodes: Option>>, context: Option>, + builder_registration_timestamp_override: Option, } impl PreparationServiceBuilder { @@ -37,6 +41,7 @@ impl PreparationServiceBuilder { slot_clock: None, beacon_nodes: None, context: None, + builder_registration_timestamp_override: None, } } @@ -60,6 +65,14 @@ impl PreparationServiceBuilder { self } + pub fn builder_registration_timestamp_override( + mut self, + builder_registration_timestamp_override: Option, + ) -> Self { + self.builder_registration_timestamp_override = builder_registration_timestamp_override; + self + } + pub fn build(self) -> Result, String> { Ok(PreparationService { inner: Arc::new(Inner { @@ -75,6 +88,8 @@ impl PreparationServiceBuilder { context: self .context .ok_or("Cannot build PreparationService without runtime_context")?, + builder_registration_timestamp_override: self + .builder_registration_timestamp_override, validator_registration_cache: RwLock::new(HashMap::new()), }), }) @@ -87,6 +102,7 @@ pub struct Inner { slot_clock: T, beacon_nodes: Arc>, context: RuntimeContext, + builder_registration_timestamp_override: Option, // Used to track unpublished validator registration changes. validator_registration_cache: RwLock>, @@ -137,14 +153,8 @@ impl Deref for PreparationService { } impl PreparationService { - pub fn start_update_service( - self, - start_registration_service: bool, - spec: &ChainSpec, - ) -> Result<(), String> { - if start_registration_service { - self.clone().start_validator_registration_service(spec)?; - } + pub fn start_update_service(self, spec: &ChainSpec) -> Result<(), String> { + self.clone().start_validator_registration_service(spec)?; self.start_proposer_prepare_service(spec) } @@ -208,7 +218,7 @@ impl PreparationService { let validator_registration_fut = async move { loop { // Poll the endpoint immediately to ensure fee recipients are received. - if let Err(e) = self.register_validators(&spec).await { + if let Err(e) = self.register_validators().await { error!(log,"Error during validator registration";"error" => ?e); } @@ -251,35 +261,48 @@ impl PreparationService { } fn collect_preparation_data(&self, spec: &ChainSpec) -> Vec { - self.collect_data(spec, |_, validator_index, fee_recipient| { - ProposerPreparationData { - validator_index, - fee_recipient, + let log = self.context.log(); + self.collect_proposal_data(|pubkey, proposal_data| { + if let Some(fee_recipient) = proposal_data.fee_recipient { + Some(ProposerPreparationData { + // Ignore fee recipients for keys without indices, they are inactive. + validator_index: proposal_data.validator_index?, + fee_recipient, + }) + } else { + if spec.bellatrix_fork_epoch.is_some() { + error!( + log, + "Validator is missing fee recipient"; + "msg" => "update validator_definitions.yml", + "pubkey" => ?pubkey + ); + } + None } }) } - fn collect_validator_registration_keys( - &self, - spec: &ChainSpec, - ) -> Vec { - self.collect_data(spec, |pubkey, _, fee_recipient| { - ValidatorRegistrationKey { - fee_recipient, - //TODO(sean) this is geth's default, we should make this configurable and maybe have the default be dynamic. - // Discussion here: https://github.com/ethereum/builder-specs/issues/17 - gas_limit: 30_000_000, - pubkey, - } + fn collect_validator_registration_keys(&self) -> Vec { + self.collect_proposal_data(|pubkey, proposal_data| { + // We don't log for missing fee recipients here because this will be logged more + // frequently in `collect_preparation_data`. 
+            proposal_data.fee_recipient.and_then(|fee_recipient| {
+                proposal_data
+                    .builder_proposals
+                    .then(|| ValidatorRegistrationKey {
+                        fee_recipient,
+                        gas_limit: proposal_data.gas_limit,
+                        pubkey,
+                    })
+            })
         })
     }
 
-    fn collect_data<G, U>(&self, spec: &ChainSpec, map_fn: G) -> Vec<U>
+    fn collect_proposal_data<G, U>(&self, map_fn: G) -> Vec<U>
     where
-        G: Fn(PublicKeyBytes, u64, Address) -> U,
+        G: Fn(PublicKeyBytes, ProposalData) -> Option<U>,
     {
-        let log = self.context.log();
-
         let all_pubkeys: Vec<_> = self
             .validator_store
             .voting_pubkeys(DoppelgangerStatus::ignored);
@@ -287,23 +310,8 @@ impl PreparationService {
         all_pubkeys
             .into_iter()
             .filter_map(|pubkey| {
-                // Ignore fee recipients for keys without indices, they are inactive.
-                let validator_index = self.validator_store.validator_index(&pubkey)?;
-                let fee_recipient = self.validator_store.get_fee_recipient(&pubkey);
-
-                if let Some(fee_recipient) = fee_recipient {
-                    Some(map_fn(pubkey, validator_index, fee_recipient))
-                } else {
-                    if spec.bellatrix_fork_epoch.is_some() {
-                        error!(
-                            log,
-                            "Validator is missing fee recipient";
-                            "msg" => "update validator_definitions.yml",
-                            "pubkey" => ?pubkey
-                        );
-                    }
-                    None
-                }
+                let proposal_data = self.validator_store.proposal_data(&pubkey)?;
+                map_fn(pubkey, proposal_data)
             })
             .collect()
     }
@@ -341,8 +349,8 @@ impl PreparationService {
     }
 
     /// Register validators with builders, used in the blinded block proposal flow.
-    async fn register_validators(&self, spec: &ChainSpec) -> Result<(), String> {
-        let registration_keys = self.collect_validator_registration_keys(spec);
+    async fn register_validators(&self) -> Result<(), String> {
+        let registration_keys = self.collect_validator_registration_keys();
 
         let mut changed_keys = vec![];
 
@@ -388,10 +396,15 @@
             let signed_data = if let Some(signed_data) = cached_registration_opt {
                 signed_data
             } else {
-                let timestamp = SystemTime::now()
-                    .duration_since(UNIX_EPOCH)
-                    .map_err(|e| format!("{e:?}"))?
-                    .as_secs();
+                let timestamp =
+                    if let Some(timestamp) = self.builder_registration_timestamp_override {
+                        timestamp
+                    } else {
+                        SystemTime::now()
+                            .duration_since(UNIX_EPOCH)
+                            .map_err(|e| format!("{e:?}"))?
+                            .as_secs()
+                    };
 
                 let ValidatorRegistrationKey {
                     fee_recipient,
@@ -426,29 +439,35 @@
            }
 
            if !signed.is_empty() {
-               let signed_ref = signed.as_slice();
-
-               match self
-                   .beacon_nodes
-                   .first_success(RequireSynced::Yes, |beacon_node| async move {
-                       beacon_node
-                           .post_validator_register_validator(signed_ref)
-                           .await
-                   })
-                   .await
-               {
-                   Ok(()) => debug!(
-                       log,
-                       "Published validator registration";
-                       "count" => registration_data_len,
-                   ),
-                   Err(e) => error!(
-                       log,
-                       "Unable to publish validator registration";
-                       "error" => %e,
-                   ),
+               for batch in signed.chunks(VALIDATOR_REGISTRATION_BATCH_SIZE) {
+                   match self
+                       .beacon_nodes
+                       .first_success(RequireSynced::Yes, |beacon_node| async move {
+                           beacon_node.post_validator_register_validator(batch).await
+                       })
+                       .await
+                   {
+                       Ok(()) => info!(
+                           log,
+                           "Published validator registrations to the builder network";
+                           "count" => registration_data_len,
+                       ),
+                       Err(e) => error!(
+                           log,
+                           "Unable to publish validator registrations to the builder network";
+                           "error" => %e,
+                       ),
+                   }
               }
           }
 
          Ok(())
     }
 }
+
+/// A helper struct, used for passing data from the validator store to services.
+pub struct ProposalData {
+    pub(crate) validator_index: Option<u64>,
+    pub(crate) fee_recipient: Option<Address>
, + pub(crate) gas_limit: u64, + pub(crate) builder_proposals: bool, +} diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index de39f912644..f883d0201f3 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -3,6 +3,7 @@ use crate::{ http_metrics::metrics, initialized_validators::InitializedValidators, signing_method::{Error as SigningError, SignableMessage, SigningContext, SigningMethod}, + Config, }; use account_utils::{validator_definitions::ValidatorDefinition, ZeroizeString}; use parking_lot::{Mutex, RwLock}; @@ -27,6 +28,7 @@ use types::{ use validator_dir::ValidatorDir; pub use crate::doppelganger_service::DoppelgangerStatus; +use crate::preparation_service::ProposalData; #[derive(Debug, PartialEq)] pub enum Error { @@ -52,6 +54,11 @@ impl From for Error { /// This acts as a maximum safe-guard against clock drift. const SLASHING_PROTECTION_HISTORY_EPOCHS: u64 = 512; +/// Currently used as the default gas limit in execution clients. +/// +/// https://github.com/ethereum/builder-specs/issues/17 +const DEFAULT_GAS_LIMIT: u64 = 30_000_000; + struct LocalValidator { validator_dir: ValidatorDir, voting_keypair: Keypair, @@ -87,6 +94,8 @@ pub struct ValidatorStore { doppelganger_service: Option>, slot_clock: T, fee_recipient_process: Option
, + gas_limit: Option, + builder_proposals: bool, task_executor: TaskExecutor, _phantom: PhantomData, } @@ -102,7 +111,7 @@ impl ValidatorStore { spec: ChainSpec, doppelganger_service: Option>, slot_clock: T, - fee_recipient_process: Option
, + config: &Config, task_executor: TaskExecutor, log: Logger, ) -> Self { @@ -115,7 +124,9 @@ impl ValidatorStore { log, doppelganger_service, slot_clock, - fee_recipient_process, + fee_recipient_process: config.fee_recipient, + gas_limit: config.gas_limit, + builder_proposals: config.builder_proposals, task_executor, _phantom: PhantomData, } @@ -146,6 +157,7 @@ impl ValidatorStore { /// Insert a new validator to `self`, where the validator is represented by an EIP-2335 /// keystore on the filesystem. + #[allow(clippy::too_many_arguments)] pub async fn add_validator_keystore>( &self, voting_keystore_path: P, @@ -153,12 +165,16 @@ impl ValidatorStore { enable: bool, graffiti: Option, suggested_fee_recipient: Option
, + gas_limit: Option, + builder_proposals: Option, ) -> Result { let mut validator_def = ValidatorDefinition::new_keystore_with_password( voting_keystore_path, Some(password), graffiti.map(Into::into), suggested_fee_recipient, + gas_limit, + builder_proposals, ) .map_err(|e| format!("failed to create validator definitions: {:?}", e))?; @@ -200,6 +216,23 @@ impl ValidatorStore { Ok(validator_def) } + /// Returns `ProposalData` for the provided `pubkey` if it exists in `InitializedValidators`. + /// `ProposalData` fields include defaulting logic described in `get_fee_recipient_defaulting`, + /// `get_gas_limit_defaulting`, and `get_builder_proposals_defaulting`. + pub fn proposal_data(&self, pubkey: &PublicKeyBytes) -> Option { + self.validators + .read() + .validator(pubkey) + .map(|validator| ProposalData { + validator_index: validator.get_index(), + fee_recipient: self + .get_fee_recipient_defaulting(validator.get_suggested_fee_recipient()), + gas_limit: self.get_gas_limit_defaulting(validator.get_gas_limit()), + builder_proposals: self + .get_builder_proposals_defaulting(validator.get_builder_proposals()), + }) + } + /// Attempts to resolve the pubkey to a validator index. /// /// It may return `None` if the `pubkey` is: @@ -366,9 +399,12 @@ impl ValidatorStore { pub fn get_fee_recipient(&self, validator_pubkey: &PublicKeyBytes) -> Option
<Address> {
         // If there is a `suggested_fee_recipient` in the validator definitions yaml
         // file, use that value.
-        self.suggested_fee_recipient(validator_pubkey)
-            // If there's nothing in the file, try the process-level default value.
-            .or(self.fee_recipient_process)
+        self.get_fee_recipient_defaulting(self.suggested_fee_recipient(validator_pubkey))
+    }
+
+    pub fn get_fee_recipient_defaulting(&self, fee_recipient: Option<Address>) -> Option<Address> {
+        // If there's nothing in the file, try the process-level default value.
+        fee_recipient.or(self.fee_recipient_process)
     }
 
     /// Returns the suggested_fee_recipient from `validator_definitions.yml` if any.
@@ -379,6 +415,45 @@
             .suggested_fee_recipient(validator_pubkey)
     }
 
+    /// Returns the gas limit for the given public key. The priority order for fetching
+    /// the gas limit is:
+    ///
+    /// 1. validator_definitions.yml
+    /// 2. process level gas limit
+    /// 3. `DEFAULT_GAS_LIMIT`
+    pub fn get_gas_limit(&self, validator_pubkey: &PublicKeyBytes) -> u64 {
+        self.get_gas_limit_defaulting(self.validators.read().gas_limit(validator_pubkey))
+    }
+
+    fn get_gas_limit_defaulting(&self, gas_limit: Option<u64>) -> u64 {
+        // If there is a `gas_limit` in the validator definitions yaml
+        // file, use that value.
+        gas_limit
+            // If there's nothing in the file, try the process-level default value.
+            .or(self.gas_limit)
+            // If there's no process-level default, use the `DEFAULT_GAS_LIMIT`.
+            .unwrap_or(DEFAULT_GAS_LIMIT)
+    }
+
+    /// Returns a `bool` for the given public key that denotes whether this validator should use the
+    /// builder API. The priority order for fetching this value is:
+    ///
+    /// 1. validator_definitions.yml
+    /// 2. process level flag
+    pub fn get_builder_proposals(&self, validator_pubkey: &PublicKeyBytes) -> bool {
+        // If there is a `builder_proposals` setting in the validator definitions yaml
+        // file, use that value.
+        self.get_builder_proposals_defaulting(
+            self.validators.read().builder_proposals(validator_pubkey),
+        )
+    }
+
+    fn get_builder_proposals_defaulting(&self, builder_proposals: Option<bool>) -> bool {
+        builder_proposals
+            // If there's nothing in the file, try the process-level default value.
+            .unwrap_or(self.builder_proposals)
+    }
+
     pub async fn sign_block<Payload: ExecPayload<E>>(
         &self,
         validator_pubkey: PublicKeyBytes,
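
For reference, the `get_*_defaulting` helpers above all resolve a setting in the same order: the per-validator value from `validator_definitions.yml` wins, then the process-level value supplied to the validator client (e.g. the `--gas-limit` and `--builder-proposals` flags added in this diff), then a hard-coded fallback where one exists. A minimal self-contained sketch of that cascade, using hypothetical stand-in names (`ProcessDefaults`, `effective_gas_limit`, `effective_builder_proposals`) rather than the real types:

const DEFAULT_GAS_LIMIT: u64 = 30_000_000;

// Hypothetical container for the process-level (CLI) defaults.
struct ProcessDefaults {
    gas_limit: Option<u64>,   // --gas-limit
    builder_proposals: bool,  // --builder-proposals
}

fn effective_gas_limit(per_validator: Option<u64>, process: &ProcessDefaults) -> u64 {
    // validator_definitions.yml -> process-level flag -> DEFAULT_GAS_LIMIT.
    per_validator.or(process.gas_limit).unwrap_or(DEFAULT_GAS_LIMIT)
}

fn effective_builder_proposals(per_validator: Option<bool>, process: &ProcessDefaults) -> bool {
    // validator_definitions.yml -> process-level flag (no further fallback needed).
    per_validator.unwrap_or(process.builder_proposals)
}

fn main() {
    let process = ProcessDefaults { gas_limit: None, builder_proposals: false };
    // No per-validator override and no --gas-limit: fall back to 30,000,000.
    assert_eq!(effective_gas_limit(None, &process), DEFAULT_GAS_LIMIT);
    // A validator_definitions.yml entry always wins.
    assert_eq!(effective_gas_limit(Some(25_000_000), &process), 25_000_000);
    assert!(!effective_builder_proposals(None, &process));
    assert!(effective_builder_proposals(Some(true), &process));
}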
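
The preparation service changes above also alter how signed registrations are published: rather than one request containing every registration, they are posted in chunks of `VALIDATOR_REGISTRATION_BATCH_SIZE` (500). A minimal sketch of that batching, with `post_batch` as a hypothetical stand-in for the `post_validator_register_validator` call made through the beacon-node fallback client:

const VALIDATOR_REGISTRATION_BATCH_SIZE: usize = 500;

#[derive(Clone)]
struct SignedRegistration(u64); // stand-in for a signed registration message

fn post_batch(batch: &[SignedRegistration]) -> Result<(), String> {
    // Stand-in for the HTTP POST the real service issues per batch.
    println!("posting {} registrations", batch.len());
    Ok(())
}

fn register_all(signed: &[SignedRegistration]) -> Result<(), String> {
    // Send in bounded batches rather than one oversized request.
    for batch in signed.chunks(VALIDATOR_REGISTRATION_BATCH_SIZE) {
        post_batch(batch)?;
    }
    Ok(())
}

fn main() -> Result<(), String> {
    let signed = vec![SignedRegistration(0); 1250];
    register_all(&signed) // posts batches of 500, 500 and 250
}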
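
Similarly, the new `--builder-registration-timestamp-override` flag feeds the timestamp selection in the `@@ -388,10 +396,15 @@` hunk: use the override when it is set, otherwise take the current UNIX time. A small sketch of that selection, written as a free function rather than the real service method:

use std::time::{SystemTime, UNIX_EPOCH};

fn registration_timestamp(builder_registration_timestamp_override: Option<u64>) -> Result<u64, String> {
    if let Some(timestamp) = builder_registration_timestamp_override {
        // Fixed timestamp supplied on the command line.
        Ok(timestamp)
    } else {
        // Otherwise, the current UNIX time in seconds.
        SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .map(|d| d.as_secs())
            .map_err(|e| format!("{e:?}"))
    }
}

fn main() {
    assert_eq!(registration_timestamp(Some(1_600_000_000)), Ok(1_600_000_000));
    assert!(registration_timestamp(None).unwrap() > 1_600_000_000);
}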