diff --git a/.circleci/continue/main.yml b/.circleci/continue/main.yml index 4dee69e8e6aae..c70084d4b4407 100644 --- a/.circleci/continue/main.yml +++ b/.circleci/continue/main.yml @@ -422,7 +422,7 @@ commands: echo "export RUST_BINARY_PATH_KONA_NODE=$ROOT_DIR/rust/target/release/kona-node" >> "$BASH_ENV" echo "export RUST_BINARY_PATH_KONA_SUPERVISOR=$ROOT_DIR/rust/target/release/kona-supervisor" >> "$BASH_ENV" echo "export RUST_BINARY_PATH_OP_RBUILDER=$BIN_DIR/op-rbuilder" >> "$BASH_ENV" - echo "export RUST_BINARY_PATH_ROLLUP_BOOST=$BIN_DIR/rollup-boost" >> "$BASH_ENV" + echo "export RUST_BINARY_PATH_ROLLUP_BOOST=$ROOT_DIR/rust/target/release/rollup-boost" >> "$BASH_ENV" rust-prepare: parameters: @@ -2167,7 +2167,7 @@ jobs: echo "export RUST_BINARY_PATH_KONA_NODE=$ROOT_DIR/rust/target/release/kona-node" >> "$BASH_ENV" echo "export RUST_BINARY_PATH_KONA_SUPERVISOR=$ROOT_DIR/rust/target/release/kona-supervisor" >> "$BASH_ENV" echo "export RUST_BINARY_PATH_OP_RBUILDER=$BIN_DIR/op-rbuilder" >> "$BASH_ENV" - echo "export RUST_BINARY_PATH_ROLLUP_BOOST=$BIN_DIR/rollup-boost" >> "$BASH_ENV" + echo "export RUST_BINARY_PATH_ROLLUP_BOOST=$ROOT_DIR/rust/target/release/rollup-boost" >> "$BASH_ENV" # Restore cached Go modules - restore_cache: keys: @@ -3202,8 +3202,8 @@ workflows: save_cache: true context: - circleci-repo-readonly-authenticated-github-token - - rust-build-binary: &kona-build-release - name: kona-build-release + - rust-build-binary: &rust-build-release + name: rust-build-release directory: rust profile: "release" features: "default" @@ -3218,18 +3218,10 @@ workflows: needs_clang: true context: - circleci-repo-readonly-authenticated-github-token - - rust-build-submodule: &rust-build-rollup-boost - name: rust-build-rollup-boost - directory: rollup-boost - binaries: "rollup-boost" - build_command: cargo build --release -p rollup-boost --bin rollup-boost - context: - - circleci-repo-readonly-authenticated-github-token - rust-binaries-for-sysgo: requires: - - 
kona-build-release + - rust-build-release - rust-build-op-rbuilder - - rust-build-rollup-boost - go-binaries-for-sysgo # IN-MEMORY (all) - op-acceptance-tests: @@ -3539,19 +3531,17 @@ workflows: context: - circleci-repo-readonly-authenticated-github-token - rust-build-binary: - name: kona-build-release + name: rust-build-release directory: rust needs_clang: true profile: "release" context: - circleci-repo-readonly-authenticated-github-token - rust-build-submodule: *rust-build-op-rbuilder - - rust-build-submodule: *rust-build-rollup-boost - rust-binaries-for-sysgo: requires: - - kona-build-release + - rust-build-release - rust-build-op-rbuilder - - rust-build-rollup-boost - op-acceptance-tests-flake-shake: context: - circleci-repo-readonly-authenticated-github-token diff --git a/.circleci/continue/rust-ci.yml b/.circleci/continue/rust-ci.yml index 852056d981d62..939a3d21d3d19 100644 --- a/.circleci/continue/rust-ci.yml +++ b/.circleci/continue/rust-ci.yml @@ -87,7 +87,7 @@ commands: echo "export RUST_BINARY_PATH_KONA_NODE=$ROOT_DIR/rust/target/release/kona-node" >> "$BASH_ENV" echo "export RUST_BINARY_PATH_KONA_SUPERVISOR=$ROOT_DIR/rust/target/release/kona-supervisor" >> "$BASH_ENV" echo "export RUST_BINARY_PATH_OP_RBUILDER=$BIN_DIR/op-rbuilder" >> "$BASH_ENV" - echo "export RUST_BINARY_PATH_ROLLUP_BOOST=$BIN_DIR/rollup-boost" >> "$BASH_ENV" + echo "export RUST_BINARY_PATH_ROLLUP_BOOST=$ROOT_DIR/rust/target/release/rollup-boost" >> "$BASH_ENV" rust-prepare: parameters: @@ -1003,6 +1003,39 @@ jobs: destination: op-reth-docs + # Rollup Boost integration tests (requires Docker + Redis) + rollup-boost-integration-tests: + machine: + image: <> + docker_layer_caching: true + resource_class: xlarge + steps: + - utils/checkout-with-mise: + checkout-method: blobless + - rust-prepare-and-restore-cache: + directory: rust + prefix: rollup-boost-integration + - install-cargo-binstall + - run: + name: Install nextest + command: | + command -v cargo-nextest >/dev/null || cargo 
binstall --no-confirm cargo-nextest + - run: + name: Install redis-server + # NEEDRESTART_MODE ensures we're not prompted for restarts interactively in CI. + command: | + export NEEDRESTART_MODE=a + sudo -E apt-get update -y + sudo -E apt-get install -y redis-server + - run: + name: Run rollup-boost integration tests + working_directory: rust + no_output_timeout: 40m + command: just test-integration + - rust-save-build-cache: + directory: rust + prefix: rollup-boost-integration + # Kona Link Checker kona-link-checker: docker: @@ -1203,6 +1236,12 @@ workflows: - op-reth-windows-check: context: *rust-ci-context + # ----------------------------------------------------------------------- + # Rollup Boost crate-specific jobs + # ----------------------------------------------------------------------- + - rollup-boost-integration-tests: + context: *rust-ci-context + - rust-ci-cargo-tests: name: op-reth-integration-tests directory: rust diff --git a/.gitmodules b/.gitmodules index 1856b3a961a7d..fa3daeae26ef1 100644 --- a/.gitmodules +++ b/.gitmodules @@ -38,6 +38,3 @@ [submodule "op-rbuilder"] path = op-rbuilder url = https://github.com/flashbots/op-rbuilder -[submodule "rollup-boost"] - path = rollup-boost - url = https://github.com/flashbots/rollup-boost diff --git a/justfile b/justfile index 12e2674c06c2a..8422fb22885a8 100644 --- a/justfile +++ b/justfile @@ -1,8 +1,7 @@ # Build all Rust binaries (release) for sysgo tests. build-rust-release: - cd rust && cargo build --release --bin kona-node --bin kona-supervisor + cd rust && cargo build --release --bin kona-node --bin kona-supervisor --bin rollup-boost cd op-rbuilder && cargo build --release -p op-rbuilder --bin op-rbuilder - cd rollup-boost && cargo build --release -p rollup-boost --bin rollup-boost # Checks that TODO comments have corresponding issues. 
todo-checker: diff --git a/op-acceptance-tests/tests/flashblocks/flashblocks_stream_test.go b/op-acceptance-tests/tests/flashblocks/flashblocks_stream_test.go index 377e201d68943..d99c62a9554ec 100644 --- a/op-acceptance-tests/tests/flashblocks/flashblocks_stream_test.go +++ b/op-acceptance-tests/tests/flashblocks/flashblocks_stream_test.go @@ -42,7 +42,7 @@ func TestFlashblocksStream(gt *testing.T) { ctx, span := tracer.Start(ctx, "test chains") defer span.End() - ctx, cancel := context.WithTimeout(ctx, 30*time.Second) + ctx, cancel := context.WithTimeout(ctx, 2*time.Minute) defer cancel() if flashblocksStreamRate == "" { @@ -96,6 +96,8 @@ func TestFlashblocksStream(gt *testing.T) { listening := true for listening { select { + case <-ctx.Done(): + t.Errorf("timed out waiting for flashblocks streams to complete: %v", ctx.Err()) case <-doneListening: doneListening = nil case <-builderDone: diff --git a/op-devstack/sysgo/l2_cl_kona.go b/op-devstack/sysgo/l2_cl_kona.go index ac154c1cbe2e9..51ff6f4c997ce 100644 --- a/op-devstack/sysgo/l2_cl_kona.go +++ b/op-devstack/sysgo/l2_cl_kona.go @@ -278,7 +278,7 @@ func withKonaNode(l2CLID stack.L2CLNodeID, l1CLID stack.L1CLNodeID, l1ELID stack } execPath, err := EnsureRustBinary(p, RustBinarySpec{ - SrcDir: "kona", + SrcDir: "rust", Package: "kona-node", Binary: "kona-node", }) diff --git a/op-devstack/sysgo/rollup_boost.go b/op-devstack/sysgo/rollup_boost.go index bbbee9a1f4c29..f52598c3d7177 100644 --- a/op-devstack/sysgo/rollup_boost.go +++ b/op-devstack/sysgo/rollup_boost.go @@ -132,7 +132,7 @@ func (r *RollupBoostNode) Start() { r.sub = NewSubProcess(r.p, stdOut, stdErr) execPath, err := EnsureRustBinary(r.p, RustBinarySpec{ - SrcDir: "rollup-boost", + SrcDir: "rust", Package: "rollup-boost", Binary: "rollup-boost", }) diff --git a/op-devstack/sysgo/rust_binary.go b/op-devstack/sysgo/rust_binary.go index cd74a11e897c9..e1f5a09112380 100644 --- a/op-devstack/sysgo/rust_binary.go +++ b/op-devstack/sysgo/rust_binary.go @@ 
-14,7 +14,7 @@ import ( // RustBinarySpec describes a Rust binary to be built and located. type RustBinarySpec struct { - SrcDir string // directory name relative to monorepo root, e.g. "rollup-boost" + SrcDir string // directory name relative to monorepo root, e.g. "rust" or "op-rbuilder" Package string // cargo package name, e.g. "rollup-boost" Binary string // binary name, e.g. "rollup-boost" } diff --git a/op-devstack/sysgo/supervisor_kona.go b/op-devstack/sysgo/supervisor_kona.go index 926c87b225528..7e5c9a3c36cca 100644 --- a/op-devstack/sysgo/supervisor_kona.go +++ b/op-devstack/sysgo/supervisor_kona.go @@ -159,7 +159,7 @@ func WithKonaSupervisor(supervisorID stack.SupervisorID, clusterID stack.Cluster } execPath, err := EnsureRustBinary(p, RustBinarySpec{ - SrcDir: "kona", + SrcDir: "rust", Package: "kona-supervisor", Binary: "kona-supervisor", }) diff --git a/rollup-boost b/rollup-boost deleted file mode 160000 index 196237bab2a02..0000000000000 --- a/rollup-boost +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 196237bab2a02298de994b439e0455abb1ac512f diff --git a/rust/Cargo.lock b/rust/Cargo.lock index 5646536d30543..3db5fafd3ca4a 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -1068,6 +1068,21 @@ dependencies = [ "arbitrary", ] +[[package]] +name = "arc-swap" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ded5f9a03ac8f24d1b8a25101ee812cd32cdc8c50a4c50237de2c4915850e73" +dependencies = [ + "rustversion", +] + +[[package]] +name = "arcstr" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03918c3dbd7701a85c6b9887732e2921175f26c350b4563841d0958c21d57e6d" + [[package]] name = "ark-bls12-381" version = "0.5.0" @@ -1622,14 +1637,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" dependencies = [ "async-trait", - "axum-core", + "axum-core 0.4.5", "bytes", 
"futures-util", "http", "http-body", "http-body-util", "itoa", - "matchit", + "matchit 0.7.3", "memchr", "mime", "percent-encoding", @@ -1642,6 +1657,42 @@ dependencies = [ "tower-service", ] +[[package]] +name = "axum" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b52af3cb4058c895d37317bb27508dccc8e5f2d39454016b297bf4a400597b8" +dependencies = [ + "axum-core 0.5.6", + "base64 0.22.1", + "bytes", + "form_urlencoded", + "futures-util", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-util", + "itoa", + "matchit 0.8.4", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "serde_core", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", + "sha1", + "sync_wrapper", + "tokio", + "tokio-tungstenite 0.28.0", + "tower 0.5.3", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "axum-core" version = "0.4.5" @@ -1662,6 +1713,25 @@ dependencies = [ "tower-service", ] +[[package]] +name = "axum-core" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08c78f31d7b1291f7ee735c1c6780ccde7785daae9a9206026862dab7d8792d1" +dependencies = [ + "bytes", + "futures-core", + "http", + "http-body", + "http-body-util", + "mime", + "pin-project-lite", + "sync_wrapper", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "backoff" version = "0.4.0" @@ -2051,6 +2121,56 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "bollard" +version = "0.18.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97ccca1260af6a459d75994ad5acc1651bcabcbdbc41467cc9786519ab854c30" +dependencies = [ + "base64 0.22.1", + "bollard-stubs", + "bytes", + "futures-core", + "futures-util", + "hex", + "home", + "http", + "http-body-util", + "hyper", + "hyper-named-pipe", + "hyper-rustls", + "hyper-util", + "hyperlocal", + "log", + "pin-project-lite", + "rustls", + "rustls-native-certs", + 
"rustls-pemfile", + "rustls-pki-types", + "serde", + "serde_derive", + "serde_json", + "serde_repr", + "serde_urlencoded", + "thiserror 2.0.18", + "tokio", + "tokio-util", + "tower-service", + "url", + "winapi", +] + +[[package]] +name = "bollard-stubs" +version = "1.47.1-rc.27.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f179cfbddb6e77a5472703d4b30436bff32929c0aa8a9008ecf23d1d3cdd0da" +dependencies = [ + "serde", + "serde_repr", + "serde_with", +] + [[package]] name = "borsh" version = "1.6.0" @@ -2901,6 +3021,22 @@ dependencies = [ "typenum", ] +[[package]] +name = "ctor" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec09e802f5081de6157da9a75701d6c713d8dc3ba52571fd4bd25f412644e8a6" +dependencies = [ + "ctor-proc-macro", + "dtor", +] + +[[package]] +name = "ctor-proc-macro" +version = "0.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2931af7e13dc045d8e9d26afccc6fa115d64e115c9c84b1166288b46f6782c2" + [[package]] name = "ctr" version = "0.9.2" @@ -3335,6 +3471,17 @@ dependencies = [ "syn 2.0.114", ] +[[package]] +name = "docker_credential" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d89dfcba45b4afad7450a99b39e751590463e45c04728cf555d36bb66940de8" +dependencies = [ + "base64 0.21.7", + "serde", + "serde_json", +] + [[package]] name = "doctest-file" version = "1.0.0" @@ -3368,6 +3515,21 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c3cf4824e2d5f025c7b531afcb2325364084a16806f6d47fbc1f5fbd9960590" +[[package]] +name = "dtor" +version = "0.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97cbdf2ad6846025e8e25df05171abfb30e3ababa12ee0a0e44b9bbe570633a8" +dependencies = [ + "dtor-proc-macro", +] + +[[package]] +name = "dtor-proc-macro" +version = "0.0.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7454e41ff9012c00d53cf7f475c5e3afa3b91b7c90568495495e8d9bf47a1055" + [[package]] name = "dunce" version = "1.0.5" @@ -3488,12 +3650,6 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" -[[package]] -name = "endian-type" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c34f04666d835ff5d62e058c3995147c06f42fe86ff053337632bca83e42702d" - [[package]] name = "enr" version = "0.13.0" @@ -3582,6 +3738,17 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "etcetera" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "136d1b5283a1ab77bd9257427ffd09d8667ced0570b6f938942bc7568ed5b943" +dependencies = [ + "cfg-if", + "home", + "windows-sys 0.48.0", +] + [[package]] name = "ethereum_hashing" version = "0.7.0" @@ -3939,6 +4106,36 @@ version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" +[[package]] +name = "flashblocks-websocket-proxy" +version = "0.1.0" +dependencies = [ + "axum 0.8.8", + "backoff", + "brotli", + "clap", + "dotenvy", + "futures", + "hostname", + "http", + "metrics", + "metrics-derive", + "metrics-exporter-prometheus", + "redis 0.30.0", + "redis-test", + "reqwest 0.13.2", + "serde_json", + "testcontainers", + "testcontainers-modules", + "thiserror 2.0.18", + "tokio", + "tokio-tungstenite 0.28.0", + "tokio-util", + "tracing", + "tracing-subscriber 0.3.22", + "uuid", +] + [[package]] name = "flate2" version = "1.1.9" @@ -4583,6 +4780,26 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "home" +version = "0.5.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc627f471c528ff0c4a49e1d5e60450c8f6461dd6d10ba9dcd3a61d3dff7728d" 
+dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "hostname" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "617aaa3557aef3810a6369d0a99fac8a080891b68bd9f9812a1eeda0c0730cbd" +dependencies = [ + "cfg-if", + "libc", + "windows-link", +] + [[package]] name = "http" version = "1.4.0" @@ -4713,6 +4930,21 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-named-pipe" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73b7d8abf35697b81a825e386fc151e0d503e8cb5fcb93cc8669c376dfd6f278" +dependencies = [ + "hex", + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", + "winapi", +] + [[package]] name = "hyper-rustls" version = "0.27.7" @@ -4787,6 +5019,21 @@ dependencies = [ "windows-registry", ] +[[package]] +name = "hyperlocal" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "986c5ce3b994526b3cd75578e62554abd09f0899d6206de48b3e96ab34ccc8c7" +dependencies = [ + "hex", + "http-body-util", + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", +] + [[package]] name = "iana-time-zone" version = "0.1.65" @@ -5535,7 +5782,7 @@ dependencies = [ "kona-registry", "libc", "libp2p", - "metrics-exporter-prometheus 0.18.1", + "metrics-exporter-prometheus", "metrics-process", "rstest", "serde", @@ -5681,7 +5928,7 @@ dependencies = [ "kona-protocol", "kona-registry", "metrics", - "metrics-exporter-prometheus 0.18.1", + "metrics-exporter-prometheus", "op-alloy-consensus", "op-alloy-network", "op-alloy-provider", @@ -7184,6 +7431,12 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" +[[package]] +name = "matchit" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" + [[package]] name = "memchr" version = "2.8.0" @@ -7229,27 +7482,6 @@ dependencies = [ "syn 2.0.114", ] -[[package]] -name = "metrics-exporter-prometheus" -version = "0.16.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd7399781913e5393588a8d8c6a2867bf85fb38eaf2502fdce465aad2dc6f034" -dependencies = [ - "base64 0.22.1", - "http-body-util", - "hyper", - "hyper-rustls", - "hyper-util", - "indexmap 2.13.0", - "ipnet", - "metrics", - "metrics-util 0.19.1", - "quanta", - "thiserror 1.0.69", - "tokio", - "tracing", -] - [[package]] name = "metrics-exporter-prometheus" version = "0.18.1" @@ -7263,7 +7495,7 @@ dependencies = [ "indexmap 2.13.0", "ipnet", "metrics", - "metrics-util 0.20.1", + "metrics-util", "quanta", "thiserror 2.0.18", "tokio", @@ -7286,26 +7518,6 @@ dependencies = [ "windows 0.62.2", ] -[[package]] -name = "metrics-util" -version = "0.19.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8496cc523d1f94c1385dd8f0f0c2c480b2b8aeccb5b7e4485ad6365523ae376" -dependencies = [ - "aho-corasick", - "crossbeam-epoch", - "crossbeam-utils", - "hashbrown 0.15.5", - "indexmap 2.13.0", - "metrics", - "ordered-float", - "quanta", - "radix_trie", - "rand 0.9.2", - "rand_xoshiro", - "sketches-ddsketch", -] - [[package]] name = "metrics-util" version = "0.20.1" @@ -7605,15 +7817,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "nibble_vec" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a5d83df9f36fe23f0c3648c6bbb8b0298bb5f1939c8f2704431371f4b84d43" -dependencies = [ - "smallvec", -] - [[package]] name = "nix" version = "0.26.4" @@ -8182,7 +8385,6 @@ dependencies = [ "thiserror 2.0.18", "tokio", "tonic 0.12.3", - "tracing", ] [[package]] @@ -8252,11 +8454,9 @@ dependencies = [ "opentelemetry 0.28.0", "percent-encoding", "rand 0.8.5", - "serde_json", "thiserror 2.0.18", "tokio", 
"tokio-stream", - "tracing", ] [[package]] @@ -8280,15 +8480,6 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" -[[package]] -name = "ordered-float" -version = "4.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bb71e1b3fa6ca1c61f383464aaf2bb0e2f8e772a1f01d486832464de363b951" -dependencies = [ - "num-traits", -] - [[package]] name = "p256" version = "0.13.2" @@ -8381,6 +8572,31 @@ dependencies = [ "windows-link", ] +[[package]] +name = "parse-display" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "914a1c2265c98e2446911282c6ac86d8524f495792c38c5bd884f80499c7538a" +dependencies = [ + "parse-display-derive", + "regex", + "regex-syntax", +] + +[[package]] +name = "parse-display-derive" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ae7800a4c974efd12df917266338e79a7a74415173caf7e70aa0a0707345281" +dependencies = [ + "proc-macro2", + "quote", + "regex", + "regex-syntax", + "structmeta", + "syn 2.0.114", +] + [[package]] name = "paste" version = "1.0.15" @@ -9041,16 +9257,6 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" -[[package]] -name = "radix_trie" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c069c179fcdc6a2fe24d8d18305cf085fdbd4f922c041943e203685d6a1c58fd" -dependencies = [ - "endian-type", - "nibble_vec", -] - [[package]] name = "rancor" version = "0.1.1" @@ -9261,6 +9467,60 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3edd4d5d42c92f0a659926464d4cce56b562761267ecf0f469d85b7de384175" +[[package]] +name = "redis" +version = "0.30.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "438a4e5f8e9aa246d6f3666d6978441bf1b37d5f417b50c4dd220be09f5fcc17" +dependencies = [ + "arc-swap", + "combine", + "itoa", + "num-bigint", + "percent-encoding", + "ryu", + "sha1_smol", + "socket2 0.5.10", + "url", +] + +[[package]] +name = "redis" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e969d1d702793536d5fda739a82b88ad7cbe7d04f8386ee8cd16ad3eff4854a5" +dependencies = [ + "arcstr", + "combine", + "itoa", + "percent-encoding", + "ryu", + "socket2 0.6.2", + "url", + "xxhash-rust", +] + +[[package]] +name = "redis-test" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5143ae9e73f2ff0f3509af5e3a056b48bac2d1e1caa093257f20a9e68ef7534f" +dependencies = [ + "rand 0.9.2", + "redis 1.0.3", + "socket2 0.6.2", + "tempfile", +] + +[[package]] +name = "redox_syscall" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +dependencies = [ + "bitflags 1.3.2", +] + [[package]] name = "redox_syscall" version = "0.5.18" @@ -9431,13 +9691,17 @@ dependencies = [ "http-body", "http-body-util", "hyper", + "hyper-tls", "hyper-util", "js-sys", "log", + "native-tls", "percent-encoding", "pin-project-lite", + "rustls-pki-types", "sync_wrapper", "tokio", + "tokio-native-tls", "tower 0.5.3", "tower-http", "tower-service", @@ -11171,9 +11435,9 @@ dependencies = [ "jsonrpsee-server", "mappings", "metrics", - "metrics-exporter-prometheus 0.18.1", + "metrics-exporter-prometheus", "metrics-process", - "metrics-util 0.20.1", + "metrics-util", "pprof_util", "procfs", "reqwest 0.12.28", @@ -13072,18 +13336,20 @@ dependencies = [ [[package]] name = "rollup-boost" version = "0.7.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76d1d7c635dec67c86346eb871e8a22dd1596c33d4a96a9a4926b4d2fd703b63" dependencies = [ + "alloy-consensus", + "alloy-eips", 
"alloy-primitives", "alloy-rlp", "alloy-rpc-types-engine", "alloy-rpc-types-eth", "alloy-serde", + "anyhow", "backoff", "blake3", "bytes", "clap", + "ctor", "dashmap", "dotenvy", "ed25519-dalek", @@ -13099,23 +13365,30 @@ dependencies = [ "lru 0.16.3", "metrics", "metrics-derive", - "metrics-exporter-prometheus 0.16.2", - "metrics-util 0.19.1", + "metrics-exporter-prometheus", + "metrics-util", "moka", + "op-alloy-consensus", "op-alloy-rpc-types-engine", "opentelemetry 0.28.0", "opentelemetry-otlp 0.28.0", "opentelemetry_sdk 0.28.0", "parking_lot", "paste", + "rand 0.9.2", + "reqwest 0.13.2", "rollup-boost-types", "rustls", "serde", "serde_json", + "serial_test", "sha2", + "tempfile", + "testcontainers", "thiserror 2.0.18", + "time", "tokio", - "tokio-tungstenite 0.26.2", + "tokio-tungstenite 0.28.0", "tokio-util", "tower 0.5.3", "tower-http", @@ -13131,8 +13404,6 @@ dependencies = [ [[package]] name = "rollup-boost-types" version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "756f32c7f241ab6d91d823e94d20f6e0729bfcaec3b545bd30f33b24e50f5821" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -13336,6 +13607,15 @@ dependencies = [ "security-framework 3.5.1", ] +[[package]] +name = "rustls-pemfile" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" +dependencies = [ + "rustls-pki-types", +] + [[package]] name = "rustls-pki-types" version = "1.14.0" @@ -13697,6 +13977,17 @@ dependencies = [ "zmij", ] +[[package]] +name = "serde_path_to_error" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10a9ff822e371bb5403e391ecd83e182e0e77ba7f6fe0160b795797109d1b457" +dependencies = [ + "itoa", + "serde", + "serde_core", +] + [[package]] name = "serde_regex" version = "1.1.0" @@ -13817,6 +14108,12 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "sha1_smol" +version 
= "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbfa15b3dddfee50a0fff136974b3e1bde555604ba463834a7eb7deb6417705d" + [[package]] name = "sha2" version = "0.10.9" @@ -14103,6 +14400,29 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" +[[package]] +name = "structmeta" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e1575d8d40908d70f6fd05537266b90ae71b15dbbe7a8b7dffa2b759306d329" +dependencies = [ + "proc-macro2", + "quote", + "structmeta-derive", + "syn 2.0.114", +] + +[[package]] +name = "structmeta-derive" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "152a0b65a590ff6c3da95cabe2353ee04e6167c896b28e3b14478c2636c922fc" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.114", +] + [[package]] name = "strum" version = "0.27.2" @@ -14355,6 +14675,44 @@ dependencies = [ "test-case-core", ] +[[package]] +name = "testcontainers" +version = "0.23.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59a4f01f39bb10fc2a5ab23eb0d888b1e2bb168c157f61a1b98e6c501c639c74" +dependencies = [ + "async-trait", + "bollard", + "bollard-stubs", + "bytes", + "docker_credential", + "either", + "etcetera", + "futures", + "log", + "memchr", + "parse-display", + "pin-project-lite", + "serde", + "serde_json", + "serde_with", + "thiserror 2.0.18", + "tokio", + "tokio-stream", + "tokio-tar", + "tokio-util", + "url", +] + +[[package]] +name = "testcontainers-modules" +version = "0.11.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d43ed4e8f58424c3a2c6c56dbea6643c3c23e8666a34df13c54f0a184e6c707" +dependencies = [ + "testcontainers", +] + [[package]] name = "testing_table" version = "0.3.0" @@ -14589,6 +14947,21 @@ dependencies = [ "tokio-util", ] +[[package]] +name = 
"tokio-tar" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d5714c010ca3e5c27114c1cdeb9d14641ace49874aa5626d7149e47aedace75" +dependencies = [ + "filetime", + "futures-core", + "libc", + "redox_syscall 0.3.5", + "tokio", + "tokio-stream", + "xattr", +] + [[package]] name = "tokio-tungstenite" version = "0.26.2" @@ -14597,11 +14970,9 @@ checksum = "7a9daff607c6d2bf6c16fd681ccb7eecc83e4e2cdc1ca067ffaadfca5de7f084" dependencies = [ "futures-util", "log", - "native-tls", "rustls", "rustls-pki-types", "tokio", - "tokio-native-tls", "tokio-rustls", "tungstenite 0.26.2", "webpki-roots 0.26.11", @@ -14615,7 +14986,9 @@ checksum = "d25a406cddcc431a75d3d9afc6a7c0f7428d4891dd973e4d54c56b46127bf857" dependencies = [ "futures-util", "log", + "native-tls", "tokio", + "tokio-native-tls", "tungstenite 0.28.0", ] @@ -14693,7 +15066,7 @@ checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" dependencies = [ "async-stream", "async-trait", - "axum", + "axum 0.7.9", "base64 0.22.1", "bytes", "h2", @@ -15099,7 +15472,6 @@ dependencies = [ "http", "httparse", "log", - "native-tls", "rand 0.9.2", "rustls", "rustls-pki-types", @@ -15119,6 +15491,7 @@ dependencies = [ "http", "httparse", "log", + "native-tls", "rand 0.9.2", "sha1", "thiserror 2.0.18", @@ -16291,6 +16664,12 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0637d3a5566a82fa5214bae89087bc8c9fb94cd8e8a3c07feb691bb8d9c632db" +[[package]] +name = "xxhash-rust" +version = "0.8.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdd20c5420375476fbd4394763288da7eb0cc0b8c11deed431a91562af7335d3" + [[package]] name = "yamux" version = "0.12.1" diff --git a/rust/Cargo.toml b/rust/Cargo.toml index ef5b20677543e..bc3ea0b296c71 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -44,6 +44,11 @@ members = [ "op-reth/crates/txpool/", "op-reth/examples/*", + # Rollup Boost + 
"rollup-boost/crates/rollup-boost", + "rollup-boost/crates/websocket-proxy", + "rollup-boost/crates/rollup-boost-types", + # Alloy OP EVM "alloy-op-evm/", @@ -56,6 +61,7 @@ default-members = [ "kona/bin/node", "kona/bin/supervisor", "op-reth/bin/", + "rollup-boost/crates/rollup-boost", ] # ==================== WORKSPACE METADATA ==================== @@ -232,7 +238,6 @@ kona-host = { path = "kona/bin/host", version = "1.0.2", default-features = fals kona-client = { path = "kona/bin/client", version = "1.0.2", default-features = false } # Protocol -kona-comp = { path = "kona/crates/batcher/comp", version = "0.4.5", default-features = false } kona-derive = { path = "kona/crates/protocol/derive", version = "0.4.5", default-features = false } kona-interop = { path = "kona/crates/protocol/interop", version = "0.4.5", default-features = false } kona-genesis = { path = "kona/crates/protocol/genesis", version = "0.4.5", default-features = false } @@ -487,6 +492,8 @@ c-kzg = { version = "2.1.5", default-features = false } enr = { version = "0.13", default-features = false } k256 = { version = "0.13", default-features = false, features = ["ecdsa"] } sha2 = { version = "0.10.9", default-features = false } +blake3 = "1" +ed25519-dalek = { version = "2", features = ["serde"] } secp256k1 = { version = "0.31.1", default-features = false } ark-ff = { version = "0.5.0", default-features = false } ark-bls12-381 = { version = "0.5.0", default-features = false } @@ -522,6 +529,10 @@ tracing = { version = "0.1.44", default-features = false } tracing-appender = "0.2.4" tracing-loki = "0.2.6" tracing-subscriber = { version = "0.3.22", default-features = false } +opentelemetry = { version = "0.28.0", default-features = false } +opentelemetry-otlp = { version = "0.28.0", default-features = false } +opentelemetry_sdk = { version = "0.28.0", default-features = false } +tracing-opentelemetry = "0.29.0" # ==================== TESTING ==================== arbitrary = { version = "1.4.2", 
features = ["derive"] } @@ -537,10 +548,17 @@ proptest-derive = "0.7" proptest-arbitrary-interop = "0.1.0" rstest = "0.26.1" similar-asserts = { version = "1.7.0", features = ["serde"] } -tempfile = "3.24.0" +assert_cmd = "2.0.10" +ctor = "0.4.1" +predicates = "3.1.2" +redis-test = "1" serial_test = "3" +tempfile = "3.24.0" test-case = "3" test-fuzz = "7.2.5" +testcontainers = "0.23" +testcontainers-modules = { version = "0.11", features = ["redis"] } +time = { version = "0.3.36", features = ["macros", "formatting", "parsing"] } # ==================== COMPRESSION ==================== alloc-no-stdlib = "2.0.4" @@ -559,9 +577,14 @@ ipnet = "2.11.0" libp2p = "0.56.0" libp2p-stream = "0.4.0-alpha" libp2p-identity = "0.2.13" +axum = "0.8.1" +dotenvy = "0.15.7" +hostname = "0.4.0" +hyper-rustls = "0.27.0" openssl = "0.10.75" # ==================== MISC ==================== +backoff = "0.4.0" ambassador = "0.5.0" anyhow = { version = "1.0.101", default-features = false } aquamarine = "0.6" @@ -592,6 +615,7 @@ eyre = "0.6.12" fdlimit = "0.3.0" fixed-map = { version = "0.9", default-features = false } glob = "0.3.3" +hex = "0.4" http = "1.4.0" http-body = "1.0" http-body-util = "0.1.3" @@ -623,9 +647,11 @@ rand_08 = { package = "rand", version = "0.8" } ratatui = "0.30.0" rayon = "1.11.0" reqwest = { version = "0.13.2", default-features = false } +redis = "0.30.0" ringbuffer = "0.16.0" -rollup-boost = "0.7.13" -rollup-boost-types = "0.1.0" +rollup-boost = { path = "rollup-boost/crates/rollup-boost" } +rollup-boost-types = { version = "0.1.0", path = "rollup-boost/crates/rollup-boost-types" } +flashblocks-websocket-proxy = { path = "rollup-boost/crates/websocket-proxy" } rustc-hash = { version = "2.1", default-features = false } rustls = { version = "0.23", default-features = false } rustls-pemfile = { version = "2.2", default-features = false } @@ -642,6 +668,7 @@ tar = "0.4.44" thiserror = { version = "2.0.18", default-features = false } unsigned-varint = "0.8.0" url = { 
version = "2.5.8", default-features = false } +uuid = { version = "1.17.0", features = ["v4"] } vergen = "9.1.0" vergen-git2 = "9.1.0" byteorder = "1" @@ -670,3 +697,6 @@ op-alloy-rpc-types = { path = "op-alloy/crates/rpc-types" } op-alloy = { path = "op-alloy/crates/op-alloy" } # Duplicated by: alloy-evm (crates.io) alloy-op-hardforks = { path = "alloy-op-hardforks/" } +# Rollup Boost local paths +rollup-boost = { path = "rollup-boost/crates/rollup-boost" } +rollup-boost-types = { path = "rollup-boost/crates/rollup-boost-types" } diff --git a/rust/deny.toml b/rust/deny.toml index b8bd5086ad1dd..484859241ff63 100644 --- a/rust/deny.toml +++ b/rust/deny.toml @@ -17,6 +17,10 @@ ignore = [ "RUSTSEC-2025-0012", # bincode is unmaintained but still functional; transitive dep from reth-nippy-jar and test-fuzz. "RUSTSEC-2025-0141", + # rustls-pemfile unmaintained; dev-dep via testcontainers -> bollard. + "RUSTSEC-2025-0134", + # tokio-tar PAX header vulnerability; dev-dep via testcontainers. No fix available. 
+ "RUSTSEC-2025-0111", # https://rustsec.org/advisories/RUSTSEC-2026-0002 lru unused directly: https://github.com/alloy-rs/alloy/pull/3460 "RUSTSEC-2026-0002", ] @@ -104,7 +108,6 @@ allow-git = [ "https://github.com/paradigmxyz/jsonrpsee", "https://github.com/paradigmxyz/revm-inspectors", "https://github.com/foundry-rs/block-explorers", - "https://github.com/flashbots/rollup-boost", ] unknown-registry = "warn" unknown-git = "deny" diff --git a/rust/justfile b/rust/justfile index 4b109a8643f59..5723b185f170d 100644 --- a/rust/justfile +++ b/rust/justfile @@ -37,9 +37,14 @@ build-op-reth: # Run all tests (unit + doc tests) test: test-unit test-docs -# Run unit tests (excluding online tests) +# Run unit tests (excluding online and integration tests) test-unit *args="-E '!test(test_online)'": - cargo nextest run --workspace --all-features {{args}} + cargo nextest run --workspace --all-features --exclude rollup-boost --exclude flashblocks-websocket-proxy {{args}} + cargo nextest run -p rollup-boost -p flashblocks-websocket-proxy {{args}} + +# Run integration tests (requires Docker and redis-server) +test-integration *args='': + cargo nextest run -p rollup-boost -p flashblocks-websocket-proxy --features integration {{args}} # Run online tests only test-online: diff --git a/rust/kona/crates/node/service/tests/rollup_boost_missing_jwt.rs b/rust/kona/crates/node/service/tests/rollup_boost_missing_jwt.rs index ba2a06032e2ec..143f489c23d3c 100644 --- a/rust/kona/crates/node/service/tests/rollup_boost_missing_jwt.rs +++ b/rust/kona/crates/node/service/tests/rollup_boost_missing_jwt.rs @@ -32,7 +32,7 @@ mod tests { }, // Default is ExecutionMode::Enabled in the crate; rely on that or set explicitly if // needed. 
- flashblocks_ws: Some(FlashblocksWsArgs { + flashblocks_ws: FlashblocksWsArgs { flashblocks_ws: true, flashblocks_builder_url: "ws://127.0.0.1:1111".parse().unwrap(), flashblocks_host: "127.0.0.1".to_string(), @@ -44,7 +44,7 @@ mod tests { flashblock_builder_ws_ping_interval_ms: 500, flashblock_builder_ws_pong_timeout_ms: 1500, }, - }), + }, flashblocks_p2p: None, block_selection_policy: None, execution_mode: ExecutionMode::Enabled, diff --git a/rust/op-reth/justfile b/rust/op-reth/justfile index 497347fe8c3fe..1d38671cbddad 100644 --- a/rust/op-reth/justfile +++ b/rust/op-reth/justfile @@ -13,6 +13,7 @@ test edge='': RUST_BACKTRACE=1 cargo nextest run \ --features "asm-keccak {{edge}}" --locked \ --workspace \ + --exclude rollup-boost --exclude flashblocks-websocket-proxy --exclude rollup-boost-types \ --no-tests=warn \ -E "!kind(test) and not binary(e2e_testsuite) and not test(test_online)" diff --git a/rust/rollup-boost/.dockerignore b/rust/rollup-boost/.dockerignore new file mode 100644 index 0000000000000..fd4efc2258d8d --- /dev/null +++ b/rust/rollup-boost/.dockerignore @@ -0,0 +1,29 @@ +# Version control +.github +.gitignore + +# Include for vergen constants +!/.git + +# Build artifacts +**/target +integration_logs + +# Development files +.vscode +.env +**/*.log + + +# Cargo artifacts +**/.cargo +**/Cargo.lock + +# Documentation +**/*.md +docs/ + +# Scripts that aren't needed for runtime +scripts/ +tests/ +**/integration \ No newline at end of file diff --git a/rust/rollup-boost/.env.example b/rust/rollup-boost/.env.example new file mode 100644 index 0000000000000..e38739aaee190 --- /dev/null +++ b/rust/rollup-boost/.env.example @@ -0,0 +1,32 @@ +# RPC Client Args +BUILDER_URL=http://localhost:8552 +BUILDER_JWT_TOKEN=688f5d737bad920bdfb2fc2f488d6b6209eebda1dae949a8de91398d932c517a +# Optional +# BUILDER_JWT_PATH= +BUILDER_TIMEOUT=1000 + +L2_URL=http://localhost:8551 +L2_JWT_TOKEN=688f5d737bad920bdfb2fc2f488d6b6209eebda1dae949a8de91398d932c517a +# Optional 
+# L2_JWT_PATH= +L2_TIMEOUT=1000 + +# RPC Server Args +RPC_HOST=0.0.0.0 +RPC_PORT=8081 + +# Debug Server Args +DEBUG_HOST=127.0.0.1 +DEBUG_SERVER_PORT=5555 + +# Flashblocks Args +FLASHBLOCKS=false +FLASHBLOCKS_BUILDER_URL=ws://localhost:1111 +FLASHBLOCKS_HOST=127.0.0.1 +FLASHBLOCKS_PORT=1112 + +# Extra Args +TRACING=false +LOG_LEVEL=info +METRICS=false +LOG_FORMAT=text diff --git a/rust/rollup-boost/.gitignore b/rust/rollup-boost/.gitignore new file mode 100644 index 0000000000000..861c887544114 --- /dev/null +++ b/rust/rollup-boost/.gitignore @@ -0,0 +1,8 @@ +/target +/.idea/ +/.vscode/ +.env +websocket-proxy/.env +websocket-proxy/target +integration_logs +.vscode diff --git a/rust/rollup-boost/Dockerfile b/rust/rollup-boost/Dockerfile new file mode 100644 index 0000000000000..382cca0213ac5 --- /dev/null +++ b/rust/rollup-boost/Dockerfile @@ -0,0 +1,72 @@ +# +# Base container (with sccache and cargo-chef) +# +# - https://github.com/mozilla/sccache +# - https://github.com/LukeMathWalker/cargo-chef +# +# Based on https://depot.dev/blog/rust-dockerfile-best-practices +# +FROM rust:1.88.0 AS base + +ARG FEATURES +ARG RELEASE=true + +RUN cargo install sccache --version ^0.9 +RUN cargo install cargo-chef --version ^0.1 + +RUN apt-get update \ + && apt-get install -y clang libclang-dev gcc + +ENV CARGO_HOME=/usr/local/cargo +ENV RUSTC_WRAPPER=sccache +ENV SCCACHE_DIR=/sccache + +# +# Planner container (running "cargo chef prepare") +# +FROM base AS planner +WORKDIR /app + +COPY . . 
+ +RUN --mount=type=cache,target=/usr/local/cargo/registry \ + --mount=type=cache,target=/usr/local/cargo/git \ + --mount=type=cache,target=$SCCACHE_DIR,sharing=locked \ + cargo chef prepare --recipe-path recipe.json + +# +# Builder container (running "cargo chef cook" and "cargo build --release") +# +FROM base AS builder +WORKDIR /app +# Default binary filename +ARG SERVICE_NAME="rollup-boost" +COPY --from=planner /app/recipe.json recipe.json + +RUN --mount=type=cache,target=$SCCACHE_DIR,sharing=locked \ + PROFILE_FLAG=$([ "$RELEASE" = "true" ] && echo "--release" || echo "") && \ + cargo chef cook $PROFILE_FLAG --recipe-path recipe.json + +COPY . . + +RUN --mount=type=cache,target=/usr/local/cargo/registry \ + --mount=type=cache,target=/usr/local/cargo/git \ + --mount=type=cache,target=$SCCACHE_DIR,sharing=locked \ + PROFILE_FLAG=$([ "$RELEASE" = "true" ] && echo "--release" || echo "") && \ + TARGET_DIR=$([ "$RELEASE" = "true" ] && echo "release" || echo "debug") && \ + cargo build $PROFILE_FLAG --features="$FEATURES" --package=${SERVICE_NAME}; \ + cp target/$TARGET_DIR/${SERVICE_NAME} /tmp/final_binary + +# +# Runtime container +# +FROM gcr.io/distroless/cc-debian12 +WORKDIR /app + +ARG SERVICE_NAME="rollup-boost" +# Copy binary with its proper service name +COPY --from=builder /tmp/final_binary /usr/local/bin/${SERVICE_NAME} +# Also copy as a fixed entrypoint name +COPY --from=builder /tmp/final_binary /usr/local/bin/entrypoint + +ENTRYPOINT ["/usr/local/bin/entrypoint"] diff --git a/rust/rollup-boost/Justfile b/rust/rollup-boost/Justfile new file mode 100644 index 0000000000000..a08f32edbfb3b --- /dev/null +++ b/rust/rollup-boost/Justfile @@ -0,0 +1,17 @@ +set positional-arguments + +# Run rollup-boost tests +test *args='': + cargo nextest run -p rollup-boost -p rollup-boost-types -p flashblocks-websocket-proxy {{args}} + +# Build rollup-boost binary (release) +build-release: + cargo build --release --bin rollup-boost + +# Build docker image +build-docker: + 
docker buildx build --build-arg RELEASE=true -t flashbots/rollup-boost:develop . + +# Build docker debug image +build-docker-debug: + docker buildx build --build-arg RELEASE=false -t flashbots/rollup-boost:develop . diff --git a/rust/rollup-boost/LICENSE b/rust/rollup-boost/LICENSE new file mode 100644 index 0000000000000..a9e1fd39fa2ad --- /dev/null +++ b/rust/rollup-boost/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 Flashbots + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/rust/rollup-boost/Makefile b/rust/rollup-boost/Makefile new file mode 100644 index 0000000000000..abec98aa39127 --- /dev/null +++ b/rust/rollup-boost/Makefile @@ -0,0 +1,51 @@ +# Heavily inspired by Lighthouse: https://github.com/sigp/lighthouse/blob/stable/Makefile +# and Reth: https://github.com/paradigmxyz/reth/blob/main/Makefile +.DEFAULT_GOAL := help + +GIT_VER ?= $(shell git describe --tags --always --dirty="-dev") +GIT_TAG ?= $(shell git describe --tags --abbrev=0) + +FEATURES ?= + +##@ Help +.PHONY: help +help: ## Display this help. + @awk 'BEGIN {FS = ":.*##"; printf "Usage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +.PHONY: v +v: ## Show the current version + @echo "Version: ${GIT_VER}" + +##@ Build + +.PHONY: clean +clean: ## Clean up + cargo clean + +.PHONY: build +build: ## Build (debug version) + cargo build --features "$(FEATURES)" + +.PHONY: docker-image +docker-image: ## Build a rollup-boost Docker image + docker build --platform linux/amd64 --build-arg FEATURES="$(FEATURES)" . 
-t rollup-boost + +##@ Dev + +.PHONY: lint +lint: ## Run the linters + cargo fmt -- --check + cargo clippy --features "$(FEATURES)" -- -D warnings + +.PHONY: test +test: ## Run the tests for rollup-boost + cargo test --verbose --features "$(FEATURES)" + +.PHONY: lt +lt: lint test ## Run "lint" and "test" + +.PHONY: fmt +fmt: ## Format the code + cargo fmt + cargo fix --allow-staged + cargo clippy --features "$(FEATURES)" --fix --allow-staged diff --git a/rust/rollup-boost/README.md b/rust/rollup-boost/README.md new file mode 100644 index 0000000000000..0d86898aad2e6 --- /dev/null +++ b/rust/rollup-boost/README.md @@ -0,0 +1,209 @@ +![Rollup Boost banner](./assets/Rollup-Boost_brand-assets/rollup-boost_banner.png) + +[![Test status](https://github.com/flashbots/rollup-boost/actions/workflows/test.yml/badge.svg?branch=main)](https://github.com/flashbots/rollup-boost/actions?query=workflow%3A%22Tests%22) + +# Rollup Boost + +Rollup Boost is a block builder sidecar for Optimism Stack chains to enable external block production. To read more about the design, check out the [design doc](https://github.com/ethereum-optimism/design-docs/blob/main/protocol/external-block-production.md). 
+ +## Usage + +Run the rollup-boost server using the following command: + +``` +cargo run --bin rollup-boost -- [OPTIONS] +``` + +### Command-line Options + +- `--l2-jwt-token `: JWT token for L2 authentication (required) +- `--l2-jwt-path `: Path to the L2 JWT secret file (required if `--l2-jwt-token` is not provided) +- `--l2-url `: URL of the local L2 execution engine (required) +- `--builder-url `: URL of the builder execution engine (required) +- `--builder-jwt-token `: JWT token for builder authentication (required) +- `--builder-jwt-path `: Path to the builder JWT secret file (required if `--builder-jwt-token` is not provided) +- `--rpc-host `: Host to run the server on (default: 127.0.0.1) +- `--rpc-port `: Port to run the server on (default: 8081) +- `--tracing`: Enable tracing (default: false) +- `--log-level `: Log level (default: info) +- `--log-format `: Log format (default: text) +- `--metrics`: Enable metrics (default: false) +- `--metrics-host `: Host to run the metrics server on (default: 127.0.0.1) +- `--debug-host `: Host to run the server on (default: 127.0.0.1) +- `--debug-server-port `: Port to run the debug server on (default: 5555) + +### Environment Variables + +You can also set the options using environment variables. See .env.example to use the default values. + +### Example + +``` +cargo run --bin rollup-boost -- --l2-jwt-token your_jwt_token --l2-url http://localhost:8545 --builder-jwt-token your_jwt_token --builder-url http://localhost:8546 +``` + +## Core System Workflow + +1. `rollup-boost` receives an `engine_FCU` with the attributes to initiate block building: + - It relays the call to proposer `op-geth` as usual and multiplexes the call to builder. + - The FCU call returns the proposer payload id and internally maps the builder payload id to proposer payload id in the case the payload ids are not the same. +2. When `rollup-boost` receives an `engine_getPayload`: + - It queries proposer `op-geth` for a fallback block. 
+ - In parallel, it queries builder for a block. +3. Upon receiving the builder block: + - `rollup-boost` validates the block with proposer `op-geth` using `engine_newPayload`. + - This validation ensures the block will be valid for proposer `op-geth`, preventing network stalls due to invalid blocks. + - If the external block is valid, it is returned to the proposer `op-node`. Otherwise, `rollup-boost` will return the fallback block. +4. The proposer `op-node` sends a `engine_newPayload` request to `rollup-boost` and another `engine_FCU` without attributes to update chain state. + - `rollup-boost` just relays the calls to proposer `op-geth`. + - Note that since we already called `engine_newPayload` on the proposer `op-geth` in the previous step, the block should be cached and add minimal latency. + - The builder `op-node` will receive blocks via p2p gossip and keep the builder node in sync via the engine api. + +```mermaid +sequenceDiagram + box Proposer + participant op-node + participant rollup-boost + participant op-geth + end + box Builder + participant builder-op-node as op-node + participant builder-op-geth as builder + end + + Note over op-node, builder-op-geth: 1. Triggering Block Building + op-node->>rollup-boost: engine_FCU (with attrs) + rollup-boost->>op-geth: engine_FCU (with attrs) + rollup-boost->>builder-op-geth: engine_FCU (with attrs) + rollup-boost->>op-node: proposer payload id + + Note over op-node, builder-op-geth: 2. Get Local and Builder Blocks + op-node->>rollup-boost: engine_getPayload + rollup-boost->>op-geth: engine_getPayload + rollup-boost->>builder-op-geth: engine_getPayload + + Note over op-node, builder-op-geth: 3. Validating and Returning Builder Block + rollup-boost->>op-geth: engine_newPayload + op-geth->>rollup-boost: block validity + rollup-boost->>op-node: block payload + + Note over op-node, builder-op-geth: 4. 
Updating Chain State + op-node->>rollup-boost: engine_newPayload + rollup-boost->>op-geth: engine_newPayload + op-node->>rollup-boost: engine_FCU (without attrs) + rollup-boost->>op-geth: engine_FCU (without attrs) +``` + +## RPC Calls + +By default, `rollup-boost` will proxy all RPC calls from the proposer `op-node` to its local `op-geth` node. These are the list of RPC calls that are proxied to both the proposer and the builder execution engines: + +- `engine_forkchoiceUpdatedV3`: this call is only multiplexed to the builder if the call contains payload attributes and the no_tx_pool attribute is false. +- `engine_getPayloadV3`: this is used to get the builder block. +- `miner_*`: this allows the builder to be aware of changes in effective gas price, extra data, and [DA throttling requests](https://docs.optimism.io/builders/chain-operators/configuration/batcher) from the batcher. +- `eth_sendRawTransaction*`: this forwards transactions the proposer receives to the builder for block building. This call may not come from the proposer `op-node`, but directly from the rollup's rpc engine. + +## Debug API + +The Debug API is a JSON-RPC API that can be used to configure rollup-boost's execution mode. The execution mode determines how rollup-boost makes requests to the builder: + +- `enabled`: The builder receives all the engine API calls from rollup-boost. +- `dry-run`: The builder receives all the engine API calls from rollup-boost except for the get payload request. +- `disabled`: The builder does not receive any engine API calls from rollup-boost. This allows rollup-boost to stop sending requests to the builder during runtime without needing a restart. + +By default, the debug server runs on port 5555. + +### Specification + +The debug API implements the following methods: + +#### `debug_setExecutionMode` + +Sets the execution mode of rollup-boost. + +**Params** + +- execution_mode: The new execution mode (available options 'dry_run', 'enabled' or 'disabled'). 
+ +**Returns** + +- `execution_mode`: The new execution mode. + +**Example** + +To set dry run mode: + +```bash +curl -X POST -H "Content-Type: application/json" --data '{ + "jsonrpc": "2.0", + "id": 1, + "method": "debug_setExecutionMode", + "params": [{"execution_mode":"dry_run"}] +}' http://localhost:5555 +``` + +To disable rollup-boost calls to the builder: + +```bash +curl -X POST -H "Content-Type: application/json" --data '{ + "jsonrpc": "2.0", + "id": 1, + "method": "debug_setExecutionMode", + "params": [{"execution_mode":"disabled"}] +}' http://localhost:5555 +``` + +#### `debug_getExecutionMode` + +Gets the current execution mode of rollup-boost. + +**Params** + +None + +**Returns** + +- `execution_mode`: The current execution mode. + +**Example** + +```bash +curl -X POST -H "Content-Type: application/json" --data '{ + "jsonrpc": "2.0", + "id": 1, + "method": "debug_getExecutionMode", + "params": [] +}' http://localhost:5555 +``` + +### Debug Command + +`rollup-boost` also includes a debug command to interact with the debug API from rollup-boost. + +This is useful for testing interactions with external block builders in a production environment without jeopardizing OP stack liveness, especially for network upgrades. + +### Usage + +To run rollup-boost in debug mode with a specific execution mode, you can use the following command: + +``` +rollup-boost debug set-execution-mode [enabled|dry-run|disabled] +``` + +## Maintainers + +- [@avalonche](https://github.com/avalonche) +- [@ferranbt](https://github.com/ferranbt) +- [@0xOsiris](https://github.com/0xOsiris) +- [@0xKitsune](https://github.com/0xKitsune) +- [Eric Woolsey](https://github.com/0xForerunner) + +## License + +The code in this project is free software under the [MIT License](/LICENSE). + +--- + +Made with ☀️ by the ⚡🤖 collective. 
+ +🎨 For Rollup Boost brand guidelines and to download brand assets, [click here](https://desert-shelf-a90.notion.site/Rollup-Boost-Brand-Guide-1c08490ae44f80ecaafee5975f71228f). diff --git a/rust/rollup-boost/RELEASE.md b/rust/rollup-boost/RELEASE.md new file mode 100644 index 0000000000000..a93056bc99126 --- /dev/null +++ b/rust/rollup-boost/RELEASE.md @@ -0,0 +1,81 @@ +# Rollup Boost Release Process + +Guide for the release process of rollup-boost. + +1. **Pre-Release Checks** + +First of all, check all the CI tests and checks are running fine in the latest main branch. + +Check out the latest main branch: + +```bash +git checkout main +git pull origin main +``` + +Then run the following commands to check the code is working fine: + +```bash +make lint +make test +git status # should show no changes + +# Start rollup-boost with the example .env config +cargo run --bin rollup-boost -- + +# Call the health endpoint +curl localhost:8081/healthz +``` + +2. **Release Candidate Process** + +- Tag release candidate version: + +```bash +git tag -s v0.5-rc1 +git push origin --tags +``` + +- Test Docker image: + +```bash +docker pull flashbots/rollup-boost:0.5rc1 +``` + +3. 
**Testing & Validation** + +- Test the docker image in internal testnets + - Check no error logs in rollup-boost + - Check no error logs in the builder or op-node + - Check chain liveness is healthy and blocks are produced + - Check the builder is landing blocks onchain by seeing if the builder transaction is included in the chain + - Check metrics to see if there is any anomaly such as latency or blocks delivered + - Use [contender](https://github.com/flashbots/contender) or other transaction spammer to check transactions are being included in the block +- Coordinate testing with external partners on testnets + - Generally the same checklist as for internal testnets + - Ask external partners if they observe any latency issues or error logs in rollup-boost +- Collect sign-offs from: + - Op-Stack operators running rollup-boost + - Code reviewers (check [recent contributors](https://github.com/flashbots/rollup-boost/graphs/contributors)) + +4. **Final Release Steps** + +- Create final tags: + +```bash +git tag -s v0.5 +``` + +- Push all changes: `git push origin main --tags` +- Finalize GitHub release draft with: + - Change log and new features + - Breaking changes to the API or command line arguments + - Compatibility with the latest OP Stack and relevant forks + - Usage instructions (`rollup-boost --help`) + +5. 
**Post-Release Tasks** + +- Informing the community and relevant stakeholders, such as: + - Internal team comms + - External rollup-boost partners + - Optimism Discord diff --git a/rust/rollup-boost/assets/1-1-builder-rb.png b/rust/rollup-boost/assets/1-1-builder-rb.png new file mode 100644 index 0000000000000..6be8eaf21e063 Binary files /dev/null and b/rust/rollup-boost/assets/1-1-builder-rb.png differ diff --git a/rust/rollup-boost/assets/Rollup-Boost_brand-assets/rollup-boost_banner.png b/rust/rollup-boost/assets/Rollup-Boost_brand-assets/rollup-boost_banner.png new file mode 100644 index 0000000000000..ec8f37115bc95 Binary files /dev/null and b/rust/rollup-boost/assets/Rollup-Boost_brand-assets/rollup-boost_banner.png differ diff --git a/rust/rollup-boost/assets/Rollup-Boost_brand-assets/rollup-boost_logo-black.svg b/rust/rollup-boost/assets/Rollup-Boost_brand-assets/rollup-boost_logo-black.svg new file mode 100644 index 0000000000000..a03e15abfe52b --- /dev/null +++ b/rust/rollup-boost/assets/Rollup-Boost_brand-assets/rollup-boost_logo-black.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/rust/rollup-boost/assets/Rollup-Boost_brand-assets/rollup-boost_logo-bright-green.svg b/rust/rollup-boost/assets/Rollup-Boost_brand-assets/rollup-boost_logo-bright-green.svg new file mode 100644 index 0000000000000..c8004658d15c9 --- /dev/null +++ b/rust/rollup-boost/assets/Rollup-Boost_brand-assets/rollup-boost_logo-bright-green.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/rust/rollup-boost/assets/Rollup-Boost_brand-assets/rollup-boost_logo-green.svg b/rust/rollup-boost/assets/Rollup-Boost_brand-assets/rollup-boost_logo-green.svg new file mode 100644 index 0000000000000..e47b30735fe90 --- /dev/null +++ b/rust/rollup-boost/assets/Rollup-Boost_brand-assets/rollup-boost_logo-green.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/rust/rollup-boost/assets/Rollup-Boost_brand-assets/rollup-boost_logo-white.svg 
b/rust/rollup-boost/assets/Rollup-Boost_brand-assets/rollup-boost_logo-white.svg new file mode 100644 index 0000000000000..b01fdb2650028 --- /dev/null +++ b/rust/rollup-boost/assets/Rollup-Boost_brand-assets/rollup-boost_logo-white.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/rust/rollup-boost/assets/op-stack-ha.png b/rust/rollup-boost/assets/op-stack-ha.png new file mode 100644 index 0000000000000..aa7308641cdf6 Binary files /dev/null and b/rust/rollup-boost/assets/op-stack-ha.png differ diff --git a/rust/rollup-boost/assets/rollup-boost-architecture.png b/rust/rollup-boost/assets/rollup-boost-architecture.png new file mode 100644 index 0000000000000..e4627333800ee Binary files /dev/null and b/rust/rollup-boost/assets/rollup-boost-architecture.png differ diff --git a/rust/rollup-boost/assets/rollup-boost-op-conductor.png b/rust/rollup-boost/assets/rollup-boost-op-conductor.png new file mode 100644 index 0000000000000..6c6c45c8e635d Binary files /dev/null and b/rust/rollup-boost/assets/rollup-boost-op-conductor.png differ diff --git a/rust/rollup-boost/book/book.toml b/rust/rollup-boost/book/book.toml new file mode 100644 index 0000000000000..2cf72305d9da2 --- /dev/null +++ b/rust/rollup-boost/book/book.toml @@ -0,0 +1,22 @@ +[book] +authors = ["Rollup Boost Core Contributors"] +language = "en" +src = "src" +title = "Rollup Boost" +description = "The Rollup Boost is a sidecar to enable rollup extensions" + +[output.html] +theme = "theme" +git-repository-url = "https://github.com/flashbots/rollup-boost" +default-theme = "ayu" +no-section-label = true + +[output.html.fold] +enable = true +level = 1 + +[preprocessor.mermaid] +command = "mdbook-mermaid" + +[build] +build-dir = "book" diff --git a/rust/rollup-boost/book/src/SUMMARY.md b/rust/rollup-boost/book/src/SUMMARY.md new file mode 100644 index 0000000000000..fec78f8faf72d --- /dev/null +++ b/rust/rollup-boost/book/src/SUMMARY.md @@ -0,0 +1,23 @@ +# Table of Contents + +- [Introduction](./intro.md) 
+ +- [Architecture](architecture/README.md) + - [External Block Building](./architecture/external-building.md) + - [Rollup Boost Sidecar](./architecture/sidecar.md) + +- [Rollup Boost Modules](./modules/README.md) + - [Flashblocks](./modules/flashblocks.md) + - [Flashtestations](./modules/flashtestations.md) + +- [For Rollup Operators](./operators/README.md) + - [Running Rollup Boost Locally](./operators/local.md) + - [Running Rollup Boost in Production](./operators/production.md) + - [High Availability Setup](./operators/ha-setup.md) + +- [For DApp Developers](./developers/README.md) + - [Flashblocks Data over JSON RPC](./developers/flashblocks-rpc.md) + +- [CLI Reference](./cli/README.md) + - [rollup-boost](./cli/rollup-boost.md) + - [websocket-proxy](./cli/websocket-proxy.md) diff --git a/rust/rollup-boost/book/src/architecture/README.md b/rust/rollup-boost/book/src/architecture/README.md new file mode 100644 index 0000000000000..4b50059b8a4e3 --- /dev/null +++ b/rust/rollup-boost/book/src/architecture/README.md @@ -0,0 +1,5 @@ +# Architecture + +[External Block Building](./external-building.md) + +[Rollup Boost Sidecar](./sidecar.md) \ No newline at end of file diff --git a/rust/rollup-boost/book/src/architecture/external-building.md b/rust/rollup-boost/book/src/architecture/external-building.md new file mode 100644 index 0000000000000..cd34c0c31492b --- /dev/null +++ b/rust/rollup-boost/book/src/architecture/external-building.md @@ -0,0 +1,9 @@ +# External Block Building + +Much like [mev-boost](https://github.com/flashbots/mev-boost) on Ethereum layer 1, rollup-boost is a sidecar that runs alongside the sequencer in Optimism chains to source blocks from external block builders. Unlike mev-boost, there are no code changes or extra config to support external block building in the consensus node as rollup-boost reuses the existing Engine API to source external blocks. 
+ +![architecture](https://raw.githubusercontent.com/flashbots/rollup-boost/refs/heads/main/assets/rollup-boost-architecture.png) + +Instead of pointing to the local execution client, the rollup operator simply needs to point the consensus node to rollup-boost and configure rollup-boost to connect to both the local execution client and the external block builder. + +To read more about the design, check out the [design doc](https://github.com/ethereum-optimism/design-docs/blob/main/protocol/external-block-production.md) for how rollup-boost integrates into the Optimism stack. diff --git a/rust/rollup-boost/book/src/architecture/sidecar.md b/rust/rollup-boost/book/src/architecture/sidecar.md new file mode 100644 index 0000000000000..08715a9de6af4 --- /dev/null +++ b/rust/rollup-boost/book/src/architecture/sidecar.md @@ -0,0 +1,78 @@ +# Architecture Overview + +Rollup Boost modifies the workflow of the Engine API to enable block building and flashblocks. It uses the JWT token in the Engine API as authentication for the builder and multiplexes the RPC calls to the builder to source external blocks. + +## Core System Workflow + +1. `rollup-boost` receives an `engine_FCU` with the attributes to initiate block building: + - It relays the call to proposer `op-geth` as usual and multiplexes the call to builder. + - The FCU call returns the proposer payload id and internally maps the builder payload id to proposer payload id in the case the payload ids are not the same. +2. When `rollup-boost` receives an `engine_getPayload`: + - It queries proposer `op-geth` for a fallback block. + - In parallel, it queries builder for a block. +3. Upon receiving the builder block: + - `rollup-boost` validates the block with proposer `op-geth` using `engine_newPayload`. + - This validation ensures the block will be valid for proposer `op-geth`, preventing network stalls due to invalid blocks. + - If the external block is valid, it is returned to the proposer `op-node`. 
Otherwise, `rollup-boost` will return the fallback block. +4. The proposer `op-node` sends a `engine_newPayload` request to `rollup-boost` and another `engine_FCU` without attributes to update chain state. + - `rollup-boost` just relays the calls to proposer `op-geth`. + - Note that since we already called `engine_newPayload` on the proposer `op-geth` in the previous step, the block should be cached and add minimal latency. + - The builder `op-node` will receive blocks via p2p gossip and keep the builder node in sync via the engine api. + +```mermaid +%%{init: {'theme': 'base', 'themeVariables': { 'background': '#f4f4f4', 'primaryColor': '#2c3e50', 'primaryTextColor': '#ffffff', 'primaryBorderColor': '#34495e', 'lineColor': '#34495e', 'secondaryColor': '#ecf0f1', 'tertiaryColor': '#bdc3c7'}}}%% +sequenceDiagram + box Proposer + participant op-node + participant rollup-boost + participant op-geth + end + box Builder + participant builder-op-node as op-node + participant builder-op-geth as builder + end + + Note over op-node, builder-op-geth: 1. Triggering Block Building + op-node->>rollup-boost: engine_FCU (with attrs) + rollup-boost->>op-geth: engine_FCU (with attrs) + rollup-boost->>builder-op-geth: engine_FCU (with attrs) + rollup-boost->>op-node: proposer payload id + + Note over op-node, builder-op-geth: 2. Get Local and Builder Blocks + op-node->>rollup-boost: engine_getPayload + rollup-boost->>op-geth: engine_getPayload + rollup-boost->>builder-op-geth: engine_getPayload + + Note over op-node, builder-op-geth: 3. Validating and Returning Builder Block + rollup-boost->>op-geth: engine_newPayload + op-geth->>rollup-boost: block validity + rollup-boost->>op-node: block payload + + Note over op-node, builder-op-geth: 4. 
Updating Chain State + op-node->>rollup-boost: engine_newPayload + rollup-boost->>op-geth: engine_newPayload + op-node->>rollup-boost: engine_FCU (without attrs) + rollup-boost->>op-geth: engine_FCU (without attrs) +``` + +## RPC Calls + +By default, `rollup-boost` will proxy all RPC calls from the proposer `op-node` to its local `op-geth` node. These are the list of RPC calls that are proxied to both the proposer and the builder execution engines: + +- `engine_forkchoiceUpdated`: this call is only multiplexed to the builder if the call contains payload attributes and the no_tx_pool attribute is false. +- `engine_getPayload`: this is used to get the builder block. +- `miner_*`: this allows the builder to be aware of changes in effective gas price, extra data, and [DA throttling requests](https://docs.optimism.io/builders/chain-operators/configuration/batcher) from the batcher. +- `eth_sendRawTransaction*`: this forwards transactions the proposer receives to the builder for block building. This call may not come from the proposer `op-node`, but directly from the rollup's rpc engine. + +### Boost Sync + +`rollup-boost` will use boost sync by default to sync directly with the proposer `op-node` via the Engine API. Boost sync improves the performance of keeping the builder in sync with the tip of the chain by removing the need to receive chain updates via p2p from the builder `op-node` once the builder is synced. This entails additional engine api calls that are multiplexed to the builder from rollup-boost: + +- `engine_forkchoiceUpdated`: this call will be multiplexed to the builder regardless of whether the call contains payload attributes or not. +- `engine_newPayload`: ensures the builder has the latest block if the local payload was used. + +## Reorgs + +Rollup-boost remains unaffected by blockchain reorganizations due to its stateless design as a pure proxy layer between the consensus layer (op-node) and execution engines. 
+ +When reorgs impact the sequencing epoch derivation or cause drift in the L2 chain state, rollup-boost simply proxies all Engine API calls—including fork choice updates reflecting the new canonical chain and payload requests for reorg recovery—directly to both the builder and local execution client without maintaining any state about the reorganization. The actual reorg handling, including re-deriving the correct L2 blocks from the updated sequencing windows and managing any resulting drift, is performed by the underlying execution engines (e.g op-geth, op-reth) which receive these reorg signals through the standard Engine API methods that rollup-boost forwards. \ No newline at end of file diff --git a/rust/rollup-boost/book/src/cli/README.md b/rust/rollup-boost/book/src/cli/README.md new file mode 100644 index 0000000000000..192780970b071 --- /dev/null +++ b/rust/rollup-boost/book/src/cli/README.md @@ -0,0 +1,5 @@ +# CLI Reference + +[rollup-boost](./rollup-boost.md) + +[websocket-proxy](./websocket-proxy.md) \ No newline at end of file diff --git a/rust/rollup-boost/book/src/cli/rollup-boost.md b/rust/rollup-boost/book/src/cli/rollup-boost.md new file mode 100644 index 0000000000000..fd904de1b75ec --- /dev/null +++ b/rust/rollup-boost/book/src/cli/rollup-boost.md @@ -0,0 +1,19 @@ +# rollup-boost + +### Command-line Options + +- `--l2-jwt-token `: JWT token for L2 authentication (required) +- `--l2-jwt-path `: Path to the L2 JWT secret file (required if `--l2-jwt-token` is not provided) +- `--l2-url `: URL of the local L2 execution engine (required) +- `--builder-url `: URL of the builder execution engine (required) +- `--builder-jwt-token `: JWT token for builder authentication (required) +- `--builder-jwt-path `: Path to the builder JWT secret file (required if `--builder-jwt-token` is not provided) +- `--rpc-host `: Host to run the server on (default: 127.0.0.1) +- `--rpc-port `: Port to run the server on (default: 8081) +- `--tracing`: Enable tracing 
(default: false) +- `--log-level `: Log level (default: info) +- `--log-format `: Log format (default: text) +- `--metrics`: Enable metrics (default: false) +- `--metrics-host `: Host to run the metrics server on (default: 127.0.0.1) +- `--debug-host `: Host to run the debug server on (default: 127.0.0.1) +- `--debug-server-port `: Port to run the debug server on (default: 5555) \ No newline at end of file diff --git a/rust/rollup-boost/book/src/cli/websocket-proxy.md b/rust/rollup-boost/book/src/cli/websocket-proxy.md new file mode 100644 index 0000000000000..06742c6d82d89 --- /dev/null +++ b/rust/rollup-boost/book/src/cli/websocket-proxy.md @@ -0,0 +1,49 @@ +# websocket-proxy + +## Command-line Options + +### Connection Configuration + +- `--listen-addr
`: The address and port to listen on for incoming connections (default: 0.0.0.0:8545) +- `--upstream-ws `: WebSocket URI of the upstream server to connect to (required, can specify multiple with comma separation) + +### Rate Limiting + +- `--instance-connection-limit `: Maximum number of concurrently connected clients per instance (default: 100) +- `--per-ip-connection-limit `: Maximum number of concurrently connected clients per IP (default: 10) +- `--redis-url `: Redis URL for distributed rate limiting (e.g., redis://localhost:6379). If not provided, in-memory rate limiting will be used +- `--redis-key-prefix `: Prefix for Redis keys (default: flashblocks) + +### Message Handling + +- `--message-buffer-size `: Number of messages to buffer for lagging clients (default: 20) +- `--enable-compression`: Enable Brotli compression on messages to downstream clients (default: false) +- `--ip-addr-http-header
`: Header to use to determine the client's origin IP (default: X-Forwarded-For) + +### Authentication + +- `--api-keys `: API keys to allow in format `:,:`. If not provided, the endpoint will be unauthenticated + +### Logging + +- `--log-level `: Log level (default: info) +- `--log-format `: Format for logs, can be json or text (default: text) + +### Metrics + +- `--metrics`: Enable Prometheus metrics (default: true) +- `--metrics-addr
`: Address to run the metrics server on (default: 0.0.0.0:9000) +- `--metrics-global-labels `: Tags to add to every metrics emitted in format `label1=value1,label2=value2` (default: "") +- `--metrics-host-label`: Add the hostname as a label to all Prometheus metrics (default: false) + +### Upstream Connection Management + +- `--subscriber-max-interval-ms `: Maximum backoff allowed for upstream connections in milliseconds (default: 20000) +- `--subscriber-ping-interval-ms `: Interval in milliseconds between ping messages sent to upstream servers to detect unresponsive connections (default: 2000) +- `--subscriber-pong-timeout-ms `: Timeout in milliseconds to wait for pong responses from upstream servers before considering the connection dead (default: 4000) + +### Client Health Checks + +- `--client-ping-enabled`: Enable ping/pong client health checks (default: false) +- `--client-ping-interval-ms `: Interval in milliseconds to send ping messages to clients (default: 15000) +- `--client-pong-timeout-ms `: Timeout in milliseconds to wait for pong response from clients (default: 30000) diff --git a/rust/rollup-boost/book/src/developers/README.md b/rust/rollup-boost/book/src/developers/README.md new file mode 100644 index 0000000000000..da10faa252085 --- /dev/null +++ b/rust/rollup-boost/book/src/developers/README.md @@ -0,0 +1,3 @@ +# For DApp Developers + +[Flashblocks Data over JSON RPC](./flashblocks-rpc.md) \ No newline at end of file diff --git a/rust/rollup-boost/book/src/developers/flashblocks-rpc.md b/rust/rollup-boost/book/src/developers/flashblocks-rpc.md new file mode 100644 index 0000000000000..a71017683a5a3 --- /dev/null +++ b/rust/rollup-boost/book/src/developers/flashblocks-rpc.md @@ -0,0 +1,35 @@ +# Reading Flashblocks Data over Ethereum JSON RPC + +The Flashblocks RPC implementation provides preconfirmation data through modified Ethereum JSON-RPC endpoints using the `pending` tag. 
This allows applications to access transaction and state information from Flashblocks before they are finalized on the blockchain. + +To run a node capable of serving Flashblocks, consider using https://github.com/base/node-reth/ or https://github.com/paradigmxyz/reth implementations. + +At the time of writing base version has more advanced flashblocks support, including flashblocks sync over multiple blocks in case canonical head is lagging. + +## Data Flow + +The Flashblocks RPC implementation follows this data flow: + +1. **WebSocket Connection**: Establishes connection to rollup-boost Flashblocks endpoint +2. **Payload Reception**: Receives `FlashblocksPayloadV1` messages containing transaction batches +3. **Cache Processing**: Updates in-memory cache with new transaction and state data +4. **RPC Queries**: Responds to client requests using cached pending data +5. **State Management**: Maintains consistency between confirmed and pending states + +## Flashblocks Payload Structure + +Flashblocks payloads contain the following information: + +```rust,ignore +pub struct FlashblocksPayloadV1 { + pub version: PayloadVersion, + pub execution_payload: OpExecutionPayloadEnvelope, + pub metadata: Metadata, +} + +pub struct Metadata { + pub receipts: HashMap, + pub new_account_balances: HashMap, + pub block_number: u64, +} +``` \ No newline at end of file diff --git a/rust/rollup-boost/book/src/intro.md b/rust/rollup-boost/book/src/intro.md new file mode 100644 index 0000000000000..aeb0e8e8330c9 --- /dev/null +++ b/rust/rollup-boost/book/src/intro.md @@ -0,0 +1,76 @@ +# Rollup Boost + +Rollup Boost is a lightweight sidecar for rollups that enables rollup extensions. These extensions provide an efficient, decentralized, and verifiable block building platform for rollups. + +## What is Rollup Boost? 
+ +[Rollup Boost](https://github.com/flashbots/rollup-boost/) is a sequencer sidecar that uses the [Engine API](https://specs.optimism.io/protocol/exec-engine.html#engine-api) in the Optimism stack to enable its rollup extensions. Its scalable modules allow for faster confirmation times, stronger user guarantees, and more. + +It requires no modification to the OP stack software and allows rollup operators to connect to an external builder. + +It is designed and developed by [Flashbots](https://flashbots.net/) under an MIT license. We invite developers, rollup operators, and researchers to join in developing this open-source software to achieve efficiency and decentralization in the rollup ecosystem. + +## Who is this for? + +Rollup Boost is designed for: + +- **Rollup Operators**: Teams running OP Stack or compatible rollups who want to improve efficiency, reduce confirmation times and have more control over block construction. + +- **Rollup Developers**: Engineers building on rollups to unlock new use cases with rollup extensions. + +- **Block Builders**: Teams focused on MEV who want to offer specialized block building services for rollups. + +- **Researchers**: Those exploring new approaches to MEV and block production strategies. + +## What are the design goals of Rollup Boost? + +**Simplicity** + +Rollup Boost was designed with minimal complexity, maintaining a lightweight setup that avoids additional latency. + +By focusing on simplicity, Rollup Boost integrates smoothly into the OP stack without disrupting existing op-node and execution engine communication. + +This way, we prioritize the reliability of the block proposal hot path over feature richness. We achieve this by striving for a stateless design and limiting scope creep. + +**Modularity** + +Recognizing that rollups have varying needs, Rollup Boost was built with extensibility in mind. 
It is designed to support custom block-building features, allowing operators to implement modifications for custom block selection rules. + +**Liveness Protection** + +To safeguard against liveness risks, Rollup Boost offers a local block production fallback if there is a failure in the external builder. + +Rollup Boost also guards against invalid blocks from the builder by validating the blocks against the local execution engine. This ensures the proposer always has a fallback path and minimizes liveness risks of the chain. + +All modules in Rollup Boost are designed and tested with a fallback scenario in mind to maintain high performance and uptime. + +**Compatibility** + +Rollup Boost allows operators to continue using the standard `op-node` and `op-geth` / `op-reth` software without any custom forks. + +It is also compatible with `op-conductor` when running sequencers with a high availability setup. + +It aims to be at parity with the performance of using a vanilla proposer setup without Rollup Boost. + +
+ +For more details on the design philosophy and testing strategy of Rollup Boost, see this [doc](https://github.com/flashbots/rollup-boost/blob/main/docs/design-philosophy-testing-strategy.md). + +## Is it secure? + +Rollup Boost underwent a security audit on May 11, 2025, with [Nethermind](https://www.nethermind.io). See the full report [here](https://github.com/flashbots/rollup-boost/blob/main/docs/NM_0411_0491_Security_Review_World_Rollup_Boost.pdf). + +In addition to the audit, we have an extensive suite of integration tests in our CI to ensure the whole flow works end-to-end with the OP stack as well as with external builders. + +## Getting Help + +- GitHub Issues: Open an [issue](https://github.com/flashbots/rollup-boost/issues/new) for bugs or feature requests +- Forum: Join the [forum](https://collective.flashbots.net/c/rollup-boost) for development updates and research discussions + +## Sections + +Here are some useful sections to jump to: + +- Run Rollup Boost with the full Op stack setup locally by following this [guide](./operators/local.md). +- Read about how [flashblocks](./modules/flashblocks.md) work in Rollup Boost +- Query the [JSON-RPC](./developers/flashblocks-rpc.md) of a Flashblocks-enabled node using Foundry's `cast` or `curl`. 
\ No newline at end of file diff --git a/rust/rollup-boost/book/src/modules/README.md b/rust/rollup-boost/book/src/modules/README.md new file mode 100644 index 0000000000000..ea2926e5bb347 --- /dev/null +++ b/rust/rollup-boost/book/src/modules/README.md @@ -0,0 +1,5 @@ +# Rollup Boost Modules + +[Flashblocks](./flashblocks.md) + +[Flashtestations](./flashtestations.md) \ No newline at end of file diff --git a/rust/rollup-boost/book/src/modules/flashblocks.md b/rust/rollup-boost/book/src/modules/flashblocks.md new file mode 100644 index 0000000000000..390de7eeffdc9 --- /dev/null +++ b/rust/rollup-boost/book/src/modules/flashblocks.md @@ -0,0 +1,48 @@ +# Flashblocks + +Flashblocks is a rollup-boost module that enables fast confirmation times by breaking down block construction into smaller, incremental sections. This feature allows for pre-confirmations and improved user experience through faster block finalization. + +## Architecture + +The Flashblocks setup consists of three main components: + +- **rollup-boost**: The main service with Flashblocks enabled +- **op-rbuilder**: A builder with Flashblocks support +- **op-reth**: A fallback builder (standard EL node) + +It utilizes WebSockets to stream Flashblocks from the builder to rollup-boost to minimize the latency between the Flashblocks and the sequencer. + +### Flashblocks Workflow + +```mermaid +flowchart LR + subgraph Sequencer + ON[OP Node] + RB[Rollup Boost] + FEL[Fallback EL] + BB[Block Builder] + end + + subgraph Network + WSP[WebSocket Proxy] + end + + subgraph Clients + RPC[RPC Providers] + Users[End Users] + end + + ON --> RB + RB --> FEL + RB <--> BB + RB --> WSP + WSP --> RPC + RPC --> Users +``` + +1. **WebSocket Communication**: Flashblocks utilizes WebSockets to stream Flashblocks from the builder to rollup-boost once it's constructed to minimize the latency between the Flashblocks and the sequencer. +2. 
**WebSocket Proxy**: rollup-boost caches these Flashblocks and streams them to a WebSocket proxy. The proxy then fans out the Flashblocks to downstream RPC providers. +3. **RPC Overlay**: Once RPC providers receive these Flashblocks from the proxy, clients need to run a modified node that supports serving RPC requests with the Flashblocks preconfirmation state. +4. **Full Block**: At the end of the slot, the proposer requests a full block from rollup-boost. If rollup-boost does not have the Flashblocks cached due to a lost connection, it will fall back to the getPayload call to both the local execution client and the builder. + +See the [specs](https://github.com/flashbots/rollup-boost/blob/main/specs/flashblocks.md) for the full design details for Flashblocks. \ No newline at end of file diff --git a/rust/rollup-boost/book/src/modules/flashtestations.md b/rust/rollup-boost/book/src/modules/flashtestations.md new file mode 100644 index 0000000000000..7a72d1a05d951 --- /dev/null +++ b/rust/rollup-boost/book/src/modules/flashtestations.md @@ -0,0 +1,122 @@ +# Flashtestations + +Flashtestations is a rollup-boost module that provides onchain TEE (Trusted Execution Environment) attestations and block proofs to verifiably prove that blocks were built inside a TEE. This provides user guarantees such as priority ordering. 
+ +## Architecture + +```text ++-------------------------+ +---------------------+ +| TDX VM | | Onchain Verifier | +| | Attestation | | +| +-----------------+ | Quote | +-----------------+ | +| | TEE Workload | | ---------------> | | DCAP Attestation| | +| | | | | | Verifier | | +| | (measurements) | | | | | | +| +-----------------+ | | +--------+--------+ | +| | | | | ++-------------------------+ | v | + | +-----------------+ | ++-------------------------+ | | Intel | | +| Consumer Contract | | | Endorsements | | +| | | | | | +| +-----------------+ | | +--------+--------+ | +| | Operation | | | | | +| | Authorization | | | v | +| +-----------------+ | | +-----------------+ | +| | | | | Registration | | ++---------+---------------+ | | Logic | | + | | +--------+--------+ | + | +----------+----------+ + | | ++---------+---------------+ v +| Policy | +---------------------------+ +| | isValid | Flashtestation Registry | +| +---------------------+ | Query | | +| | allowedWorkloadIds[]| | <--------------> | {teeAddress: registration}| +| | {registration: | | | map | +| | workloadId} map | | | | +| +---------------------+ | +---------------------------+ ++-------------------------+ +``` + +### Core Components + +1. **Onchain Verifier**: Validates TDX attestation quotes against current Intel endorsements. Provides cryptographic proof that a TEE-controlled address is generated within genuine TDX hardware +2. **Flashtestation Registry**: Tracks TEE-controlled addresses with valid attestations +3. **Policy Registry**: Defines which workloads are acceptable for specific operations +4. **Transparency Log**: Records all attestation events and endorsement changes + +## Flashtestations Workflow + +Flashtestations involve two main workflows: + +- Registering the block builder's TEE attestation +- Block builder TEE proof + +### TEE Attestation Registration + +1. **TEE Environment Setup**: The builder runs in a TDX environment with specific measurement registers. 
These are measurements of a reproducible build for a specific builder code commit and config. +2. **TEE Key Generation**: The builder generates a key pair on startup that never leaves the TEE environment +3. **Quote Generation**: The TEE generates an attestation quote containing: + - Measurement registers + - Report data with TEE-controlled address and any extra registration data +4. **Onchain Verification**: The quote is submitted onchain by the builder to the registry contract. The contract verifies that the quote is valid and registers the TEE address from the report data + +### Policy Layer + +The policy layer provides authorization for TEE services. This allows operators to authorize that only builders running a specific commit and config can submit TEE block proofs. + +Upon registration, a workloadId is derived from the measurements in the quote and operators can manage which workload ids are considered valid for a specific rollup. + +The Policy Registry can store metadata linking workload IDs to source code: + +```solidity +struct WorkloadMetadata { + string commitHash; // Git commit hash of source code + string[] sourceLocators; // URIs pointing to source code (https://, git://, ipfs://) +} +``` + +### Block Builder TEE Proofs + +If the builder has registered successfully with an authorized workload id, builders can append a verification transaction to each block. + +The block proof will be signed by the generated TEE address. Since the TEE address never leaves the TEE environment, we can ensure that block proofs signed by that key mean that the block was built inside a TEE. 
+ +The builder will submit a transaction containing a block content hash, which is computed as: + +```solidity +function ComputeBlockContentHash(block, transactions) { + transactionHashes = [] + for each tx in transactions: + txHash = keccak256(rlp_encode(tx)) + transactionHashes.append(txHash) + + return keccak256(abi.encode( + block.parentHash, + block.number, + block.timestamp, + transactionHashes + )) +} +``` + +## Security Considerations + +### Critical Security Assumptions + +- **Private Key Management**: TEE private keys must never leave the TEE boundaries +- **Attestation Integrity**: TEEs must not allow external control over `ReportData` field +- **Reproducible Builds**: Workloads must be built using reproducible processes + +### Security Properties + +- **Block Authenticity**: Cryptographic proof that blocks were produced by authorized TEEs with specific guarantees with the workload id +- **Auditability**: All proofs are recorded onchain for transparency + +## References + +- [Intel TDX Specifications](https://www.intel.com/content/www/us/en/developer/tools/trust-domain-extensions/documentation.html) +- [Intel DCAP Quoting Library API](https://download.01.org/intel-sgx/latest/dcap-latest/linux/docs/Intel_TDX_DCAP_Quoting_Library_API.pdf) +- [Automata DCAP Attestation Contract](https://github.com/automata-network/automata-dcap-attestation) +- [Automata On-chain PCCS](https://github.com/automata-network/automata-on-chain-pccs) diff --git a/rust/rollup-boost/book/src/operators/README.md b/rust/rollup-boost/book/src/operators/README.md new file mode 100644 index 0000000000000..8092d487abfe1 --- /dev/null +++ b/rust/rollup-boost/book/src/operators/README.md @@ -0,0 +1,7 @@ +# For Rollup Operators + +[Running Rollup Boost Locally](./local.md) + +[Running Rollup Boost in Production](./production.md) + +[High Availability Setup](./ha-setup.md) \ No newline at end of file diff --git a/rust/rollup-boost/book/src/operators/ha-setup.md 
b/rust/rollup-boost/book/src/operators/ha-setup.md new file mode 100644 index 0000000000000..feda8cacbf79e --- /dev/null +++ b/rust/rollup-boost/book/src/operators/ha-setup.md @@ -0,0 +1,66 @@ +# High Availability Setup + +The current OP Stack sequencer HA design relies on `op-conductor` to manage a cluster of sequencers. In the rollup-boost HA setup, each sequencer connects to its own builder instance. In the event of sequencer failover, `op-conductor` elects a new leader, promoting a different `op-node` along with its associated `rollup-boost` and builder instance. + +![HA Setup](https://raw.githubusercontent.com/flashbots/rollup-boost/refs/heads/main/assets/1-1-builder-rb.png) + +If the builder produces undesirable but valid blocks, operators must either manually disable external block production via the `rollup-boost` debug API, disable the block builder directly (causing health checks to fail), or manually select a new sequencer leader. + +See the [design doc](https://github.com/flashbots/rollup-boost/blob/main/docs/rollup-boost-ha.md) for more detail on the design principles for the HA setup with rollup-boost. + +## Health Checks + +In high availability deployments, `op-conductor` must assess the full health of the block production path. Rollup Boost will expose a composite `/healthz` endpoint to report on both builder synchronization and payload production status. These checks allow `op-conductor` to detect degraded block building conditions and make informed leadership decisions. + +Rollup Boost will continuously monitor two independent conditions to inform the health of the builder and the default execution client: + +- **Builder Synchronization**: + A background task periodically queries the builder’s latest unsafe block via `engine_getBlockByNumber`. The task compares the timestamp of the returned block to the local system time. If the difference exceeds a configured maximum unsafe interval (`max_unsafe_interval`), the builder is considered out of sync. 
Failure to fetch a block from the builder or detection of an outdated block timestamp results in the health status being downgraded to Partial. If the builder is responsive and the block timestamp is within the acceptable interval, the builder is considered synchronized and healthy. Alternatively, instead of periodic polling, builder synchronization can be inferred if the builder returns a `VALID` response to a `newPayload` call forwarded from Rollup Boost. + +- **Payload Production**: + During each `get_payload` request, Rollup Boost will verify payload availability from both the builder and the execution client. If the builder fails to deliver a payload, Rollup Boost will report partial health. If the execution client fails to deliver a payload, Rollup Boost will report unhealthy. + +`op-conductor` should also be configurable in how it interprets health status for failover decisions. This allows chain operators to define thresholds based on their risk tolerance and operational goals. For example, operators may choose to maintain leadership with a sequencer reporting `206 Partial Content` to avoid unnecessary failovers or they may configure `op-conductor` to immediately fail over when any degradation is detected. This flexibility allows the chain operator to configure a failover policy that aligns with network performance expectations and builder reliability. + +
+ +| Condition | Health Status | +|:----------|:--------------| +| Builder is synced and both execution client and builder return payloads | `200 OK` (Healthy) | +| Builder is out of sync| `206 Partial Content` (Partially Healthy) | +| Builder fails to return payload on `get_payload` request | `206 Partial Content` (Partially Healthy) | +| Execution client fails to return payload on `get_payload` request | `503 Service Unavailable` (Unhealthy) | + +`op-conductor` should query the `/healthz` endpoint exposed by Rollup Boost in addition to the existing execution client health checks. Health should be interpreted as follows: + +- `200 OK` (Healthy): The node is fully healthy and eligible for leadership. +- `206 Partial Content` (Partially Healthy): The node is degraded but may be considered for leadership if configured by operator +- `503 Service Unavailable` (Unhealthy): The node is unhealthy and must be excluded from leadership. + +During normal operation and leadership transfers, `op-conductor` should prioritize sequencer candidates in the following order: + +1. Prefer nodes reporting `200 OK`. +2. Nodes that return `503 Service Unavailable` are treated as unhealthy and must not be eligible for sequencer leadership. `op-conductor` should offer a configuration option to treat nodes returning `206 Partial Content` as either healthy or unhealthy. + +Rollup Boost instances that are not actively sequencing rely on the builder sync check to report health, as they are not producing blocks. This behavior mirrors the existing `op-conductor` health checks for inactive sequencers and ensures readiness during failover without compromising network liveness guarantees. Note that `op-conductor` will still evaluate existing sequencer health checks to determine overall sequencer health. 
+ +Note that in the case where the builder is unhealthy, `rollup-boost` should bypass forwarding block production requests to the builder entirely and immediately use the default execution client for payload construction. This avoids introducing unnecessary latency while waiting for the builder response to timeout. + +When builder health is restored, normal request forwarding and payload selection behavior will resume. + +
+ +## Failure Scenarios + +Below is a high level summary of how each failure scenario is handled. All existing failure modes assumed by upstream `op-conductor` are maintained: + +| Failure Scenario | Category | Scenario and Solution | +| --- | --- | --- | +| Leader Sequencer Execution Client Fails | Sequencer Failure | `op-conductor` will detect an unhealthy status from both `rollup-boost` and pre-existing sequencer health checks, causing Conductor to elect a new leader. Once the default execution client has recovered, `rollup-boost` will update its health status to `200` and the sequencer will continue operating normally as a follower. | +| Follower Sequencer Execution Client Fails | Sequencer Failure | Both `rollup-boost` and pre-existing sequencer health checks will report "unhealthy". Once the default execution client has recovered, `rollup-boost` will update its health status to `200` and the sequencer will continue operating normally as a follower. In the event of leadership transfer, this sequencer instance will not be considered for leadership.| +| Leader `rollup-boost` Fails | Rollup Boost Failure | Leader sequencer `rollup-boost` becomes unhealthy, causing `op-conductor`'s sequencer health checks to fail and attempt to elect a new leader. This failure mode is the same as a typical leader sequencer failure. Once the sequencer recovers, it will continue to participate in the cluster as a follower| +| Follower `rollup-boost` Fails | Rollup Boost Failure | Follower sequencer `rollup-boost` becomes unhealthy. The leader sequencer is unaffected. Once the sequencer recovers, it will continue to participate in the cluster as a follower.| +| Leader Builder Stops Producing Blocks | Builder Failure | The builder associated with the sequencer leader stops producing new payloads. `rollup-boost` will detect the builder failure via background health checks and downgrade its health status to partial. 
This will result in `rollup-boost` ignoring the builder and selecting the default execution client's payload for block production. If `op-conductor` is configured to failover upon partial `rollup-boost` health, a new leader will attempt to be elected. Once the builder recovers and resumes payload production, `rollup-boost` will update its health to `200` and resume with normal operation. | +| Leader Builder Falls Out of Sync | Builder Failure | The builder associated with the sequencer leader falls out of sync with the chain head. `rollup-boost` will detect the unsynced state via the background health checks and downgrade its health status to partial. This will result in `rollup-boost` ignoring builder payloads and selecting the default execution client payload for block until the builder is resynced. If `op-conductor` is configured to failover upon partial `rollup-boost` health, a new leader will attempt to be elected. Once the builder recovers, `rollup-boost` will update its health to `200` and resume with normal operation. | +| Follower Builder Falls Out of Sync | Builder Failure | The builder associated with a follower sequencer falls out of sync with the chain head. Block production is unaffected while the node remains a follower. In the event a leader election occurs and `op-conductor` is configured to treat partial health as "unhealthy", this instance will not be eligible for leadership. Once the builder recovers, `rollup-boost` will report `200 OK` and resume normal operation.| +| Leader Builder Producing Bad Blocks| Builder Failure| In this scenario, the builder is "healthy" but producing bad blocks (eg. empty blocks). If the builder block passes validation via a `new_payload` call to the default execution client, it will be proposed to the network. Manual intervention is needed to either switch to a different sequencer or shutoff the builder. 
Further mitigation can be introduced via block selection policy allowing `rollup-boost` to select the "healthiest" block. Currently, it is unclear what block selection policy would provide the strongest guarantees.| diff --git a/rust/rollup-boost/book/src/operators/local.md b/rust/rollup-boost/book/src/operators/local.md new file mode 100644 index 0000000000000..8c548a475c713 --- /dev/null +++ b/rust/rollup-boost/book/src/operators/local.md @@ -0,0 +1,28 @@ +# Running Rollup Boost Locally + +To run a local development network, you can use builder-playground to spin up the op-stack with rollup-boost. + +## Builder Playground + +Builder playground is a tool to deploy an end-to-end block builder environment locally. It can be used to test both L1 and OP Stack block builders. + +This will include deploying an OP Stack chain with: + +- A complete L1 setup (CL/EL) +- A complete L2 sequencer (op-geth/op-node/op-batcher) +- Op-rbuilder as the external block builder with Flashblocks support + +```bash +builder-playground cook opstack --external-builder op-rbuilder +``` + +Flags: + +`--enable-latest-fork` (int): Enables the latest fork (isthmus) at startup (0) or n blocks after genesis. +`--flashblocks`: Enables rollup-boost with Flashblocks enabled for pre-confirmations + +In this setup, there is a prefunded test account to send test transactions to: + +- address: 0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266 +- private key: ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 + diff --git a/rust/rollup-boost/book/src/operators/production.md b/rust/rollup-boost/book/src/operators/production.md new file mode 100644 index 0000000000000..59af4de8932c2 --- /dev/null +++ b/rust/rollup-boost/book/src/operators/production.md @@ -0,0 +1,214 @@ +# Running Rollup Boost in Production + +## Regular Sequencer Setup + +To run rollup-boost with a regular sequencer setup, change the `--l2` flag in the proposer `op-node` to point to the rollup-boost RPC endpoint. 
+ +To configure rollup-boost, set the L2 URL to the URL of the proposer auth RPC endpoint and the builder URL to the builder auth RPC endpoint. Separate JWT tokens will be needed for the two endpoints. + +You can also set the options using environment variables. See `.env.example` to use the default values. + +```bash +cargo run --bin rollup-boost -- \ + --l2-jwt-token your_jwt_token \ + --l2-url http://localhost:8545 \ + --builder-jwt-token your_jwt_token \ + --builder-url http://localhost:8546 +``` + +To set up a builder, you can use [`op-rbuilder`](https://github.com/flashbots/op-rbuilder) with an op-node instance and have rollup-boost point to the builder auth RPC endpoint. + +## Flashblocks + +To launch rollup-boost with Flashblocks enabled: + +```bash +cargo run --bin rollup-boost -- \ + --l2-url http://localhost:5555 \ + --builder-url http://localhost:4445 \ + --l2-jwt-token 688f5d737bad920bdfb2fc2f488d6b6209eebda1dae949a8de91398d932c517a \ + --builder-jwt-token 688f5d737bad920bdfb2fc2f488d6b6209eebda1dae949a8de91398d932c517a \ + --rpc-port 4444 \ + --flashblocks \ + --log-level info +``` + +This command uses the default Flashblocks configuration. For custom configurations, see the [Flashblocks Configuration](#flashblocks-configuration) section below. 
+ +#### Default Port Configuration + +- `4444`: rollup-boost RPC port +- `4445`: op-rbuilder auth RPC port (matches rollup-boost builder URL) +- `5555`: op-reth auth RPC port (matches rollup-boost L2 URL) +- `3030`: op-rbuilder P2P port +- `3131`: op-reth P2P port + +### Flashblocks Configuration + +rollup-boost provides several configuration options for Flashblocks functionality: + +#### Basic Flashblocks Flag + +- `--flashblocks`: Enable Flashblocks client (required) + - Environment variable: `FLASHBLOCKS` + +#### WebSocket Connection Settings + +- `--flashblocks-builder-url `: Flashblocks Builder WebSocket URL + + - Environment variable: `FLASHBLOCKS_BUILDER_URL` + - Default: `ws://127.0.0.1:1111` + +- `--flashblocks-host `: Flashblocks WebSocket host for outbound connections + + - Environment variable: `FLASHBLOCKS_HOST` + - Default: `127.0.0.1` + +- `--flashblocks-port `: Flashblocks WebSocket port for outbound connections + - Environment variable: `FLASHBLOCKS_PORT` + - Default: `1112` + +#### Connection Management + +- `--flashblock-builder-ws-reconnect-ms `: Timeout duration if builder disconnects + - Environment variable: `FLASHBLOCK_BUILDER_WS_RECONNECT_MS` + - No default value specified + +## Execution mode + +`ExecutionMode` is a configuration setting that controls how `rollup-boost` interacts with the external builder during block production. Execution mode can be set either at startup via CLI flags or dynamically modified at runtime through the [Debug API](#debug-api). +Operators can use `ExecutionMode` to selectively forward or bypass builder interactions, enabling dry runs during deployments or fully disabling external block production during emergencies. + +The available execution modes are: + +- `Enabled` + - `rollup-boost` forwards all Engine API requests to both the builder and default execution client. + - Optimistically selects the builder’s payload for validation and block publication. 
+ - Falls back to the local execution client *only* if the builder fails to produce a payload or the payload is invalid. + - Default setting for normal external block production. + +- `DryRun` + - `rollup-boost` forwards all Engine API requests to both the builder and default execution client. + - Builder payloads are validated with the local execution client but the default execution client block will always be returned to `op-node` to propagate to the network. + - Useful during deployments, dry runs, or to validate builder behavior without publishing builder blocks to the network. + +- `Disabled` + - `rollup-boost` does not forward any Engine API requests to the builder. + - Block construction is handled exclusively by the default execution client. + - Useful as an emergency shutoff switch in the case of critical failures/emergencies. + +```rust +pub enum ExecutionMode { + /// Forward Engine API requests to the builder, validate builder payloads and propagate to the network + Enabled, + /// Forward Engine API requests to the builder, validate builder payloads but + /// fallback to default execution payload + DryRun, + /// Do not forward Engine API requests to the builder + Disabled, +} +``` + +
+ +## Debug API + +`rollup-boost` exposes a Debug API that allows operators to inspect and modify the current execution mode at runtime without restarting the service. This provides flexibility to dynamically enable, disable, or dry-run external block production based on builder behavior or network conditions. The Debug API is served over HTTP using JSON RPC and consists of the following endpoints: + +### `debug_setExecutionMode` + +Sets the current execution mode for `rollup-boost`. + +**Request**: + +```json +{ + "method": "debug_setExecutionMode", + "params": [ "enabled" | "dry_run" | "disabled" ], + "id": 1, + "jsonrpc": "2.0" +} +``` + +**Response**: + +```json +{ + "result": null, + "id": 1, + "jsonrpc": "2.0" +} +``` + +### `debug_getExecutionMode` + +Retrieves the current execution mode. + +**Request**: + +```json +{ + "method": "debug_getExecutionMode", + "params": [], + "id": 1, + "jsonrpc": "2.0" +} +``` + +**Response:** + +```json +{ + "result": "enabled" | "dry_run" | "disabled", + "id": 1, + "jsonrpc": "2.0" +} +``` + +## Observability + +### Metrics + +To enable metrics, you can set the `--metrics` flag. This will start a metrics server which will run on port 9090 by default. To see the list of metrics, you can check out metrics.rs and ping the metrics endpoint: + +```bash +curl http://localhost:9090/metrics +``` + +All spans create duration histogram metrics with the name "{span_name}\_duration". Currently, this list includes: + +- fork_choice_updated_duration +- get_payload_duration +- new_payload_duration + +Additionally, execution engines such as op-rbuilder have RPC metrics exposed to check if `engine_getPayloadV4` requests have been received. To check if the builder blocks are landing on-chain, the builder can be configured to include a builder transaction in the block, which is captured as part of the builder metrics. 
To see more details about observability in op-rbuilder, you can check op-rbuilder's [README](https://github.com/flashbots/op-rbuilder?tab=readme-ov-file#observability). + +#### Flashblocks + +There are metrics and observability in all the services supporting Flashblocks. In rollup-boost: + +- `messages_processed` - number of messages processed from the Flashblocks WebSocket stream +- `flashblocks_counter` - number of Flashblocks proposed +- `flashblocks_missing_counter` - number of Flashblocks missed from the expected number of Flashblocks + +Additionally, the builder transaction can also be observed in the last Flashblock to determine if the number of expected Flashblocks has been included on-chain. + +### Tracing + +Tracing is enabled by setting the `--tracing` flag. This will start exporting traces to the otlp endpoint specified in the `--otlp-endpoint` flag. This endpoint is set to `http://localhost:4317` by default. + +Traces use the payload id to track the block building lifecycle. A distributed tracing system such as [Jaeger](https://www.jaegertracing.io/) can be used to visualize when the proposer triggers block building via `engine_forkchoiceUpdated` and retrieve the block with `engine_getPayload`. + +## Troubleshooting Builder Responses + +### Invalid Builder Payloads + +If there are logs around the builder payload being invalid, it is likely there is an issue with the builder and you will need to contact the builder operator to resolve it. In this case rollup-boost will use the local payload and chain liveness will not be affected. You can also manually set rollup-boost to dry run mode using the Debug API to stop payload requests to the builder, silencing the error logs. + +It is also possible that either the builder or the proposer execution engine are not running on compatible hard fork versions. Please check that the clients are running on compatible versions of the op-stack. 
+ +### Builder Syncing + +Alternatively, the builder may be syncing with the chain and not have a block to respond with. You can see in the logs the builder is syncing by checking whether the payload_status of builder calls is `SYNCING`. + +This is expected if the builder is still syncing with the chain. Chain liveness will not be affected as rollup-boost will use the local payload. Contact the builder operator if the sync status persists as the builder op-node may be offline or not peered correctly with the network. \ No newline at end of file diff --git a/rust/rollup-boost/book/theme/head.hbs b/rust/rollup-boost/book/theme/head.hbs new file mode 100644 index 0000000000000..e9a1a36020c03 --- /dev/null +++ b/rust/rollup-boost/book/theme/head.hbs @@ -0,0 +1,10 @@ + + + +{{!-- TODO: add logo --}} +{{!-- --}} +{{!-- --}} + + \ No newline at end of file diff --git a/rust/rollup-boost/crates/rollup-boost-types/Cargo.toml b/rust/rollup-boost/crates/rollup-boost-types/Cargo.toml new file mode 100644 index 0000000000000..b135e35319f19 --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost-types/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "rollup-boost-types" +version = "0.1.0" +edition.workspace = true +description = "Core types for Rollup Boost" +license.workspace = true +readme = "../../README.md" + +[dependencies] +alloy-primitives = { workspace = true, features = ["serde"] } +alloy-rlp.workspace = true +alloy-rpc-types-engine = { workspace = true, features = ["serde"] } +alloy-rpc-types-eth = { workspace = true, features = ["serde"] } +alloy-serde.workspace = true +futures.workspace = true +moka = { workspace = true, features = ["future"] } +op-alloy-rpc-types-engine = { workspace = true, features = ["serde"] } +serde = { workspace = true, features = ["derive", "std"] } +serde_json = { workspace = true, features = ["std"] } +tracing.workspace = true +thiserror.workspace = true +ed25519-dalek.workspace = true +blake3.workspace = true diff --git 
a/rust/rollup-boost/crates/rollup-boost-types/src/authorization.rs b/rust/rollup-boost/crates/rollup-boost-types/src/authorization.rs new file mode 100644 index 0000000000000..d9e8783e7503b --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost-types/src/authorization.rs @@ -0,0 +1,200 @@ +use alloy_primitives::{B64, Bytes}; +use alloy_rlp::{Decodable, Encodable, Header}; +use alloy_rpc_types_engine::PayloadId; +use ed25519_dalek::{Signature, Signer, SigningKey, Verifier, VerifyingKey}; +use serde::{Deserialize, Serialize}; +use thiserror::Error; + +/// An authorization token that grants a builder permission to publish flashblocks for a specific +/// payload. +/// +/// The `authorizer_sig` is made over the `payload_id`, `timestamp`, and `builder_vk`. This is +/// useful because it allows the authorizer to control which builders can publish flashblocks in +/// real time, without relying on consumers to verify the builder's public key against a +/// pre-defined list. +#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct Authorization { + /// The unique identifier of the payload this authorization applies to + pub payload_id: PayloadId, + /// Unix timestamp when this authorization was created + pub timestamp: u64, + /// The public key of the builder who is authorized to sign messages + pub builder_vk: VerifyingKey, + /// The authorizer's signature over the payload_id, timestamp, and builder_vk + pub authorizer_sig: Signature, +} + +#[derive(Debug, Error, PartialEq)] +pub enum AuthorizationError { + #[error("invalid authorizer signature")] + InvalidAuthorizerSig, +} + +impl Authorization { + /// Creates a new authorization token for a builder to publish messages for a specific payload. + /// + /// This function creates a cryptographic authorization by signing a message containing the + /// payload ID, timestamp, and builder's public key using the authorizer's signing key. 
+ /// + /// # Arguments + /// + /// * `payload_id` - The unique identifier of the payload this authorization applies to + /// * `timestamp` - Unix timestamp associated with this `payload_id` + /// * `authorizer_sk` - The authorizer's signing key used to create the signature + /// * `actor_vk` - The verifying key of the actor being authorized + /// + /// # Returns + /// + /// A new `Authorization` instance with the generated signature + pub fn new( + payload_id: PayloadId, + timestamp: u64, + authorizer_sk: &SigningKey, + actor_vk: VerifyingKey, + ) -> Self { + let mut msg = payload_id.0.to_vec(); + msg.extend_from_slice(&timestamp.to_le_bytes()); + msg.extend_from_slice(actor_vk.as_bytes()); + let hash = blake3::hash(&msg); + let sig = authorizer_sk.sign(hash.as_bytes()); + + Self { payload_id, timestamp, builder_vk: actor_vk, authorizer_sig: sig } + } + + /// Verifies the authorization signature against the provided authorizer's verifying key. + /// + /// This function reconstructs the signed message from the authorization data and verifies + /// that the signature was created by the holder of the authorizer's private key. 
+ /// + /// # Arguments + /// + /// * `authorizer_sk` - The verifying key of the authorizer to verify against + /// + /// # Returns + /// + /// * `Ok(())` if the signature is valid + /// * `Err(AuthorizationError::InvalidAuthorizerSig)` if the signature is invalid + pub fn verify(&self, authorizer_sk: VerifyingKey) -> Result<(), AuthorizationError> { + let mut msg = self.payload_id.0.to_vec(); + msg.extend_from_slice(&self.timestamp.to_le_bytes()); + msg.extend_from_slice(self.builder_vk.as_bytes()); + let hash = blake3::hash(&msg); + authorizer_sk + .verify(hash.as_bytes(), &self.authorizer_sig) + .map_err(|_| AuthorizationError::InvalidAuthorizerSig) + } +} + +impl Encodable for Authorization { + fn encode(&self, out: &mut dyn alloy_rlp::BufMut) { + // pre-serialize the key & sig once so we can reuse the bytes & lengths + let pub_bytes = Bytes::copy_from_slice(self.builder_vk.as_bytes()); // 32 bytes + let sig_bytes = Bytes::copy_from_slice(&self.authorizer_sig.to_bytes()); // 64 bytes + + let payload_len = self.payload_id.0.length() + + self.timestamp.length() + + pub_bytes.length() + + sig_bytes.length(); + + Header { list: true, payload_length: payload_len }.encode(out); + + // 1. payload_id (inner B64 already Encodable) + self.payload_id.0.encode(out); + // 2. timestamp + self.timestamp.encode(out); + // 3. builder_pub + pub_bytes.encode(out); + // 4. 
authorizer_sig + sig_bytes.encode(out); + } + + fn length(&self) -> usize { + let pub_bytes = Bytes::copy_from_slice(self.builder_vk.as_bytes()); + let sig_bytes = Bytes::copy_from_slice(&self.authorizer_sig.to_bytes()); + + let payload_len = self.payload_id.0.length() + + self.timestamp.length() + + pub_bytes.length() + + sig_bytes.length(); + + Header { list: true, payload_length: payload_len }.length() + payload_len + } +} + +impl Decodable for Authorization { + fn decode(buf: &mut &[u8]) -> Result { + let header = Header::decode(buf)?; + if !header.list { + return Err(alloy_rlp::Error::UnexpectedString); + } + let mut body = &buf[..header.payload_length]; + + // 1. payload_id + let payload_id = alloy_rpc_types_engine::PayloadId(B64::decode(&mut body)?); + + // 2. timestamp + let timestamp = u64::decode(&mut body)?; + + // 3. builder_pub + let pub_bytes = Bytes::decode(&mut body)?; + let builder_pub = VerifyingKey::try_from(pub_bytes.as_ref()) + .map_err(|_| alloy_rlp::Error::Custom("bad builder_pub"))?; + + // 4. 
authorizer_sig + let sig_bytes = Bytes::decode(&mut body)?; + let authorizer_sig = Signature::try_from(sig_bytes.as_ref()) + .map_err(|_| alloy_rlp::Error::Custom("bad signature"))?; + + // advance caller’s slice cursor + *buf = &buf[header.payload_length..]; + + Ok(Self { payload_id, timestamp, builder_vk: builder_pub, authorizer_sig }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_rlp::{Decodable, Encodable, encode}; + + fn key_pair(seed: u8) -> (SigningKey, VerifyingKey) { + let bytes = [seed; 32]; + let sk = SigningKey::from_bytes(&bytes); + let vk = sk.verifying_key(); + (sk, vk) + } + + #[test] + fn authorization_rlp_roundtrip_and_verify() { + let (authorizer_sk, authorizer_vk) = key_pair(1); + let (_, builder_vk) = key_pair(2); + + let auth = + Authorization::new(PayloadId::default(), 1_700_000_123, &authorizer_sk, builder_vk); + + let encoded = encode(auth); + assert_eq!(encoded.len(), auth.length(), "length impl correct"); + + let mut slice = encoded.as_ref(); + let decoded = Authorization::decode(&mut slice).expect("decoding succeeds"); + assert!(slice.is_empty(), "decoder consumed all bytes"); + assert_eq!(decoded, auth, "round-trip preserves value"); + + // Signature is valid + decoded.verify(authorizer_vk).expect("signature verifies"); + } + + #[test] + fn authorization_signature_tamper_is_detected() { + let (authorizer_sk, authorizer_vk) = key_pair(1); + let (_, builder_vk) = key_pair(2); + + let mut auth = Authorization::new(PayloadId::default(), 42, &authorizer_sk, builder_vk); + + let mut sig_bytes = auth.authorizer_sig.to_bytes(); + sig_bytes[0] ^= 1; + auth.authorizer_sig = Signature::try_from(sig_bytes.as_ref()).unwrap(); + + assert!(auth.verify(authorizer_vk).is_err()); + } +} diff --git a/rust/rollup-boost/crates/rollup-boost-types/src/flashblocks.rs b/rust/rollup-boost/crates/rollup-boost-types/src/flashblocks.rs new file mode 100644 index 0000000000000..eec0f7d73aa28 --- /dev/null +++ 
b/rust/rollup-boost/crates/rollup-boost-types/src/flashblocks.rs @@ -0,0 +1,271 @@ +use alloy_primitives::{Address, B64, B256, Bloom, Bytes, U256}; +use alloy_rlp::{Decodable, Encodable, Header, RlpDecodable, RlpEncodable}; +use alloy_rpc_types_engine::PayloadId; +use alloy_rpc_types_eth::Withdrawal; +use serde::{Deserialize, Serialize}; +use serde_json::Value; + +/// Represents the modified portions of an execution payload within a flashblock. +/// This structure contains only the fields that can be updated during block construction, +/// such as state root, receipts, logs, and new transactions. Other immutable block fields +/// like parent hash and block number are excluded since they remain constant throughout +/// the block's construction. +#[derive( + Clone, Debug, PartialEq, Default, Deserialize, Serialize, Eq, RlpEncodable, RlpDecodable, +)] +#[rlp(trailing)] +pub struct ExecutionPayloadFlashblockDeltaV1 { + /// The state root of the block. + pub state_root: B256, + /// The receipts root of the block. + pub receipts_root: B256, + /// The logs bloom of the block. + pub logs_bloom: Bloom, + /// The gas used of the block. + #[serde(with = "alloy_serde::quantity")] + pub gas_used: u64, + /// The block hash of the block. + pub block_hash: B256, + /// The transactions of the block. + pub transactions: Vec, + /// Array of [`Withdrawal`] enabled with V2 + pub withdrawals: Vec, + /// The withdrawals root of the block. + pub withdrawals_root: B256, + /// The blob gas used + #[serde(default, skip_serializing_if = "Option::is_none", with = "alloy_serde::quantity::opt")] + pub blob_gas_used: Option, +} + +/// Represents the base configuration of an execution payload that remains constant +/// throughout block construction. This includes fundamental block properties like +/// parent hash, block number, and other header fields that are determined at +/// block creation and cannot be modified. 
+#[derive( + Clone, Debug, PartialEq, Default, Deserialize, Serialize, Eq, RlpEncodable, RlpDecodable, +)] +pub struct ExecutionPayloadBaseV1 { + /// Ecotone parent beacon block root + pub parent_beacon_block_root: B256, + /// The parent hash of the block. + pub parent_hash: B256, + /// The fee recipient of the block. + pub fee_recipient: Address, + /// The previous randao of the block. + pub prev_randao: B256, + /// The block number. + #[serde(with = "alloy_serde::quantity")] + pub block_number: u64, + /// The gas limit of the block. + #[serde(with = "alloy_serde::quantity")] + pub gas_limit: u64, + /// The timestamp of the block. + #[serde(with = "alloy_serde::quantity")] + pub timestamp: u64, + /// The extra data of the block. + pub extra_data: Bytes, + /// The base fee per gas of the block. + pub base_fee_per_gas: U256, +} + +#[derive(Clone, Debug, PartialEq, Default, Deserialize, Serialize, Eq)] +pub struct FlashblocksPayloadV1 { + /// The payload id of the flashblock + pub payload_id: PayloadId, + /// The index of the flashblock in the block + pub index: u64, + /// The delta/diff containing modified portions of the execution payload + pub diff: ExecutionPayloadFlashblockDeltaV1, + /// Additional metadata associated with the flashblock + pub metadata: Value, + /// The base execution payload configuration + #[serde(skip_serializing_if = "Option::is_none")] + pub base: Option, +} + +/// Manual RLP implementation because `PayloadId` and `serde_json::Value` are +/// outside of alloy-rlp’s blanket impls. 
+impl Encodable for FlashblocksPayloadV1 { + fn encode(&self, out: &mut dyn alloy_rlp::BufMut) { + // ---- compute payload length ------------------------------------------------- + let json_bytes = Bytes::from( + serde_json::to_vec(&self.metadata).expect("serialising `metadata` to JSON never fails"), + ); + + // encoded-len helper — empty string is one byte (`0x80`) + let empty_len = 1usize; + + let base_len = self.base.as_ref().map(|b| b.length()).unwrap_or(empty_len); + + let payload_len = self.payload_id.0.length() + + self.index.length() + + self.diff.length() + + json_bytes.length() + + base_len; + + Header { list: true, payload_length: payload_len }.encode(out); + + // 1. `payload_id` – the inner `B64` already impls `Encodable` + self.payload_id.0.encode(out); + + // 2. `index` + self.index.encode(out); + + // 3. `diff` + self.diff.encode(out); + + // 4. `metadata` (as raw JSON bytes) + json_bytes.encode(out); + + // 5. `base` (`Option` as “value | empty string”) + if let Some(base) = &self.base { + base.encode(out); + } else { + // RLP encoding for empty value + out.put_u8(0x80); + } + } + + fn length(&self) -> usize { + let json_bytes = Bytes::from( + serde_json::to_vec(&self.metadata).expect("serialising `metadata` to JSON never fails"), + ); + + let empty_len = 1usize; + + let base_len = self.base.as_ref().map(|b| b.length()).unwrap_or(empty_len); + + // list header length + payload length + let payload_length = self.payload_id.0.length() + + self.index.length() + + self.diff.length() + + json_bytes.length() + + base_len; + + Header { list: true, payload_length }.length() + payload_length + } +} + +impl Decodable for FlashblocksPayloadV1 { + fn decode(buf: &mut &[u8]) -> Result { + let header = Header::decode(buf)?; + if !header.list { + return Err(alloy_rlp::Error::UnexpectedString); + } + + // Limit the decoding window to the list payload only. 
+ let mut body = &buf[..header.payload_length]; + + let payload_id = B64::decode(&mut body)?.into(); + let index = u64::decode(&mut body)?; + let diff = ExecutionPayloadFlashblockDeltaV1::decode(&mut body)?; + + // metadata – stored as raw JSON bytes + let meta_bytes = Bytes::decode(&mut body)?; + let metadata: Value = serde_json::from_slice(&meta_bytes) + .map_err(|_| alloy_rlp::Error::Custom("bad JSON"))?; + + // base (`Option`) + let base = if body.first() == Some(&0x80) { + None + } else { + Some(ExecutionPayloadBaseV1::decode(&mut body)?) + }; + + // advance the original buffer cursor + *buf = &buf[header.payload_length..]; + + Ok(Self { payload_id, index, diff, metadata, base }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_rlp::{Decodable, encode}; + + fn sample_diff() -> ExecutionPayloadFlashblockDeltaV1 { + ExecutionPayloadFlashblockDeltaV1 { + state_root: B256::from([1u8; 32]), + receipts_root: B256::from([2u8; 32]), + logs_bloom: Bloom::default(), + gas_used: 21_000, + block_hash: B256::from([3u8; 32]), + transactions: vec![Bytes::from(vec![0xde, 0xad, 0xbe, 0xef])], + withdrawals: vec![Withdrawal::default()], + withdrawals_root: B256::from([4u8; 32]), + blob_gas_used: None, + } + } + + fn sample_base() -> ExecutionPayloadBaseV1 { + ExecutionPayloadBaseV1 { + parent_beacon_block_root: B256::from([5u8; 32]), + parent_hash: B256::from([6u8; 32]), + fee_recipient: Address::from([0u8; 20]), + prev_randao: B256::from([7u8; 32]), + block_number: 123, + gas_limit: 30_000_000, + timestamp: 1_700_000_000, + extra_data: Bytes::from(b"hello".to_vec()), + base_fee_per_gas: U256::from(1_000_000_000u64), + } + } + + #[test] + fn roundtrip_without_base() { + let original = FlashblocksPayloadV1 { + payload_id: PayloadId::default(), + index: 0, + diff: sample_diff(), + metadata: serde_json::json!({ "key": "value" }), + base: None, + }; + + let encoded = encode(&original); + assert_eq!(encoded.len(), original.length(), "length() must match 
actually-encoded size"); + + let mut slice = encoded.as_ref(); + let decoded = FlashblocksPayloadV1::decode(&mut slice).expect("decode succeeds"); + assert_eq!(original, decoded, "round-trip must be loss-less"); + assert!(slice.is_empty(), "decoder should consume the entire input buffer"); + } + + #[test] + fn roundtrip_with_base() { + let original = FlashblocksPayloadV1 { + payload_id: PayloadId::default(), + index: 42, + diff: sample_diff(), + metadata: serde_json::json!({ "foo": 1, "bar": [1, 2, 3] }), + base: Some(sample_base()), + }; + + let encoded = encode(&original); + assert_eq!(encoded.len(), original.length()); + + let mut slice = encoded.as_ref(); + let decoded = FlashblocksPayloadV1::decode(&mut slice).expect("decode succeeds"); + assert_eq!(original, decoded); + assert!(slice.is_empty()); + } + + #[test] + fn invalid_rlp_is_rejected() { + let valid = FlashblocksPayloadV1 { + payload_id: PayloadId::default(), + index: 1, + diff: sample_diff(), + metadata: serde_json::json!({}), + base: None, + }; + + // Encode, then truncate the last byte to corrupt the payload. 
+ let mut corrupted = encode(&valid); + corrupted.pop(); + + let mut slice = corrupted.as_ref(); + let result = FlashblocksPayloadV1::decode(&mut slice); + assert!(result.is_err(), "decoder must flag malformed / truncated input"); + } +} diff --git a/rust/rollup-boost/crates/rollup-boost-types/src/lib.rs b/rust/rollup-boost/crates/rollup-boost-types/src/lib.rs new file mode 100644 index 0000000000000..d6fb6daf17be0 --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost-types/src/lib.rs @@ -0,0 +1,7 @@ +pub mod authorization; +pub mod flashblocks; +pub mod payload; + +pub mod ed25519_dalek { + pub use ed25519_dalek::*; +} diff --git a/rust/rollup-boost/crates/rollup-boost-types/src/payload.rs b/rust/rollup-boost/crates/rollup-boost-types/src/payload.rs new file mode 100644 index 0000000000000..7e984f23b1e03 --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost-types/src/payload.rs @@ -0,0 +1,287 @@ +use alloy_primitives::{B256, Bytes}; +use futures::{StreamExt as _, stream}; +use moka::future::Cache; + +use alloy_rpc_types_engine::{ExecutionPayload, ExecutionPayloadV3, PayloadAttributes, PayloadId}; +use op_alloy_rpc_types_engine::{ + OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4, OpExecutionPayloadV4, +}; + +const CACHE_SIZE: u64 = 100; + +#[derive(Debug, Clone)] +pub enum OpExecutionPayloadEnvelope { + V3(OpExecutionPayloadEnvelopeV3), + V4(OpExecutionPayloadEnvelopeV4), +} + +impl OpExecutionPayloadEnvelope { + pub fn version(&self) -> PayloadVersion { + match self { + OpExecutionPayloadEnvelope::V3(_) => PayloadVersion::V3, + OpExecutionPayloadEnvelope::V4(_) => PayloadVersion::V4, + } + } + + pub fn gas_used(&self) -> u64 { + match self { + OpExecutionPayloadEnvelope::V3(payload) => { + payload.execution_payload.payload_inner.payload_inner.gas_used + } + + OpExecutionPayloadEnvelope::V4(payload) => { + payload.execution_payload.payload_inner.payload_inner.payload_inner.gas_used + } + } + } + + pub fn tx_count(&self) -> usize { + match self { 
+ OpExecutionPayloadEnvelope::V3(payload) => { + payload.execution_payload.payload_inner.payload_inner.transactions.len() + } + OpExecutionPayloadEnvelope::V4(payload) => payload + .execution_payload + .payload_inner + .payload_inner + .payload_inner + .transactions + .len(), + } + } + + pub fn transactions(&self) -> Vec { + match self { + OpExecutionPayloadEnvelope::V3(payload) => { + payload.execution_payload.payload_inner.payload_inner.transactions.clone() + } + OpExecutionPayloadEnvelope::V4(payload) => payload + .execution_payload + .payload_inner + .payload_inner + .payload_inner + .transactions + .clone(), + } + } + + pub fn payload_attributes(&self) -> PayloadAttributes { + match self { + OpExecutionPayloadEnvelope::V3(payload) => PayloadAttributes { + timestamp: payload.execution_payload.payload_inner.timestamp(), + prev_randao: payload.execution_payload.payload_inner.payload_inner.prev_randao, + suggested_fee_recipient: payload + .execution_payload + .payload_inner + .payload_inner + .fee_recipient, + withdrawals: Some(payload.execution_payload.withdrawals().clone()), + parent_beacon_block_root: Some(payload.parent_beacon_block_root), + }, + OpExecutionPayloadEnvelope::V4(payload) => PayloadAttributes { + timestamp: payload.execution_payload.payload_inner.timestamp(), + prev_randao: payload + .execution_payload + .payload_inner + .payload_inner + .payload_inner + .prev_randao, + suggested_fee_recipient: payload + .execution_payload + .payload_inner + .payload_inner + .payload_inner + .fee_recipient, + withdrawals: Some(payload.execution_payload.payload_inner.withdrawals().clone()), + parent_beacon_block_root: Some(payload.parent_beacon_block_root), + }, + } + } +} + +impl From for ExecutionPayload { + fn from(envelope: OpExecutionPayloadEnvelope) -> Self { + match envelope { + OpExecutionPayloadEnvelope::V3(v3) => ExecutionPayload::from(v3.execution_payload), + OpExecutionPayloadEnvelope::V4(v4) => { + 
ExecutionPayload::from(v4.execution_payload.payload_inner) + } + } + } +} + +#[derive(Debug, Clone)] +pub struct NewPayloadV3 { + pub payload: ExecutionPayloadV3, + pub versioned_hashes: Vec, + pub parent_beacon_block_root: B256, +} + +#[derive(Debug, Clone)] +pub struct NewPayloadV4 { + pub payload: OpExecutionPayloadV4, + pub versioned_hashes: Vec, + pub parent_beacon_block_root: B256, + pub execution_requests: Vec, +} + +#[derive(Debug, Clone)] +pub enum NewPayload { + V3(NewPayloadV3), + V4(NewPayloadV4), +} + +impl NewPayload { + pub fn version(&self) -> PayloadVersion { + match self { + NewPayload::V3(_) => PayloadVersion::V3, + NewPayload::V4(_) => PayloadVersion::V4, + } + } +} + +impl From for NewPayload { + fn from(envelope: OpExecutionPayloadEnvelope) -> Self { + match envelope { + OpExecutionPayloadEnvelope::V3(v3) => NewPayload::V3(NewPayloadV3 { + payload: v3.execution_payload, + versioned_hashes: vec![], + parent_beacon_block_root: v3.parent_beacon_block_root, + }), + OpExecutionPayloadEnvelope::V4(v4) => NewPayload::V4(NewPayloadV4 { + payload: v4.execution_payload, + versioned_hashes: vec![], + parent_beacon_block_root: v4.parent_beacon_block_root, + execution_requests: v4.execution_requests, + }), + } + } +} + +impl From for ExecutionPayload { + fn from(new_payload: NewPayload) -> Self { + match new_payload { + NewPayload::V3(v3) => ExecutionPayload::from(v3.payload), + NewPayload::V4(v4) => ExecutionPayload::from(v4.payload.payload_inner), + } + } +} + +#[derive(Debug, Clone, Copy)] +pub enum PayloadVersion { + V3, + V4, +} + +impl PayloadVersion { + pub fn as_str(&self) -> &'static str { + match self { + PayloadVersion::V3 => "v3", + PayloadVersion::V4 => "v4", + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum PayloadSource { + L2, + Builder, +} + +impl std::fmt::Display for PayloadSource { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + PayloadSource::L2 => write!(f, "l2"), + 
PayloadSource::Builder => write!(f, "builder"), + } + } +} + +#[allow(dead_code)] +impl PayloadSource { + pub fn is_builder(&self) -> bool { + matches!(self, PayloadSource::Builder) + } + + pub fn is_l2(&self) -> bool { + matches!(self, PayloadSource::L2) + } +} + +#[derive(Debug, Clone)] +pub struct PayloadTrace { + pub builder_has_payload: bool, + pub trace_id: Option, +} + +#[derive(Debug)] +pub struct PayloadTraceContext { + block_hash_to_payload_ids: Cache>, + payload_id: Cache, +} + +impl Default for PayloadTraceContext { + fn default() -> Self { + Self::new() + } +} + +impl PayloadTraceContext { + pub fn new() -> Self { + PayloadTraceContext { + block_hash_to_payload_ids: Cache::new(CACHE_SIZE), + payload_id: Cache::new(CACHE_SIZE), + } + } + + pub async fn store( + &self, + payload_id: PayloadId, + parent_hash: B256, + builder_has_payload: bool, + trace_id: Option, + ) { + self.payload_id.insert(payload_id, PayloadTrace { builder_has_payload, trace_id }).await; + self.block_hash_to_payload_ids + .entry(parent_hash) + .and_upsert_with(|o| match o { + Some(e) => { + let mut payloads = e.into_value(); + payloads.push(payload_id); + std::future::ready(payloads) + } + None => std::future::ready(vec![payload_id]), + }) + .await; + } + + pub async fn trace_ids_from_parent_hash(&self, parent_hash: &B256) -> Option> { + match self.block_hash_to_payload_ids.get(parent_hash).await { + Some(payload_ids) => Some( + stream::iter(payload_ids.iter()) + .filter_map(|payload_id| async { + self.payload_id.get(payload_id).await.and_then(|x| x.trace_id) + }) + .collect() + .await, + ), + None => None, + } + } + + pub async fn trace_id(&self, payload_id: &PayloadId) -> Option { + self.payload_id.get(payload_id).await.and_then(|x| x.trace_id) + } + + pub async fn has_builder_payload(&self, payload_id: &PayloadId) -> bool { + self.payload_id.get(payload_id).await.map(|x| x.builder_has_payload).unwrap_or_default() + } + + pub async fn remove_by_parent_hash(&self, block_hash: &B256) 
{ + if let Some(payload_ids) = self.block_hash_to_payload_ids.remove(block_hash).await { + for payload_id in payload_ids.iter() { + self.payload_id.remove(payload_id).await; + } + } + } +} diff --git a/rust/rollup-boost/crates/rollup-boost/Cargo.toml b/rust/rollup-boost/crates/rollup-boost/Cargo.toml new file mode 100644 index 0000000000000..76daf52ade4a2 --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/Cargo.toml @@ -0,0 +1,100 @@ +[package] +name = "rollup-boost" +version = "0.7.13" +edition.workspace = true +description = "Rollup Boost is a sidecar for optimism rollups that enables rollup extensions" +rust-version.workspace = true +license.workspace = true +readme = "../../README.md" + +[dependencies] +# Internal +rollup-boost-types.workspace = true + +tracing.workspace = true +tracing-subscriber = { workspace = true, features = ["env-filter", "json", "fmt", "std"] } +clap = { workspace = true, features = ["derive", "env"] } +futures.workspace = true +thiserror.workspace = true +serde = { workspace = true, features = ["derive", "std"] } +serde_json = { workspace = true, features = ["std"] } +metrics.workspace = true +tokio = { workspace = true, features = ["full"] } +eyre.workspace = true +url.workspace = true +sha2.workspace = true +moka = { workspace = true, features = ["future"] } +ed25519-dalek.workspace = true +blake3.workspace = true +hex.workspace = true + +op-alloy-rpc-types-engine = { workspace = true, features = ["serde"] } +alloy-rpc-types-engine = { workspace = true, features = ["serde", "jwt"] } +alloy-rpc-types-eth = { workspace = true, features = ["serde"] } +alloy-primitives = { workspace = true, features = ["rand", "serde"] } +alloy-serde.workspace = true +alloy-rlp.workspace = true + +tokio-tungstenite = { workspace = true, features = ["native-tls"] } +metrics-derive.workspace = true + +jsonrpsee = { workspace = true, features = ["server", "http-client", "macros"] } +http.workspace = true +dotenvy.workspace = true +tower = { workspace 
= true, features = ["timeout"] } +tower-http = { workspace = true, features = [ + "decompression-full", + "sensitive-headers", +] } +http-body-util.workspace = true +hyper = { workspace = true, features = ["full"] } +hyper-util = { workspace = true, features = ["full"] } +hyper-rustls = { workspace = true, features = ["ring"] } +rustls = { workspace = true, features = ["ring"] } +opentelemetry = { workspace = true, features = ["trace"] } +opentelemetry-otlp = { workspace = true, features = [ + "http-proto", + "http-json", + "reqwest-client", + "trace", + "grpc-tonic", +] } +opentelemetry_sdk = { workspace = true, features = ["rt-tokio"] } +tracing-opentelemetry.workspace = true +metrics-exporter-prometheus.workspace = true +metrics-util.workspace = true +paste.workspace = true +parking_lot.workspace = true +tokio-util.workspace = true +dashmap.workspace = true +backoff.workspace = true +uuid = { workspace = true, features = ["v7"] } +bytes.workspace = true +lru.workspace = true + +[dev-dependencies] +tempfile.workspace = true +serial_test.workspace = true +rand.workspace = true +time.workspace = true +op-alloy-consensus.workspace = true +alloy-eips = { workspace = true, features = ["serde"] } +alloy-consensus = { workspace = true, features = ["serde"] } +anyhow.workspace = true +ctor.workspace = true +reqwest.workspace = true +testcontainers.workspace = true + +[features] +integration = [] + +[build-dependencies] +vergen = { workspace = true, features = ["build", "cargo", "emit_and_set"] } +vergen-git2.workspace = true + +[[bin]] +name = "rollup-boost" +path = "src/bin/main.rs" + +[lib] +path = "src/lib.rs" diff --git a/rust/rollup-boost/crates/rollup-boost/build.rs b/rust/rollup-boost/crates/rollup-boost/build.rs new file mode 100644 index 0000000000000..e0e3a35bc325e --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/build.rs @@ -0,0 +1,24 @@ +use std::{env, error::Error}; +use vergen::{BuildBuilder, Emitter}; +use vergen_git2::Git2Builder; + +fn main() -> 
Result<(), Box> { + let mut emitter = Emitter::default(); + + let build_builder = BuildBuilder::default().build_timestamp(true).build()?; + emitter.add_instructions(&build_builder)?; + + let git_builder = + Git2Builder::default().describe(false, true, None).dirty(true).sha(false).build()?; + + emitter.add_instructions(&git_builder)?; + + emitter.emit_and_set()?; + let sha = env::var("VERGEN_GIT_SHA")?; + let sha_short = &sha[0..8]; + + // Set short SHA + println!("cargo:rustc-env=VERGEN_GIT_SHA_SHORT={}", &sha_short); + + Ok(()) +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/bin/main.rs b/rust/rollup-boost/crates/rollup-boost/src/bin/main.rs new file mode 100644 index 0000000000000..ddde5c63d78e2 --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/bin/main.rs @@ -0,0 +1,12 @@ +use clap::Parser; +use dotenvy::dotenv; +use rollup_boost::{RollupBoostServiceArgs, init_tracing}; + +#[tokio::main] +async fn main() -> eyre::Result<()> { + dotenv().ok(); + + let args = RollupBoostServiceArgs::parse(); + init_tracing(&args)?; + args.run().await +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/cli.rs b/rust/rollup-boost/crates/rollup-boost/src/cli.rs new file mode 100644 index 0000000000000..77cc0e68d344c --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/cli.rs @@ -0,0 +1,453 @@ +use clap::Parser; +use jsonrpsee::{RpcModule, server::Server}; +use std::{net::SocketAddr, path::PathBuf}; +use tokio::signal::unix::{SignalKind, signal as unix_signal}; +use tracing::{Level, info}; + +use crate::{ + BlockSelectionPolicy, ClientArgs, DebugServer, FlashblocksP2PArgs, FlashblocksWsArgs, + ProxyLayer, RollupBoostServer, + client::rpc::{BuilderArgs, L2ClientArgs}, + debug_api::ExecutionMode, + get_version, init_metrics, + probe::ProbeLayer, +}; +use rollup_boost_types::payload::PayloadSource; + +#[derive(Clone, Debug, clap::Args)] +pub struct RollupBoostLibArgs { + #[clap(flatten)] + pub builder: BuilderArgs, + + #[clap(flatten)] + pub 
l2_client: L2ClientArgs, + + /// Execution mode to start rollup boost with + #[arg(long, env, default_value = "enabled")] + pub execution_mode: ExecutionMode, + + #[arg(long, env)] + pub block_selection_policy: Option, + + /// Should we use the l2 client for computing state root + #[arg(long, env, default_value = "false")] + pub external_state_root: bool, + + /// Allow all engine API calls to builder even when marked as unhealthy + /// This is default true assuming no builder CL set up + #[arg(long, env, default_value = "false")] + pub ignore_unhealthy_builders: bool, + + #[clap(flatten)] + pub flashblocks_ws: FlashblocksWsArgs, + + #[clap(flatten)] + pub flashblocks_p2p: Option, + + /// Duration in seconds between async health checks on the builder + #[arg(long, env, default_value = "60")] + pub health_check_interval: u64, + + /// Max duration in seconds between the unsafe head block of the builder and the current time + #[arg(long, env, default_value = "10")] + pub max_unsafe_interval: u64, +} + +#[derive(Clone, Parser, Debug)] +#[clap(author, version = get_version(), about)] +pub struct RollupBoostServiceArgs { + #[clap(flatten)] + pub lib: RollupBoostLibArgs, + + /// Host to run the server on + #[arg(long, env, default_value = "127.0.0.1")] + pub rpc_host: String, + + /// Port to run the server on + #[arg(long, env, default_value = "8081")] + pub rpc_port: u16, + + // Enable tracing + #[arg(long, env, default_value = "false")] + pub tracing: bool, + + // Enable Prometheus metrics + #[arg(long, env, default_value = "false")] + pub metrics: bool, + + /// Host to run the metrics server on + #[arg(long, env, default_value = "127.0.0.1")] + pub metrics_host: String, + + /// Port to run the metrics server on + #[arg(long, env, default_value = "9090")] + pub metrics_port: u16, + + /// OTLP endpoint + #[arg(long, env, default_value = "http://localhost:4317")] + pub otlp_endpoint: String, + + /// Log level + #[arg(long, env, default_value = "info")] + pub log_level: 
Level, + + /// Log format + #[arg(long, env, default_value = "text")] + pub log_format: LogFormat, + + /// Redirect logs to a file + #[arg(long, env)] + pub log_file: Option, + + /// Host to run the debug server on + #[arg(long, env, default_value = "127.0.0.1")] + pub debug_host: String, + + /// Debug server port + #[arg(long, env, default_value = "5555")] + pub debug_server_port: u16, +} + +impl RollupBoostServiceArgs { + pub async fn run(self) -> eyre::Result<()> { + let _ = rustls::crypto::ring::default_provider().install_default(); + init_metrics(&self)?; + + let debug_addr = format!("{}:{}", self.debug_host, self.debug_server_port); + let l2_client_args: ClientArgs = self.lib.l2_client.clone().into(); + let l2_http_client = l2_client_args.new_http_client(PayloadSource::L2)?; + + let builder_client_args: ClientArgs = self.lib.builder.clone().into(); + let builder_http_client = builder_client_args.new_http_client(PayloadSource::Builder)?; + + let (probe_layer, probes) = ProbeLayer::new(); + + let rollup_boost = RollupBoostServer::new_from_args(self.lib.clone(), probes.clone())?; + let health_handle = rollup_boost + .spawn_health_check(self.lib.health_check_interval, self.lib.max_unsafe_interval); + let debug_server = DebugServer::new(rollup_boost.execution_mode.clone()); + debug_server.run(&debug_addr).await?; + let rpc_module: RpcModule<()> = rollup_boost.try_into()?; + + // Build and start the server + let http_middleware = tower::ServiceBuilder::new() + .layer(probe_layer) + .layer(ProxyLayer::new(l2_http_client.clone(), builder_http_client.clone())); + + let server = Server::builder() + .set_http_middleware(http_middleware) + .build(format!("{}:{}", self.rpc_host, self.rpc_port).parse::()?) 
+ .await?; + + let local_addr = server.local_addr()?; + info!("Starting server on {}", local_addr); + + let handle = server.start(rpc_module); + + let stop_handle = handle.clone(); + + // Capture SIGINT and SIGTERM + let mut sigint = unix_signal(SignalKind::interrupt())?; + let mut sigterm = unix_signal(SignalKind::terminate())?; + + tokio::select! { + _ = handle.stopped() => { + // The server has already shut down by itself + info!("Server stopped"); + } + _ = health_handle => { + info!("Health check task stopped"); + } + _ = sigint.recv() => { + info!("Received SIGINT, shutting down gracefully..."); + let _ = stop_handle.stop(); + } + _ = sigterm.recv() => { + info!("Received SIGTERM, shutting down gracefully..."); + let _ = stop_handle.stop(); + } + } + + Ok(()) + } +} + +impl Default for RollupBoostServiceArgs { + fn default() -> Self { + Self::parse_from::<_, &str>(std::iter::empty()) + } +} + +#[derive(Clone, Debug)] +pub enum LogFormat { + Json, + Text, +} + +impl std::str::FromStr for LogFormat { + type Err = String; + + fn from_str(s: &str) -> Result { + match s.to_lowercase().as_str() { + "json" => Ok(LogFormat::Json), + "text" => Ok(LogFormat::Text), + _ => Err("Invalid log format".into()), + } + } +} + +#[cfg(test)] +pub(crate) mod tests { + use std::result::Result; + + use super::*; + + const SECRET: &str = "f79ae8046bc11c9927afe911db7143c51a806c4a537cc08e0d37140b0192f430"; + const FLASHBLOCKS_SK: &str = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"; + const FLASHBLOCKS_VK: &str = "fedcba9876543210fedcba9876543210fedcba9876543210fedcba9876543210"; + + #[test] + fn test_parse_args_minimal() -> Result<(), Box> { + let args = RollupBoostServiceArgs::try_parse_from(["rollup-boost"])?; + + assert!(!args.tracing); + assert!(!args.metrics); + assert_eq!(args.rpc_host, "127.0.0.1"); + assert_eq!(args.rpc_port, 8081); + assert!(!args.lib.flashblocks_ws.flashblocks_ws); + assert!(args.lib.flashblocks_p2p.is_none()); + + Ok(()) + } + + 
#[test] + fn test_parse_args_missing_flashblocks_flag() -> Result<(), Box> { + let args = RollupBoostServiceArgs::try_parse_from([ + "rollup-boost", + "--builder-jwt-token", + SECRET, + "--l2-jwt-token", + SECRET, + "--flashblocks-authorizer-sk", + FLASHBLOCKS_SK, + "--flashblocks-builder-vk", + FLASHBLOCKS_VK, + ]); + + assert!(args.is_err(), "flashblocks args should be invalid without --flashblocks-p2p flag"); + + Ok(()) + } + + #[test] + fn test_parse_args_with_flashblocks_flag() -> Result<(), Box> { + let args = RollupBoostServiceArgs::try_parse_from([ + "rollup-boost", + "--builder-jwt-token", + SECRET, + "--l2-jwt-token", + SECRET, + "--flashblocks-p2p", + "--flashblocks-authorizer-sk", + FLASHBLOCKS_SK, + "--flashblocks-builder-vk", + FLASHBLOCKS_VK, + ])?; + + let flashblocks = + args.lib.flashblocks_p2p.expect("flashblocks should be Some when flag is passed"); + assert!(flashblocks.flashblocks_p2p); + + Ok(()) + } + + #[test] + fn test_parse_args_with_flashblocks_custom_values() -> Result<(), Box> { + let args = RollupBoostServiceArgs::try_parse_from([ + "rollup-boost", + "--builder-jwt-token", + SECRET, + "--l2-jwt-token", + SECRET, + "--flashblocks-p2p", + "--flashblocks-authorizer-sk", + FLASHBLOCKS_SK, + "--flashblocks-builder-vk", + FLASHBLOCKS_VK, + ])?; + + let flashblocks = + args.lib.flashblocks_p2p.expect("flashblocks should be Some when flag is passed"); + assert!(flashblocks.flashblocks_p2p); + + Ok(()) + } + + #[test] + fn test_parse_args_with_all_options() -> Result<(), Box> { + let args = RollupBoostServiceArgs::try_parse_from([ + "rollup-boost", + "--builder-jwt-token", + SECRET, + "--l2-jwt-token", + SECRET, + "--health-check-interval", + "120", + "--max-unsafe-interval", + "20", + "--rpc-host", + "0.0.0.0", + "--rpc-port", + "9090", + "--tracing", + "--metrics", + "--metrics-host", + "192.168.1.1", + "--metrics-port", + "8080", + "--log-level", + "debug", + "--log-format", + "json", + "--debug-host", + "localhost", + 
"--debug-server-port", + "6666", + "--execution-mode", + "disabled", + "--flashblocks-p2p", + "--flashblocks-authorizer-sk", + FLASHBLOCKS_SK, + "--flashblocks-builder-vk", + FLASHBLOCKS_VK, + ])?; + + assert_eq!(args.lib.health_check_interval, 120); + assert_eq!(args.lib.max_unsafe_interval, 20); + assert_eq!(args.rpc_host, "0.0.0.0"); + assert_eq!(args.rpc_port, 9090); + assert!(args.tracing); + assert!(args.metrics); + assert_eq!(args.metrics_host, "192.168.1.1"); + assert_eq!(args.metrics_port, 8080); + assert_eq!(args.log_level, Level::DEBUG); + assert_eq!(args.debug_host, "localhost"); + assert_eq!(args.debug_server_port, 6666); + + let flashblocks = + args.lib.flashblocks_p2p.expect("flashblocks should be Some when flag is passed"); + assert!(flashblocks.flashblocks_p2p); + + Ok(()) + } + + #[test] + fn test_parse_args_missing_jwt_succeeds_at_parse_time() { + // JWT validation happens at runtime, not parse time, so this should succeed + let result = + RollupBoostServiceArgs::try_parse_from(["rollup-boost", "--builder-jwt-token", SECRET]); + + assert!(result.is_ok()); + let args = result.unwrap(); + assert!(args.lib.builder.builder_jwt_token.is_some()); + assert!(args.lib.l2_client.l2_jwt_token.is_none()); + } + + #[test] + fn test_parse_args_invalid_flashblocks_sk() { + let result = RollupBoostServiceArgs::try_parse_from([ + "rollup-boost", + "--builder-jwt-token", + SECRET, + "--l2-jwt-token", + SECRET, + "--flashblocks-p2p", + "--flashblocks-authorizer-sk", + "invalid_hex", + "--flashblocks-builder-vk", + FLASHBLOCKS_VK, + ]); + + assert!(result.is_err()); + } + + #[test] + fn test_parse_args_invalid_flashblocks_vk() { + let result = RollupBoostServiceArgs::try_parse_from([ + "rollup-boost", + "--builder-jwt-token", + SECRET, + "--l2-jwt-token", + SECRET, + "--flashblocks-p2p", + "--flashblocks-authorizer-sk", + FLASHBLOCKS_SK, + "--flashblocks-builder-vk", + "invalid_hex", + ]); + + assert!(result.is_err()); + } + + #[test] + fn test_log_format_parsing() 
-> Result<(), Box> { + let json_args = RollupBoostServiceArgs::try_parse_from([ + "rollup-boost", + "--builder-jwt-token", + SECRET, + "--l2-jwt-token", + SECRET, + "--log-format", + "json", + ])?; + + match json_args.log_format { + LogFormat::Json => {} + LogFormat::Text => panic!("Expected Json format"), + } + + let text_args = RollupBoostServiceArgs::try_parse_from([ + "rollup-boost", + "--builder-jwt-token", + SECRET, + "--l2-jwt-token", + SECRET, + "--log-format", + "text", + ])?; + + match text_args.log_format { + LogFormat::Text => {} + LogFormat::Json => panic!("Expected Text format"), + } + + Ok(()) + } + + #[test] + fn test_parse_args_with_jwt_paths() -> Result<(), Box> { + use std::io::Write; + use tempfile::NamedTempFile; + + let mut builder_jwt_file = NamedTempFile::new()?; + writeln!(builder_jwt_file, "{SECRET}")?; + let builder_jwt_path = builder_jwt_file.path(); + + let mut l2_jwt_file = NamedTempFile::new()?; + writeln!(l2_jwt_file, "{SECRET}")?; + let l2_jwt_path = l2_jwt_file.path(); + + let args = RollupBoostServiceArgs::try_parse_from([ + "rollup-boost", + "--builder-jwt-path", + builder_jwt_path.to_str().unwrap(), + "--l2-jwt-path", + l2_jwt_path.to_str().unwrap(), + ])?; + + assert!(args.lib.builder.builder_jwt_path.is_some()); + assert!(args.lib.l2_client.l2_jwt_path.is_some()); + + Ok(()) + } +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/client/auth.rs b/rust/rollup-boost/crates/rollup-boost/src/client/auth.rs new file mode 100644 index 0000000000000..8fc7507474f3c --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/client/auth.rs @@ -0,0 +1,86 @@ +// From reth_rpc_layer +use alloy_rpc_types_engine::{Claims, JwtSecret}; +use http::{HeaderValue, header::AUTHORIZATION}; +use std::{ + iter::once, + task::{Context, Poll}, + time::{SystemTime, UNIX_EPOCH}, +}; +use tower::{Layer, Service}; +use tower_http::sensitive_headers::{SetSensitiveRequestHeaders, SetSensitiveRequestHeadersLayer}; + +pub type Auth = AuthService>; + 
+/// A layer that adds a new JWT token to every request using `AuthClientService`. +#[derive(Clone, Debug)] +pub struct AuthLayer { + secret: JwtSecret, +} + +impl AuthLayer { + /// Create a new `AuthClientLayer` with the given `secret`. + pub const fn new(secret: JwtSecret) -> Self { + Self { secret } + } +} + +impl Layer for AuthLayer { + type Service = AuthService>; + + fn layer(&self, inner: S) -> Self::Service { + let inner = SetSensitiveRequestHeadersLayer::new(once(AUTHORIZATION)).layer(inner); + AuthService::new(self.secret, inner) + } +} + +/// Automatically authenticates every client request with the given `secret`. +#[derive(Debug, Clone)] +pub struct AuthService { + secret: JwtSecret, + inner: S, +} + +impl AuthService { + const fn new(secret: JwtSecret, inner: S) -> Self { + Self { secret, inner } + } +} + +impl Service> for AuthService +where + S: Service>, + B: std::fmt::Debug, +{ + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, mut request: http::Request) -> Self::Future { + request.headers_mut().insert(AUTHORIZATION, secret_to_bearer_header(&self.secret)); + self.inner.call(request) + } +} + +/// Helper function to convert a secret into a Bearer auth header value with claims according to +/// . +/// The token is valid for 60 seconds. 
+pub fn secret_to_bearer_header(secret: &JwtSecret) -> HeaderValue { + format!( + "Bearer {}", + secret + .encode(&Claims { + iat: SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("Failed to get epoch time") + .as_secs(), + exp: None, + }) + .expect("Failed to encode JWT claims") + ) + .parse() + .expect("Failed to parse JWT Header") +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/client/http.rs b/rust/rollup-boost/crates/rollup-boost/src/client/http.rs new file mode 100644 index 0000000000000..e1d0345209569 --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/client/http.rs @@ -0,0 +1,112 @@ +use std::time::Duration; + +use crate::client::auth::AuthLayer; +use alloy_primitives::bytes::Bytes; +use alloy_rpc_types_engine::JwtSecret; +use http::Uri; +use http_body_util::{BodyExt, Full}; +use hyper::body::Body; +use hyper_rustls::HttpsConnector; +use hyper_util::{ + client::legacy::{Client, connect::HttpConnector}, + rt::TokioExecutor, +}; +use jsonrpsee::{core::BoxError, server::HttpBody}; +use opentelemetry::trace::SpanKind; +use rollup_boost_types::payload::PayloadSource; +use tower::{ + Service as _, ServiceBuilder, ServiceExt, + timeout::{Timeout, TimeoutLayer}, +}; +use tower_http::decompression::{Decompression, DecompressionLayer}; +use tracing::{debug, error, instrument}; + +use super::auth::Auth; + +pub type HttpClientService = + Timeout, HttpBody>>>>; + +#[derive(Clone, Debug)] +pub struct HttpClient { + client: HttpClientService, + url: Uri, + target: PayloadSource, +} + +impl HttpClient { + pub fn new(url: Uri, secret: JwtSecret, target: PayloadSource, timeout: u64) -> Self { + let connector = hyper_rustls::HttpsConnectorBuilder::new() + .with_native_roots() + .expect("no native root CA certificates found") + .https_or_http() + .enable_http1() + .enable_http2() + .build(); + + let client = Client::builder(TokioExecutor::new()).build(connector); + + let client = ServiceBuilder::new() + 
.layer(TimeoutLayer::new(Duration::from_millis(timeout))) + .layer(DecompressionLayer::new()) + .layer(AuthLayer::new(secret)) + .service(client); + + Self { client, url, target } + } + + /// Forwards an HTTP request to the `authrpc`, attaching the provided JWT authorization. + #[instrument( + skip(self, req), + fields( + otel.kind = ?SpanKind::Client, + url = %self.url, + method, + code, + ), + err(Debug) + )] + pub async fn forward( + &mut self, + mut req: http::Request, + method: String, + ) -> Result>, BoxError> + where + B: Body>> + + Send + + 'static, + { + debug!("forwarding {} to {}", method, self.target); + tracing::Span::current().record("method", method); + *req.uri_mut() = self.url.clone(); + + let req = req.map(HttpBody::new); + + let res = self.client.ready().await?.call(req).await?; + + let (parts, body) = res.into_parts(); + let body_bytes = body.collect().await?.to_bytes(); + + if let Some(code) = parse_response_code(&body_bytes)? { + error!(%code, "error in forwarded response"); + tracing::Span::current().record("code", code); + } + + Ok(http::Response::from_parts(parts, Full::from(body_bytes))) + } +} + +fn parse_response_code(body_bytes: &[u8]) -> eyre::Result> { + #[derive(serde::Deserialize, Debug)] + struct RpcResponse { + error: Option, + } + + #[derive(serde::Deserialize, Debug)] + struct JsonRpcError { + code: i32, + } + + let res = serde_json::from_slice::(body_bytes)?; + + Ok(res.error.map(|e| e.code)) +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/client/mod.rs b/rust/rollup-boost/crates/rollup-boost/src/client/mod.rs new file mode 100644 index 0000000000000..364eb9b99fbac --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/client/mod.rs @@ -0,0 +1,3 @@ +pub mod auth; +pub mod http; +pub mod rpc; diff --git a/rust/rollup-boost/crates/rollup-boost/src/client/rpc.rs b/rust/rollup-boost/crates/rollup-boost/src/client/rpc.rs new file mode 100644 index 0000000000000..a2cb37413c030 --- /dev/null +++ 
b/rust/rollup-boost/crates/rollup-boost/src/client/rpc.rs @@ -0,0 +1,651 @@ +use crate::{ + EngineApiExt, ExecutionMode, FlashblocksEngineApiClient as _, + client::{auth::AuthLayer, http::HttpClient as RollupBoostHttpClient}, + server::EngineApiClient, + version::{CARGO_PKG_VERSION, VERGEN_GIT_SHA}, +}; +use alloy_primitives::{B256, Bytes}; +use alloy_rpc_types_engine::{ + ExecutionPayloadV3, ForkchoiceState, ForkchoiceUpdated, JwtError, JwtSecret, PayloadId, + PayloadStatus, +}; +use alloy_rpc_types_eth::{Block, BlockNumberOrTag}; +use clap::Parser; +use ed25519_dalek::{SigningKey, VerifyingKey}; +use eyre::bail; +use http::{HeaderMap, Uri}; +use jsonrpsee::{ + core::{async_trait, middleware::layer::RpcLogger}, + http_client::{HttpClient, HttpClientBuilder, RpcService, transport::HttpBackend}, + types::ErrorObjectOwned, +}; +use op_alloy_rpc_types_engine::{ + OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4, OpExecutionPayloadV4, + OpPayloadAttributes, +}; +use opentelemetry::trace::SpanKind; +use parking_lot::Mutex; +use paste::paste; +use rollup_boost_types::{ + authorization::Authorization, + payload::{NewPayload, OpExecutionPayloadEnvelope, PayloadSource, PayloadVersion}, +}; +use std::{path::PathBuf, sync::Arc, time::Duration}; +use thiserror::Error; +use tracing::{info, instrument}; + +use super::auth::Auth; + +pub type RpcClientService = HttpClient>>>; + +const INTERNAL_ERROR: i32 = 13; + +pub(crate) type ClientResult = Result; + +#[derive(Error, Debug)] +pub enum RpcClientError { + #[error(transparent)] + Jsonrpsee(#[from] jsonrpsee::core::client::Error), + #[error("Invalid payload: {0}")] + InvalidPayload(String), + #[error(transparent)] + Io(#[from] std::io::Error), + #[error(transparent)] + Jwt(#[from] JwtError), +} + +trait Code: Sized { + fn code(&self) -> i32; + + fn set_code(self) -> Self { + tracing::Span::current().record("code", self.code()); + self + } +} + +impl Code for Result { + fn code(&self) -> i32 { + match self { + Ok(_) => 0, 
+ Err(e) => e.code(), + } + } +} + +/// TODO: Add more robust error code system +impl Code for RpcClientError { + fn code(&self) -> i32 { + match self { + RpcClientError::Jsonrpsee(e) => e.code(), + // Status code 13 == internal error + _ => INTERNAL_ERROR, + } + } +} + +impl Code for jsonrpsee::core::client::Error { + fn code(&self) -> i32 { + match self { + jsonrpsee::core::client::Error::Call(call) => call.code(), + _ => INTERNAL_ERROR, + } + } +} + +impl From for ErrorObjectOwned { + fn from(err: RpcClientError) -> Self { + match err { + RpcClientError::Jsonrpsee(jsonrpsee::core::ClientError::Call(error_object)) => { + error_object + } + // Status code 13 == internal error + e => ErrorObjectOwned::owned(INTERNAL_ERROR, e.to_string(), Option::<()>::None), + } + } +} + +#[derive(Clone, Debug)] +pub struct FlashblocksP2PKeys { + /// Flashblocks Authorization Secret + pub authorization_sk: SigningKey, + /// Flashblocks builder vk + pub builder_vk: VerifyingKey, +} + +/// Client interface for interacting with execution layer node's Engine API. +/// +/// - **Engine API** calls are facilitated via the `auth_client` (requires JWT authentication). +#[derive(Clone, Debug)] +pub struct RpcClient { + /// Handles requests to the authenticated Engine API (requires JWT authentication) + auth_client: RpcClientService, + /// Uri of the RPC server for authenticated Engine API calls + auth_rpc: Uri, + /// The source of the payload + payload_source: PayloadSource, +} + +/// Client interface for interacting with execution layer node's Engine API. +/// +/// fork_choice_updated_v3 with attributes is converted into +/// a flashblocks_fork_choice_updated_v3 and an Authorization token is generated +/// +/// - **Engine API** calls are facilitated via the `auth_client` (requires JWT authentication). 
+#[derive(Clone, Debug)] +pub struct FlasblocksP2PRpcClient { + /// Inner RPC client + pub inner: RpcClient, + /// Flashblocks keys + pub flashblocks_p2p_keys: FlashblocksP2PKeys, + /// Execution mode of rollup boost + pub execution_mode: Arc>, +} + +impl RpcClient { + /// Initializes a new [`RpcClient`] with JWT auth for the Engine API and without auth for + /// general execution layer APIs. + pub fn new( + auth_rpc: Uri, + auth_rpc_jwt_secret: JwtSecret, + timeout: u64, + payload_source: PayloadSource, + ) -> Result { + let version = format!("{CARGO_PKG_VERSION}-{VERGEN_GIT_SHA}"); + let mut headers = HeaderMap::new(); + headers.insert("User-Agent", version.parse().unwrap()); + + let auth_layer = AuthLayer::new(auth_rpc_jwt_secret); + let auth_client = HttpClientBuilder::new() + .set_http_middleware(tower::ServiceBuilder::new().layer(auth_layer)) + .set_headers(headers) + .request_timeout(Duration::from_millis(timeout)) + .build(auth_rpc.to_string())?; + + Ok(Self { auth_client, auth_rpc, payload_source }) + } + + #[instrument( + skip_all, + err, + fields( + otel.kind = ?SpanKind::Client, + target = self.payload_source.to_string(), + head_block_hash = %fork_choice_state.head_block_hash, + url = %self.auth_rpc, + payload_attributes = payload_attributes.is_some(), + code, + payload_id + ) + )] + pub async fn fork_choice_updated_v3( + &self, + fork_choice_state: ForkchoiceState, + payload_attributes: Option, + ) -> ClientResult { + info!("Sending fork_choice_updated_v3 to {}", self.payload_source); + + let res = self + .auth_client + .fork_choice_updated_v3(fork_choice_state, payload_attributes.clone()) + .await + .set_code()?; + + if let Some(payload_id) = res.payload_id { + tracing::Span::current().record("payload_id", payload_id.to_string()); + } + + if res.is_invalid() { + return Err(RpcClientError::InvalidPayload(res.payload_status.status.to_string())) + .set_code(); + } + info!("Successfully sent fork_choice_updated_v3 to {}", self.payload_source); + + Ok(res) 
+ } + + #[instrument( + skip(self), + err, + fields( + otel.kind = ?SpanKind::Client, + target = self.payload_source.to_string(), + url = %self.auth_rpc, + %payload_id, + ) + )] + pub async fn get_payload_v3( + &self, + payload_id: PayloadId, + ) -> ClientResult { + tracing::Span::current().record("payload_id", payload_id.to_string()); + info!("Sending get_payload_v3 to {}", self.payload_source); + Ok(self.auth_client.get_payload_v3(payload_id).await.set_code()?) + } + + #[instrument( + skip_all, + err, + fields( + otel.kind = ?SpanKind::Client, + target = self.payload_source.to_string(), + url = %self.auth_rpc, + block_hash = %payload.payload_inner.payload_inner.block_hash, + code, + ) + )] + pub async fn new_payload_v3( + &self, + payload: ExecutionPayloadV3, + versioned_hashes: Vec, + parent_beacon_block_root: B256, + ) -> ClientResult { + info!("Sending new_payload_v3 to {}", self.payload_source); + + let res = self + .auth_client + .new_payload_v3(payload, versioned_hashes, parent_beacon_block_root) + .await + .set_code()?; + + if res.is_invalid() { + return Err(RpcClientError::InvalidPayload(res.status.to_string()).set_code()); + } + + Ok(res) + } + + #[instrument( + skip(self), + err, + fields( + otel.kind = ?SpanKind::Client, + target = self.payload_source.to_string(), + url = %self.auth_rpc, + %payload_id, + ) + )] + pub async fn get_payload_v4( + &self, + payload_id: PayloadId, + ) -> ClientResult { + info!("Sending get_payload_v4 to {}", self.payload_source); + Ok(self.auth_client.get_payload_v4(payload_id).await.set_code()?) 
+ } + + pub async fn get_payload( + &self, + payload_id: PayloadId, + version: PayloadVersion, + ) -> ClientResult { + match version { + PayloadVersion::V3 => Ok(OpExecutionPayloadEnvelope::V3( + self.get_payload_v3(payload_id).await.set_code()?, + )), + PayloadVersion::V4 => Ok(OpExecutionPayloadEnvelope::V4( + self.get_payload_v4(payload_id).await.set_code()?, + )), + } + } + + #[instrument( + skip_all, + err, + fields( + otel.kind = ?SpanKind::Client, + target = self.payload_source.to_string(), + url = %self.auth_rpc, + block_hash = %payload.payload_inner.payload_inner.payload_inner.block_hash, + code, + ) + )] + async fn new_payload_v4( + &self, + payload: OpExecutionPayloadV4, + versioned_hashes: Vec, + parent_beacon_block_root: B256, + execution_requests: Vec, + ) -> ClientResult { + info!("Sending new_payload_v4 to {}", self.payload_source); + + let res = self + .auth_client + .new_payload_v4(payload, versioned_hashes, parent_beacon_block_root, execution_requests) + .await + .set_code()?; + + if res.is_invalid() { + return Err(RpcClientError::InvalidPayload(res.status.to_string()).set_code()); + } + + Ok(res) + } + + pub async fn new_payload(&self, new_payload: NewPayload) -> ClientResult { + match new_payload { + NewPayload::V3(new_payload) => { + self.new_payload_v3( + new_payload.payload, + new_payload.versioned_hashes, + new_payload.parent_beacon_block_root, + ) + .await + } + NewPayload::V4(new_payload) => { + self.new_payload_v4( + new_payload.payload, + new_payload.versioned_hashes, + new_payload.parent_beacon_block_root, + new_payload.execution_requests, + ) + .await + } + } + } + + pub async fn get_block_by_number( + &self, + number: BlockNumberOrTag, + full: bool, + ) -> ClientResult { + Ok(self.auth_client.get_block_by_number(number, full).await.set_code()?) 
+ } +} + +#[async_trait] +impl EngineApiExt for RpcClient { + async fn fork_choice_updated_v3( + &self, + fork_choice_state: ForkchoiceState, + payload_attributes: Option, + ) -> ClientResult { + self.fork_choice_updated_v3(fork_choice_state, payload_attributes).await + } + + async fn new_payload(&self, new_payload: NewPayload) -> ClientResult { + self.new_payload(new_payload).await + } + + async fn get_payload( + &self, + payload_id: PayloadId, + version: PayloadVersion, + ) -> ClientResult { + self.get_payload(payload_id, version).await + } + + async fn get_block_by_number( + &self, + number: BlockNumberOrTag, + full: bool, + ) -> ClientResult { + self.get_block_by_number(number, full).await + } +} + +impl FlasblocksP2PRpcClient { + #[instrument( + skip_all, + err, + fields( + otel.kind = ?SpanKind::Client, + target = self.inner.payload_source.to_string(), + head_block_hash = %fork_choice_state.head_block_hash, + url = %self.inner.auth_rpc, + code, + payload_id + ) + )] + pub async fn flashblocks_fork_choice_updated_v3( + &self, + fork_choice_state: ForkchoiceState, + payload_attributes: OpPayloadAttributes, + ) -> ClientResult { + info!("Sending flashblocks_fork_choice_updated_v3 to {}", self.inner.payload_source); + + let payload_id = payload_attributes.payload_id(&fork_choice_state.head_block_hash, 3); + let authorization = Authorization::new( + payload_id, + payload_attributes.payload_attributes.timestamp, + &self.flashblocks_p2p_keys.authorization_sk, + self.flashblocks_p2p_keys.builder_vk.clone(), + ); + + let res = self + .inner + .auth_client + .flashblocks_fork_choice_updated_v3( + fork_choice_state, + Some(payload_attributes), + Some(authorization), + ) + .await + .set_code()?; + + if let Some(payload_id) = res.payload_id { + tracing::Span::current().record("payload_id", payload_id.to_string()); + } + + if res.is_invalid() { + return Err(RpcClientError::InvalidPayload(res.payload_status.status.to_string())) + .set_code(); + } + info!( + "Successfully 
sent flashblocks_fork_choice_updated_v3 to {}", + self.inner.payload_source + ); + + Ok(res) + } +} + +#[async_trait] +impl EngineApiExt for FlasblocksP2PRpcClient { + async fn fork_choice_updated_v3( + &self, + fork_choice_state: ForkchoiceState, + payload_attributes: Option, + ) -> ClientResult { + let execution_mode = *self.execution_mode.lock(); + match (payload_attributes, execution_mode) { + // If we have payload attributes and execution mode is enabled, generate authorization + // We don't want to the builder to publish flashblocks if rollup boost is not going to + // honour the block. + (Some(attrs), ExecutionMode::Enabled) => Ok(self + .flashblocks_fork_choice_updated_v3(fork_choice_state, attrs) + .await + .set_code()?), + (attrs, _) => { + Ok(self.inner.fork_choice_updated_v3(fork_choice_state, attrs).await.set_code()?) + } + } + } + + async fn new_payload(&self, new_payload: NewPayload) -> ClientResult { + self.inner.new_payload(new_payload).await + } + + async fn get_payload( + &self, + payload_id: PayloadId, + version: PayloadVersion, + ) -> ClientResult { + self.inner.get_payload(payload_id, version).await + } + + async fn get_block_by_number( + &self, + number: BlockNumberOrTag, + full: bool, + ) -> ClientResult { + self.inner.get_block_by_number(number, full).await + } +} + +#[derive(Debug, Clone)] +pub struct ClientArgs { + /// Auth server address + pub url: Uri, + + /// Hex encoded JWT secret to use for the authenticated engine-API RPC server. + pub jwt_token: Option, + + /// Path to a JWT secret to use for the authenticated engine-API RPC server. + pub jwt_path: Option, + + /// Timeout for http calls in milliseconds + pub timeout: u64, +} + +impl ClientArgs { + fn get_auth_jwt(&self) -> eyre::Result { + if let Some(secret) = self.jwt_token { + Ok(secret) + } else if let Some(path) = self.jwt_path.as_ref() { + Ok(JwtSecret::from_file(path)?) 
+ } else { + bail!("Missing Client JWT secret"); + } + } + + pub fn new_rpc_client(&self, payload_source: PayloadSource) -> eyre::Result { + RpcClient::new(self.url.clone(), self.get_auth_jwt()?, self.timeout, payload_source) + .map_err(eyre::Report::from) + } + + pub fn new_http_client( + &self, + payload_source: PayloadSource, + ) -> eyre::Result { + Ok(RollupBoostHttpClient::new( + self.url.clone(), + self.get_auth_jwt()?, + payload_source, + self.timeout, + )) + } +} + +/// Generates Clap argument structs with a prefix to create a unique namespace when specifying RPC +/// client config via the CLI. +macro_rules! define_client_args { + ($(($name:ident, $prefix:ident)),*) => { + $( + paste! { + #[derive(Parser, Debug, Clone, PartialEq, Eq)] + pub struct $name { + /// Auth server address + #[arg(long, env, default_value = "127.0.0.1:8551")] + pub [<$prefix _url>]: Uri, + + /// Hex encoded JWT secret to use for the authenticated engine-API RPC server. + #[arg(long, env, value_name = "HEX")] + pub [<$prefix _jwt_token>]: Option, + + /// Path to a JWT secret to use for the authenticated engine-API RPC server. 
+ #[arg(long, env, value_name = "PATH")] + pub [<$prefix _jwt_path>]: Option, + + /// Timeout for http calls in milliseconds + #[arg(long, env, default_value_t = 1000)] + pub [<$prefix _timeout>]: u64, + } + + + impl From<$name> for ClientArgs { + fn from(args: $name) -> Self { + ClientArgs { + url: args.[<$prefix _url>].clone(), + jwt_token: args.[<$prefix _jwt_token>].clone(), + jwt_path: args.[<$prefix _jwt_path>], + timeout: args.[<$prefix _timeout>], + } + } + } + } + )* + }; +} + +define_client_args!((BuilderArgs, builder), (L2ClientArgs, l2)); + +#[cfg(test)] +pub(crate) mod tests { + use http::Uri; + use jsonrpsee::core::client::ClientT; + use parking_lot::Mutex; + + use alloy_rpc_types_engine::JwtSecret; + use jsonrpsee::{ + RpcModule, + core::client::Error as ClientError, + rpc_params, + server::{ServerBuilder, ServerHandle}, + }; + use rollup_boost_types::payload::PayloadSource; + use std::{ + collections::HashSet, + net::{SocketAddr, TcpListener}, + result::Result, + str::FromStr, + sync::LazyLock, + }; + + use super::*; + + const AUTH_ADDR: &str = "127.0.0.1"; + const SECRET: &str = "f79ae8046bc11c9927afe911db7143c51a806c4a537cc08e0d37140b0192f430"; + + pub fn get_available_port() -> u16 { + static CLAIMED_PORTS: LazyLock>> = + LazyLock::new(|| Mutex::new(HashSet::new())); + loop { + let port: u16 = rand::random_range(1000..20000); + if TcpListener::bind(("127.0.0.1", port)).is_ok() && CLAIMED_PORTS.lock().insert(port) { + return port; + } + } + } + + #[tokio::test] + async fn valid_jwt() { + let port = get_available_port(); + let secret = JwtSecret::from_hex(SECRET).unwrap(); + let auth_rpc = Uri::from_str(&format!("http://{AUTH_ADDR}:{port}")).unwrap(); + let client = RpcClient::new(auth_rpc, secret, 1000, PayloadSource::L2).unwrap(); + let response = send_request(client.auth_client, port).await; + assert!(response.is_ok()); + assert_eq!(response.unwrap(), "You are the dark lord"); + } + + async fn send_request(client: RpcClientService, port: u16) -> 
Result { + let server = spawn_server(port).await; + + let response = client.request::("greet_melkor", rpc_params![]).await; + + server.stop().unwrap(); + server.stopped().await; + + response + } + + /// Spawn a new RPC server equipped with a `JwtLayer` auth middleware. + async fn spawn_server(port: u16) -> ServerHandle { + let secret = JwtSecret::from_hex(SECRET).unwrap(); + let addr = format!("{AUTH_ADDR}:{port}"); + let layer = AuthLayer::new(secret); + let middleware = tower::ServiceBuilder::new().layer(layer); + + // Create a layered server + let server = ServerBuilder::default() + .set_http_middleware(middleware) + .build(addr.parse::().unwrap()) + .await + .unwrap(); + + // Create a mock rpc module + let mut module = RpcModule::new(()); + module.register_method("greet_melkor", |_, _, _| "You are the dark lord").unwrap(); + + server.start(module) + } +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/debug_api.rs b/rust/rollup-boost/crates/rollup-boost/src/debug_api.rs new file mode 100644 index 0000000000000..1cc0d60cd8eb6 --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/debug_api.rs @@ -0,0 +1,186 @@ +use jsonrpsee::{ + core::{RpcResult, async_trait}, + http_client::HttpClient, + proc_macros::rpc, + server::Server, +}; +use parking_lot::Mutex; +use serde::{Deserialize, Serialize}; +use std::sync::Arc; + +use crate::update_execution_mode_gauge; + +#[derive(Serialize, Deserialize, Debug, Copy, Clone, PartialEq, clap::ValueEnum)] +#[serde(rename_all = "snake_case")] +pub enum ExecutionMode { + // Normal execution, sending all requests + Enabled, + // Not sending get_payload requests + DryRun, + // Not sending any requests + Disabled, +} + +impl ExecutionMode { + pub fn is_dry_run(&self) -> bool { + matches!(self, ExecutionMode::DryRun) + } + + pub fn is_disabled(&self) -> bool { + matches!(self, ExecutionMode::Disabled) + } + + pub fn is_enabled(&self) -> bool { + matches!(self, ExecutionMode::Enabled) + } + + pub fn to_metric_value(&self) 
-> f64 { + match self { + ExecutionMode::Enabled => 3.0, + ExecutionMode::DryRun => 2.0, + ExecutionMode::Disabled => 1.0, + } + } +} + +#[derive(Serialize, Deserialize, Debug)] +pub struct SetExecutionModeRequest { + pub execution_mode: ExecutionMode, +} + +#[derive(Serialize, Deserialize, Clone, Debug)] +pub struct SetExecutionModeResponse { + pub execution_mode: ExecutionMode, +} + +#[derive(Serialize, Deserialize, Clone, Debug)] +pub struct GetExecutionModeResponse { + pub execution_mode: ExecutionMode, +} + +#[rpc(server, client, namespace = "debug")] +pub trait DebugApi { + #[method(name = "setExecutionMode")] + async fn set_execution_mode( + &self, + request: SetExecutionModeRequest, + ) -> RpcResult; + + #[method(name = "getExecutionMode")] + async fn get_execution_mode(&self) -> RpcResult; +} + +pub struct DebugServer { + execution_mode: Arc>, +} + +impl DebugServer { + pub fn new(execution_mode: Arc>) -> Self { + Self { execution_mode } + } + + pub async fn run(self, debug_addr: &str) -> eyre::Result<()> { + let server = Server::builder().build(debug_addr).await?; + + // Register the initial execution mode metric + let current_mode = self.execution_mode(); + update_execution_mode_gauge(current_mode); + + let handle = server.start(self.into_rpc()); + + tracing::info!("Debug server listening on addr {}", debug_addr); + + // In this example we don't care about doing shutdown so let's it run forever. + // You may use the `ServerHandle` to shut it down or manage it yourself. 
+ tokio::spawn(handle.stopped()); + + Ok(()) + } + + pub fn execution_mode(&self) -> ExecutionMode { + *self.execution_mode.lock() + } + + pub fn set_execution_mode(&self, mode: ExecutionMode) { + *self.execution_mode.lock() = mode; + update_execution_mode_gauge(mode); + } +} + +#[async_trait] +impl DebugApiServer for DebugServer { + async fn set_execution_mode( + &self, + request: SetExecutionModeRequest, + ) -> RpcResult { + self.set_execution_mode(request.execution_mode); + + tracing::info!("Set execution mode to {:?}", request.execution_mode); + + Ok(SetExecutionModeResponse { execution_mode: request.execution_mode }) + } + + async fn get_execution_mode(&self) -> RpcResult { + Ok(GetExecutionModeResponse { execution_mode: self.execution_mode() }) + } +} + +pub struct DebugClient { + client: HttpClient, +} + +impl DebugClient { + pub fn new(url: &str) -> eyre::Result { + let client = HttpClient::builder().build(url)?; + + Ok(Self { client }) + } + + pub async fn set_execution_mode( + &self, + execution_mode: ExecutionMode, + ) -> eyre::Result { + let request = SetExecutionModeRequest { execution_mode }; + let result = DebugApiClient::set_execution_mode(&self.client, request).await?; + Ok(result) + } + + pub async fn get_execution_mode(&self) -> eyre::Result { + let result = DebugApiClient::get_execution_mode(&self.client).await?; + Ok(result) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + const DEFAULT_ADDR: &str = "127.0.0.1:5555"; + + #[tokio::test] + async fn test_debug_client() { + // spawn the server and try to modify it with the client + let execution_mode = Arc::new(Mutex::new(ExecutionMode::Enabled)); + + let server = DebugServer::new(execution_mode.clone()); + server.run(DEFAULT_ADDR).await.unwrap(); + + let client = DebugClient::new(format!("http://{DEFAULT_ADDR}").as_str()).unwrap(); + + // Test setting execution mode to Disabled + let result = client.set_execution_mode(ExecutionMode::Disabled).await.unwrap(); + 
assert_eq!(result.execution_mode, ExecutionMode::Disabled); + + // Verify with get_execution_mode + let status = client.get_execution_mode().await.unwrap(); + assert_eq!(status.execution_mode, ExecutionMode::Disabled); + + // Test setting execution mode back to Enabled + let result = client.set_execution_mode(ExecutionMode::Enabled).await.unwrap(); + assert_eq!(result.execution_mode, ExecutionMode::Enabled); + + // Verify again with get_execution_mode + let status = client.get_execution_mode().await.unwrap(); + assert_eq!(status.execution_mode, ExecutionMode::Enabled); + } +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/engine_api.rs b/rust/rollup-boost/crates/rollup-boost/src/engine_api.rs new file mode 100644 index 0000000000000..5023cea7ed514 --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/engine_api.rs @@ -0,0 +1,30 @@ +use alloy_rpc_types_engine::{ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus}; +use alloy_rpc_types_eth::{Block, BlockNumberOrTag}; +use jsonrpsee::core::async_trait; +use op_alloy_rpc_types_engine::OpPayloadAttributes; + +use crate::ClientResult; +use rollup_boost_types::payload::{NewPayload, OpExecutionPayloadEnvelope, PayloadVersion}; + +#[async_trait] +pub trait EngineApiExt: std::fmt::Debug + Send + Sync + 'static { + async fn fork_choice_updated_v3( + &self, + fork_choice_state: ForkchoiceState, + payload_attributes: Option, + ) -> ClientResult; + + async fn new_payload(&self, new_payload: NewPayload) -> ClientResult; + + async fn get_payload( + &self, + payload_id: PayloadId, + version: PayloadVersion, + ) -> ClientResult; + + async fn get_block_by_number( + &self, + number: BlockNumberOrTag, + full: bool, + ) -> ClientResult; +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/flashblocks/args.rs b/rust/rollup-boost/crates/rollup-boost/src/flashblocks/args.rs new file mode 100644 index 0000000000000..7939e1ef96a5f --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/flashblocks/args.rs 
@@ -0,0 +1,129 @@ +use backoff::{ExponentialBackoff, ExponentialBackoffBuilder}; +use clap::{Args, Parser}; +use ed25519_dalek::{SigningKey, VerifyingKey}; +use std::time::Duration; +use url::Url; + +use hex::FromHex; + +#[derive(Args, Clone, Debug)] +#[group(requires = "flashblocks_ws")] +pub struct FlashblocksWsArgs { + /// Enable Flashblocks Websocket client + #[arg( + // Keep the flag as "flashblocks" for backward compatibility + long = "flashblocks", + id = "flashblocks_ws", + conflicts_with = "flashblocks_p2p", + env + )] + pub flashblocks_ws: bool, + + /// Flashblocks Builder WebSocket URL + #[arg(long, env, default_value = "ws://127.0.0.1:1111")] + pub flashblocks_builder_url: Url, + + /// Flashblocks WebSocket host for outbound connections + #[arg(long, env, default_value = "127.0.0.1")] + pub flashblocks_host: String, + + /// Flashblocks WebSocket port for outbound connections + #[arg(long, env, default_value = "1112")] + pub flashblocks_port: u16, + + /// Websocket connection configuration + #[command(flatten)] + pub flashblocks_ws_config: FlashblocksWebsocketConfig, +} + +#[derive(Parser, Debug, Clone, Copy)] +pub struct FlashblocksWebsocketConfig { + /// Minimum time for exponential backoff for timeout if builder disconnected + #[arg(long, env, default_value = "10")] + pub flashblock_builder_ws_initial_reconnect_ms: u64, + + /// Maximum time for exponential backoff for timeout if builder disconnected + #[arg(long, env, default_value = "5000")] + pub flashblock_builder_ws_max_reconnect_ms: u64, + + /// Timeout for connection attempt + #[arg(long, env, default_value = "5000")] + pub flashblock_builder_ws_connect_timeout_ms: u64, + + /// Interval in milliseconds between ping messages sent to upstream servers to detect + /// unresponsive connections + #[arg(long, env, default_value = "500")] + pub flashblock_builder_ws_ping_interval_ms: u64, + + /// Timeout in milliseconds to wait for pong responses from upstream servers before considering + /// the 
connection dead + #[arg(long, env, default_value = "1500")] + pub flashblock_builder_ws_pong_timeout_ms: u64, +} + +impl FlashblocksWebsocketConfig { + /// Creates `ExponentialBackoff` use to control builder websocket reconnection time + pub fn backoff(&self) -> ExponentialBackoff { + ExponentialBackoffBuilder::default() + .with_initial_interval(self.initial_interval()) + .with_max_interval(self.max_interval()) + .with_randomization_factor(0 as f64) + .with_max_elapsed_time(None) + .with_multiplier(2.0) + .build() + } + + /// Returns initial time for exponential backoff + pub fn initial_interval(&self) -> Duration { + Duration::from_millis(self.flashblock_builder_ws_initial_reconnect_ms) + } + + /// Returns maximal time for exponential backoff + pub fn max_interval(&self) -> Duration { + Duration::from_millis(self.flashblock_builder_ws_max_reconnect_ms) + } + + /// Returns ping interval + pub fn ping_interval(&self) -> Duration { + Duration::from_millis(self.flashblock_builder_ws_ping_interval_ms) + } + + /// Returns pong interval + pub fn pong_interval(&self) -> Duration { + Duration::from_millis(self.flashblock_builder_ws_pong_timeout_ms) + } +} + +#[derive(Args, Clone, Debug)] +#[group(requires = "flashblocks_p2p")] +pub struct FlashblocksP2PArgs { + /// Enable Flashblocks P2P Authorization + #[arg(long, id = "flashblocks_p2p", conflicts_with = "flashblocks_ws", env, required = false)] + pub flashblocks_p2p: bool, + + #[arg( + long = "flashblocks-authorizer-sk", + env = "FLASHBLOCKS_AUTHORIZER_SK", + value_parser = parse_sk, + required = false, + )] + pub authorizer_sk: SigningKey, + + #[arg( + long = "flashblocks-builder-vk", + env = "FLASHBLOCKS_BUILDER_VK", + value_parser = parse_vk, + required = false, + )] + pub builder_vk: VerifyingKey, +} + +pub fn parse_sk(s: &str) -> eyre::Result { + let bytes = <[u8; 32]>::from_hex(s.trim())?; + Ok(SigningKey::from_bytes(&bytes)) +} + +pub fn parse_vk(s: &str) -> eyre::Result { + let bytes = <[u8; 
32]>::from_hex(s.trim())?; + Ok(VerifyingKey::from_bytes(&bytes)?) +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/flashblocks/inbound.rs b/rust/rollup-boost/crates/rollup-boost/src/flashblocks/inbound.rs new file mode 100644 index 0000000000000..3cc6ae59b86d3 --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/flashblocks/inbound.rs @@ -0,0 +1,570 @@ +use super::metrics::FlashblocksWsInboundMetrics; +use crate::FlashblocksWebsocketConfig; +use backoff::{ExponentialBackoff, backoff::Backoff}; +use bytes::Bytes; +use futures::{SinkExt, StreamExt}; +use lru::LruCache; +use op_alloy_rpc_types_engine::OpFlashblockPayload; +use std::{ + io::ErrorKind::TimedOut, + num::NonZeroUsize, + sync::{Arc, Mutex}, + time::Duration, +}; +use tokio::{sync::mpsc, time::interval}; +use tokio_tungstenite::{ + connect_async, + tungstenite::{Error::Io, Message}, +}; +use tokio_util::sync::CancellationToken; +use tracing::{error, info}; +use url::Url; + +const MAXIMUM_PINGS: NonZeroUsize = NonZeroUsize::new(60).expect("positive number always non zero"); + +#[derive(Debug, thiserror::Error)] +enum FlashblocksReceiverError { + #[error("WebSocket connection failed: {0}")] + Connection(#[from] tokio_tungstenite::tungstenite::Error), + + #[error("Ping failed")] + PingFailed, + + #[error("Pong timeout")] + PongTimeout, + + #[error("Websocket haven't return the message")] + MessageMissing, + + #[error("Connection error: {0}")] + ConnectionError(String), + + #[error("Connection closed")] + ConnectionClosed, + + #[error("Task panicked: {0}")] + TaskPanic(String), + + #[error("Failed to send message to sender: {0}")] + SendError(#[from] Box>), + + #[error("Ping mutex poisoned")] + MutexPoisoned, +} + +pub struct FlashblocksReceiverService { + url: Url, + sender: mpsc::Sender, + websocket_config: FlashblocksWebsocketConfig, + metrics: FlashblocksWsInboundMetrics, +} + +impl FlashblocksReceiverService { + pub fn new( + url: Url, + sender: mpsc::Sender, + websocket_config: 
FlashblocksWebsocketConfig, + ) -> Self { + Self { url, sender, websocket_config, metrics: Default::default() } + } + + pub async fn run(self) { + let mut backoff = self.websocket_config.backoff(); + let timeout = + Duration::from_millis(self.websocket_config.flashblock_builder_ws_connect_timeout_ms); + + info!("FlashblocksReceiverService starting reconnection loop"); + loop { + if let Err(e) = self.connect_and_handle(&mut backoff, timeout).await { + let interval = backoff + .next_backoff() + .unwrap_or_else(|| { + error!("Backoff returned None despite max_elapsed_time=None, using max_interval as fallback"); + self.websocket_config.max_interval() + }); + error!( + "Flashblocks receiver connection error, retrying in {}ms: {}", + interval.as_millis(), + e + ); + self.metrics.reconnect_attempts.increment(1); + self.metrics.connection_status.set(0); + tokio::time::sleep(interval).await; + } else { + // connect_and_handle should never return Ok(()) + error!("Builder websocket connection has stopped. 
Invariant is broken."); + self.metrics.connection_status.set(0); + } + } + } + + async fn connect_and_handle( + &self, + backoff: &mut ExponentialBackoff, + timeout: Duration, + ) -> Result<(), FlashblocksReceiverError> { + // Timeout is used to ensure we won't get stuck in case some TCP frames go missing + let (ws_stream, _) = tokio::time::timeout(timeout, connect_async(self.url.as_str())) + .await + .map_err(|_| FlashblocksReceiverError::Connection(Io(TimedOut.into())))??; + let (mut write, mut read) = ws_stream.split(); + + info!("Connected to Flashblocks receiver at {}", self.url); + self.metrics.connection_status.set(1); + + let cancel_token = CancellationToken::new(); + let cancel_for_ping = cancel_token.clone(); + + // LRU cache with capacity of 60 pings - automatically evicts oldest entries + let ping_cache = Arc::new(Mutex::new(LruCache::new(MAXIMUM_PINGS))); + let pong_cache = ping_cache.clone(); + let mut ping_interval = interval(self.websocket_config.ping_interval()); + let ping_task = tokio::spawn(async move { + loop { + tokio::select! 
{ + _ = ping_interval.tick() => { + let uuid = uuid::Uuid::now_v7(); + if write.send(Message::Ping(Bytes::copy_from_slice(uuid.as_bytes().as_slice()))).await.is_err() { + return Err(FlashblocksReceiverError::PingFailed); + } + match ping_cache.lock() { + Ok(mut cache) => { + cache.put(uuid, ()); + } + Err(_) => { + return Err(FlashblocksReceiverError::MutexPoisoned); + } + } + } + _ = cancel_for_ping.cancelled() => { + tracing::debug!("Ping task cancelled"); + if let Err(e) = write.close().await { + tracing::warn!("Failed to close builder ws connection: {}", e); + } + return Ok(()); + } + } + } + }); + + let sender = self.sender.clone(); + let metrics = self.metrics.clone(); + + let pong_timeout = self.websocket_config.pong_interval(); + let message_handle = tokio::spawn(async move { + let mut pong_interval = interval(pong_timeout); + // We await here because first tick executes immediately + pong_interval.tick().await; + loop { + tokio::select! { + result = read.next() => { + match result { + Some(Ok(msg)) => match msg { + Message::Text(text) => { + metrics.messages_received.increment(1); + match serde_json::from_str::(&text) { + Ok(flashblocks_msg) => sender.send(flashblocks_msg).await.map_err(|e| { + FlashblocksReceiverError::SendError(Box::new(e)) + })?, + Err(e) => error!("Failed to process flashblock, error: {e}") + } + } + Message::Close(_) => { + return Err(FlashblocksReceiverError::ConnectionClosed); + } + Message::Pong(data) => { + match uuid::Uuid::from_slice(data.as_ref()) { + Ok(uuid) => { + match pong_cache.lock() { + Ok(mut cache) => { + if cache.pop(&uuid).is_some() { + pong_interval.reset(); + } else { + tracing::warn!("Received pong with unknown data:{}", uuid); + } + } + Err(_) => { + return Err(FlashblocksReceiverError::MutexPoisoned); + } + } + } + Err(e) => { + tracing::warn!("Failed to parse pong: {e}"); + } + } + } + Message::Ping(_) => {}, + msg => { + tracing::warn!("Received unexpected message: {:?}", msg); + } + }, + Some(Err(e)) => { + 
return Err(FlashblocksReceiverError::ConnectionError(e.to_string())); + } + None => { + return Err(FlashblocksReceiverError::MessageMissing); + } + } + }, + _ = pong_interval.tick() => { + return Err(FlashblocksReceiverError::PongTimeout); + } + }; + } + }); + + let connection_start = std::time::Instant::now(); + + let result = tokio::select! { + result = message_handle => { + result.map_err(|e| FlashblocksReceiverError::TaskPanic(e.to_string()))? + }, + result = ping_task => { + result.map_err(|e| FlashblocksReceiverError::TaskPanic(e.to_string()))? + }, + }; + + cancel_token.cancel(); + + // Only reset backoff if connection was stable for the max_interval set + // This prevents rapid reconnection loops when a proxy accepts and immediately drops + // connections + if connection_start.elapsed() >= backoff.max_interval { + backoff.reset(); + } + result + } +} + +#[cfg(test)] +mod tests { + use futures::SinkExt; + use tokio::sync::watch; + use tokio_tungstenite::{accept_async, tungstenite::Utf8Bytes}; + + use super::*; + use std::{ + net::{SocketAddr, TcpListener}, + sync::atomic::{AtomicBool, Ordering}, + }; + + async fn start( + addr: SocketAddr, + ) -> eyre::Result<( + watch::Sender, + mpsc::Sender, + mpsc::Receiver<()>, + url::Url, + )> { + let (term_tx, mut term_rx) = watch::channel(false); + let (send_tx, mut send_rx) = mpsc::channel::(100); + let (send_ping_tx, send_ping_rx) = mpsc::channel::<()>(100); + + let listener = TcpListener::bind(addr)?; + let url = Url::parse(&format!("ws://{addr}"))?; + + listener.set_nonblocking(true).expect("Failed to set TcpListener socket to non-blocking"); + + let listener = tokio::net::TcpListener::from_std(listener) + .expect("Failed to convert TcpListener to tokio TcpListener"); + + tokio::spawn(async move { + loop { + tokio::select! 
{ + _ = term_rx.changed() => { + if *term_rx.borrow() { + return; + } + } + + result = listener.accept() => { + match result { + Ok((connection, _addr)) => { + match accept_async(connection).await { + Ok(ws_stream) => { + let (mut write, mut read) = ws_stream.split(); + + loop { + tokio::select! { + Some(msg) = send_rx.recv() => { + let serialized = serde_json::to_string(&msg).expect("message serialized"); + let utf8_bytes = Utf8Bytes::from(serialized); + + write.send(Message::Text(utf8_bytes)).await.expect("message sent"); + }, + msg = read.next() => { + // we need to read for the library to handle pong messages + if let Some(Ok(Message::Ping(_))) = msg { + send_ping_tx.send(()).await.expect("ping notification sent"); + } + } + _ = term_rx.changed() => { + if *term_rx.borrow() { + return; + } + } + } + } + } + Err(e) => { + eprintln!("Failed to accept WebSocket connection: {e}"); + } + } + } + Err(e) => { + // Optionally break or continue based on error type + if e.kind() == std::io::ErrorKind::Interrupted { + break; + } + } + } + } + } + } + }); + + Ok((term_tx, send_tx, send_ping_rx, url)) + } + + async fn start_ping_server( + addr: SocketAddr, + send_pongs: Arc, + ) -> eyre::Result<(watch::Receiver, mpsc::Receiver, url::Url)> { + let (term_tx, term_rx) = watch::channel(false); + let (send_ping_tx, send_ping_rx) = mpsc::channel(100); + + let listener = TcpListener::bind(addr)?; + let url = Url::parse(&format!("ws://{addr}"))?; + + listener.set_nonblocking(true).expect("can set TcpListener socket to non-blocking"); + + let listener = tokio::net::TcpListener::from_std(listener) + .expect("can convert TcpListener to tokio TcpListener"); + + tokio::spawn(async move { + loop { + let result = listener.accept().await; + match result { + Ok((connection, _addr)) => { + match accept_async(connection).await { + Ok(ws_stream) => { + let (_, mut read) = ws_stream.split(); + loop { + if send_pongs.load(Ordering::Relaxed) { + let msg = read.next().await; + match msg { + // we 
need to read for the library to handle pong + // messages + Some(Ok(Message::Ping(data))) => { + send_ping_tx + .send(data) + .await + .expect("ping data sent"); + } + Some(Err(_)) => { + break; + } + _ => {} + } + } else { + tokio::time::sleep(tokio::time::Duration::from_millis(1)) + .await; + } + } + } + Err(e) => { + eprintln!("Failed to accept WebSocket connection: {e}"); + } + } + } + Err(e) => { + // Optionally break or continue based on error type + if e.kind() == std::io::ErrorKind::Interrupted { + break; + } + } + } + // If we have broken from the loop it means reconnection occurred + term_tx.send(true).expect("channel is up"); + } + }); + + Ok((term_rx, send_ping_rx, url)) + } + + #[tokio::test] + async fn test_flashblocks_receiver_service() -> eyre::Result<()> { + let addr = "127.0.0.1:8080".parse::().expect("valid socket address"); + let (term, send_msg, _, url) = start(addr).await?; + + let (tx, mut rx) = mpsc::channel(100); + + let config = FlashblocksWebsocketConfig { + flashblock_builder_ws_initial_reconnect_ms: 100, + flashblock_builder_ws_max_reconnect_ms: 100, + flashblock_builder_ws_ping_interval_ms: 500, + flashblock_builder_ws_pong_timeout_ms: 2000, + flashblock_builder_ws_connect_timeout_ms: 5000, + }; + let service = FlashblocksReceiverService::new(url, tx, config); + tokio::spawn(async move { + service.run().await; + }); + + // Send a message to the websocket server + send_msg + .send(OpFlashblockPayload::default()) + .await + .expect("message sent to websocket server"); + + let msg = rx.recv().await.expect("message received from websocket"); + assert_eq!(msg, OpFlashblockPayload::default()); + + // Drop the websocket server and start another one with the same address + // The FlashblocksReceiverService should reconnect to the new server + term.send(true).expect("termination signal sent"); + + // sleep for 1 second to ensure the server is dropped + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + + // start a new server with 
the same address + let (term, send_msg, _, _url) = start(addr).await?; + send_msg + .send(OpFlashblockPayload::default()) + .await + .expect("message sent to websocket server"); + + let msg = rx.recv().await.expect("message received from websocket"); + assert_eq!(msg, OpFlashblockPayload::default()); + term.send(true).expect("termination signal sent"); + + Ok(()) + } + + #[tokio::test] + async fn test_flashblocks_receiver_service_ping_pong() -> eyre::Result<()> { + // test that if the builder is not sending any messages back, the service will send + // ping messages to test the connection periodically + + let addr = "127.0.0.1:8081".parse::().expect("valid socket address"); + let send_pongs = Arc::new(AtomicBool::new(true)); + let (term, mut ping_rx, url) = start_ping_server(addr, send_pongs.clone()).await?; + let config = FlashblocksWebsocketConfig { + flashblock_builder_ws_initial_reconnect_ms: 100, + flashblock_builder_ws_max_reconnect_ms: 1000, + flashblock_builder_ws_ping_interval_ms: 500, + flashblock_builder_ws_pong_timeout_ms: 2000, + flashblock_builder_ws_connect_timeout_ms: 5000, + }; + + let (tx, _rx) = mpsc::channel(100); + let service = FlashblocksReceiverService::new(url, tx, config); + tokio::spawn(async move { + service.run().await; + }); + + // even if we do not send any messages, we should receive pings to keep the connection alive + for _ in 0..5 { + ping_rx.recv().await.expect("ping received"); + } + // Check that server hasn't reconnected because we have answered to pongs + let reconnected = term.has_changed().expect("channel not closed"); + assert!(!reconnected, "not reconnected when we answered to pings"); + + send_pongs.store(false, Ordering::Relaxed); + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + send_pongs.store(true, Ordering::Relaxed); + // This sleep is to ensure that we will try to read socket and realise it closed + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + // One second is not enough to 
break the connection + let reconnected = term.has_changed().expect("channel not closed"); + assert!(!reconnected, "have reconnected before deadline is reached"); + + send_pongs.store(false, Ordering::Relaxed); + tokio::time::sleep(std::time::Duration::from_secs(3)).await; + send_pongs.store(true, Ordering::Relaxed); + // This sleep is to ensure that we will try to read socket and realise it closed + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + + // 3 seconds will cause reconnect + let reconnected = term.has_changed().expect("channel not closed"); + assert!(reconnected, "haven't reconnected after deadline is reached"); + Ok(()) + } + + /// Starts a TCP server that accepts connections but never completes the WebSocket handshake. + /// This simulates a stuck connection during the handshake phase. + async fn start_stuck_server(addr: SocketAddr) -> eyre::Result<(watch::Sender, url::Url)> { + let (term_tx, mut term_rx) = watch::channel(false); + + let listener = TcpListener::bind(addr)?; + let url = Url::parse(&format!("ws://{addr}"))?; + + listener.set_nonblocking(true).expect("can set TcpListener socket to non-blocking"); + + let listener = tokio::net::TcpListener::from_std(listener) + .expect("can convert TcpListener to tokio TcpListener"); + + tokio::spawn(async move { + // Store connections to keep them alive without responding + let mut held_connections: Vec = Vec::new(); + loop { + tokio::select! 
{ + _ = term_rx.changed() => { + if *term_rx.borrow() { + return; + } + } + result = listener.accept() => { + if let Ok((connection, _addr)) = result { + // Accept the TCP connection but never complete the WebSocket handshake + // Keep the connection alive by storing it + held_connections.push(connection); + } + } + } + } + }); + + Ok((term_tx, url)) + } + + #[tokio::test] + async fn test_flashblocks_receiver_service_connect_timeout() -> eyre::Result<()> { + // Test that if the WebSocket handshake hangs, the service will timeout + let addr = "127.0.0.1:8082".parse::().expect("valid socket address"); + + let (term, url) = start_stuck_server(addr).await?; + + let config = FlashblocksWebsocketConfig { + flashblock_builder_ws_initial_reconnect_ms: 100, + flashblock_builder_ws_max_reconnect_ms: 200, + flashblock_builder_ws_ping_interval_ms: 500, + flashblock_builder_ws_pong_timeout_ms: 2000, + // Set a 1 second timeout for connection attempts + flashblock_builder_ws_connect_timeout_ms: 1000, + }; + + let (tx, _rx) = mpsc::channel(100); + let service = FlashblocksReceiverService::new(url, tx, config); + + let timeout = + std::time::Duration::from_millis(config.flashblock_builder_ws_connect_timeout_ms); + let mut backoff = config.backoff(); + + // Call connect_and_handle directly - it should timeout and return an error + let result = service.connect_and_handle(&mut backoff, timeout).await; + + assert!(result.is_err(), "connect_and_handle should return error on timeout"); + + // Verify it's a connection error (timeout is wrapped as connection error) + let err = result.unwrap_err(); + assert!( + matches!(err, FlashblocksReceiverError::Connection(_)), + "expected Connection error, got: {err:?}" + ); + + term.send(true).expect("termination signal sent"); + Ok(()) + } +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/flashblocks/launcher.rs b/rust/rollup-boost/crates/rollup-boost/src/flashblocks/launcher.rs new file mode 100644 index 0000000000000..3bb67cb991d92 --- 
/dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/flashblocks/launcher.rs @@ -0,0 +1,33 @@ +use crate::{ + FlashblocksService, FlashblocksWebsocketConfig, RpcClient, + flashblocks::inbound::FlashblocksReceiverService, +}; +use core::net::SocketAddr; +use tokio::sync::mpsc; +use url::Url; + +pub struct Flashblocks {} + +impl Flashblocks { + pub fn run( + builder_url: RpcClient, + flashblocks_url: Url, + outbound_addr: SocketAddr, + websocket_config: FlashblocksWebsocketConfig, + ) -> eyre::Result { + let (tx, rx) = mpsc::channel(100); + + let receiver = FlashblocksReceiverService::new(flashblocks_url, tx, websocket_config); + tokio::spawn(async move { + let _ = receiver.run().await; + }); + + let service = FlashblocksService::new(builder_url, outbound_addr)?; + let mut service_handle = service.clone(); + tokio::spawn(async move { + service_handle.run(rx).await; + }); + + Ok(service) + } +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/flashblocks/metrics.rs b/rust/rollup-boost/crates/rollup-boost/src/flashblocks/metrics.rs new file mode 100644 index 0000000000000..9102cf66ede39 --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/flashblocks/metrics.rs @@ -0,0 +1,56 @@ +use metrics::{Counter, Gauge, Histogram}; +use metrics_derive::Metrics; + +#[derive(Metrics, Clone)] +#[metrics(scope = "flashblocks.ws_inbound")] +pub struct FlashblocksWsInboundMetrics { + /// Total number of WebSocket reconnection attempts + #[metric(describe = "Total number of WebSocket reconnection attempts")] + pub reconnect_attempts: Counter, + + /// Current WebSocket connection status (1 = connected, 0 = disconnected) + #[metric(describe = "Current WebSocket connection status")] + pub connection_status: Gauge, + + #[metric(describe = "Number of flashblock messages received from builder")] + pub messages_received: Counter, +} + +#[derive(Metrics, Clone)] +#[metrics(scope = "flashblocks.service")] +pub struct FlashblocksServiceMetrics { + #[metric(describe = "Number of 
errors when extending payload")] + pub extend_payload_errors: Counter, + + #[metric(describe = "Number of times the current payload ID has been set")] + pub current_payload_id_mismatch: Counter, + + #[metric(describe = "Number of messages processed by the service")] + pub messages_processed: Counter, + + #[metric(describe = "Total number of used flashblocks")] + pub flashblocks_gauge: Gauge, + + #[metric(describe = "Total number of used flashblocks")] + pub flashblocks_counter: Counter, + + #[metric(describe = "Reduction in flashblocks issued.")] + pub flashblocks_missing_histogram: Histogram, + + #[metric(describe = "Reduction in flashblocks issued.")] + pub flashblocks_missing_gauge: Gauge, + + #[metric(describe = "Reduction in flashblocks issued.")] + pub flashblocks_missing_counter: Counter, +} + +impl FlashblocksServiceMetrics { + pub fn record_flashblocks(&self, flashblocks_count: u64, max_flashblocks: u64) { + let reduced_flashblocks = max_flashblocks.saturating_sub(flashblocks_count); + self.flashblocks_gauge.set(flashblocks_count as f64); + self.flashblocks_counter.increment(flashblocks_count); + self.flashblocks_missing_histogram.record(reduced_flashblocks as f64); + self.flashblocks_missing_gauge.set(reduced_flashblocks as f64); + self.flashblocks_missing_counter.increment(reduced_flashblocks); + } +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/flashblocks/mod.rs b/rust/rollup-boost/crates/rollup-boost/src/flashblocks/mod.rs new file mode 100644 index 0000000000000..1499a26da6621 --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/flashblocks/mod.rs @@ -0,0 +1,14 @@ +mod launcher; + +pub use launcher::*; + +mod service; +pub use service::*; + +mod inbound; +mod outbound; + +mod args; +pub use args::*; + +mod metrics; diff --git a/rust/rollup-boost/crates/rollup-boost/src/flashblocks/outbound.rs b/rust/rollup-boost/crates/rollup-boost/src/flashblocks/outbound.rs new file mode 100644 index 0000000000000..566500f299a2e --- /dev/null +++ 
b/rust/rollup-boost/crates/rollup-boost/src/flashblocks/outbound.rs @@ -0,0 +1,237 @@ +use core::{ + fmt::{Debug, Formatter}, + net::SocketAddr, + pin::Pin, + sync::atomic::{AtomicUsize, Ordering}, + task::{Context, Poll}, +}; +use futures::{Sink, SinkExt, StreamExt}; +use op_alloy_rpc_types_engine::OpFlashblockPayload; +use std::{io, net::TcpListener, sync::Arc}; +use tokio::{ + net::TcpStream, + sync::{ + broadcast::{self, Receiver, error::RecvError}, + watch, + }, +}; +use tokio_tungstenite::{ + WebSocketStream, accept_async, + tungstenite::{Message, Utf8Bytes}, +}; + +/// A WebSockets publisher that accepts connections from client websockets and broadcasts to them +/// updates about new flashblocks. It maintains a count of sent messages and active subscriptions. +/// +/// This is modelled as a `futures::Sink` that can be used to send `OpFlashblockPayload` messages. +pub struct WebSocketPublisher { + sent: Arc, + subs: Arc, + term: watch::Sender, + pipe: broadcast::Sender, +} + +impl WebSocketPublisher { + pub fn new(addr: SocketAddr) -> io::Result { + let (pipe, _) = broadcast::channel(100); + let (term, _) = watch::channel(false); + + let sent = Arc::new(AtomicUsize::new(0)); + let subs = Arc::new(AtomicUsize::new(0)); + let listener = TcpListener::bind(addr)?; + + tokio::spawn(listener_loop( + listener, + pipe.subscribe(), + term.subscribe(), + Arc::clone(&sent), + Arc::clone(&subs), + )); + + Ok(Self { sent, subs, term, pipe }) + } + + pub fn publish(&self, payload: &OpFlashblockPayload) -> io::Result<()> { + // Serialize the payload to a UTF-8 string + // serialize only once, then just copy around only a pointer + // to the serialized data for each subscription. 
+ let serialized = serde_json::to_string(payload)?; + let utf8_bytes = Utf8Bytes::from(serialized); + + // Send the serialized payload to all subscribers + self.pipe + .send(utf8_bytes) + .map_err(|e| io::Error::new(io::ErrorKind::ConnectionAborted, e))?; + Ok(()) + } +} + +impl Drop for WebSocketPublisher { + fn drop(&mut self) { + // Notify the listener loop to terminate + let _ = self.term.send(true); + tracing::info!("WebSocketPublisher dropped, terminating listener loop"); + } +} + +async fn listener_loop( + listener: TcpListener, + receiver: Receiver, + term: watch::Receiver, + sent: Arc, + subs: Arc, +) { + listener.set_nonblocking(true).expect("Failed to set TcpListener socket to non-blocking"); + + let listener = tokio::net::TcpListener::from_std(listener) + .expect("Failed to convert TcpListener to tokio TcpListener"); + + let listen_addr = listener.local_addr().expect("Failed to get local address of listener"); + tracing::info!("Flashblocks WebSocketPublisher listening on {listen_addr}"); + + let mut term = term; + + loop { + let subs = Arc::clone(&subs); + + tokio::select! { + // drop this connection if the `WebSocketPublisher` is dropped + _ = term.changed() => { + if *term.borrow() { + return; + } + } + + // Accept new connections on the websocket listener + // when a new connection is established, spawn a dedicated task to handle + // the connection and broadcast with that connection. 
+ Ok((connection, peer_addr)) = listener.accept() => { + let sent = Arc::clone(&sent); + let term = term.clone(); + let receiver_clone = receiver.resubscribe(); + + match accept_async(connection).await { + Ok(stream) => { + tokio::spawn(async move { + subs.fetch_add(1, Ordering::Relaxed); + tracing::debug!("WebSocket connection established with {}", peer_addr); + + // Handle the WebSocket connection in a dedicated task + broadcast_loop(stream, term, receiver_clone, sent).await; + + subs.fetch_sub(1, Ordering::Relaxed); + tracing::debug!("WebSocket connection closed for {}", peer_addr); + }); + } + Err(e) => { + tracing::warn!("Failed to accept WebSocket connection from {peer_addr}: {e}"); + } + } + } + } + } +} + +/// An instance of this loop is spawned for each connected WebSocket client. +/// It listens for broadcast updates about new flashblocks and sends them to the client. +/// It also handles termination signals to gracefully close the connection. +/// Any connectivity errors will terminate the loop, which will in turn +/// decrement the subscription count in the `WebSocketPublisher`. +async fn broadcast_loop( + stream: WebSocketStream, + term: watch::Receiver, + blocks: broadcast::Receiver, + sent: Arc, +) { + let mut term = term; + let mut blocks = blocks; + let Ok(peer_addr) = stream.get_ref().peer_addr() else { + return; + }; + let (mut sink, mut stream_read) = stream.split(); + + loop { + tokio::select! { + // Check if the publisher is terminated + _ = term.changed() => { + if *term.borrow() { + tracing::info!("WebSocketPublisher is terminating, closing broadcast loop"); + return; + } + } + + // Handle incoming WebSocket messages (including pings) + msg = stream_read.next() => { + match msg { + Some(Ok(_)) => { + // Ignore all inbound frames. + // Tungstenite will auto-respond to Ping and handle Close internally. 
+ } + Some(Err(e)) => { + tracing::debug!("WebSocket error from {peer_addr}: {e}"); + break; + } + None => { + tracing::debug!("WebSocket stream ended for {peer_addr}"); + break; + } + } + } + + // Receive payloads from the broadcast channel + payload = blocks.recv() => match payload { + Ok(payload) => { + // Here you would typically send the payload to the WebSocket clients. + // For this example, we just increment the sent counter. + sent.fetch_add(1, Ordering::Relaxed); + + tracing::debug!("Broadcasted payload: {:?}", payload); + if let Err(e) = sink.send(Message::Text(payload)).await { + tracing::debug!("Closing flashblocks subscription for {peer_addr}: {e}"); + break; // Exit the loop if sending fails + } + } + Err(RecvError::Closed) => { + tracing::debug!("Broadcast channel closed, exiting broadcast loop"); + return; + } + Err(RecvError::Lagged(_)) => { + tracing::warn!("Broadcast channel lagged, some messages were dropped"); + } + }, + } + } +} + +impl Debug for WebSocketPublisher { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + let subs = self.subs.load(Ordering::Relaxed); + let sent = self.sent.load(Ordering::Relaxed); + + f.debug_struct("WebSocketPublisher") + .field("subs", &subs) + .field("payloads_sent", &sent) + .finish() + } +} + +impl Sink<&OpFlashblockPayload> for WebSocketPublisher { + type Error = eyre::Report; + + fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn start_send(self: Pin<&mut Self>, item: &OpFlashblockPayload) -> Result<(), Self::Error> { + self.publish(item)?; + Ok(()) + } + + fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/flashblocks/service.rs b/rust/rollup-boost/crates/rollup-boost/src/flashblocks/service.rs new file mode 100644 index 0000000000000..5c563d525a5a7 
--- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/flashblocks/service.rs @@ -0,0 +1,496 @@ +use super::outbound::WebSocketPublisher; +use crate::{ + ClientResult, EngineApiExt, RpcClient, flashblocks::metrics::FlashblocksServiceMetrics, +}; +use alloy_primitives::U256; +use alloy_rpc_types_engine::{ + BlobsBundleV1, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, ForkchoiceState, + ForkchoiceUpdated, PayloadId, PayloadStatus, +}; +use alloy_rpc_types_eth::{Block, BlockNumberOrTag}; +use core::net::SocketAddr; +use jsonrpsee::core::async_trait; +use op_alloy_rpc_types_engine::{ + OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4, OpExecutionPayloadV4, + OpFlashblockPayload, OpFlashblockPayloadBase, OpFlashblockPayloadDelta, OpPayloadAttributes, +}; +use rollup_boost_types::payload::{NewPayload, OpExecutionPayloadEnvelope, PayloadVersion}; +use std::{ + io, + sync::{ + Arc, + atomic::{AtomicU64, Ordering}, + }, +}; +use thiserror::Error; +use tokio::sync::{RwLock, mpsc}; +use tracing::{debug, error, info}; + +#[derive(Debug, Error, PartialEq)] +pub enum FlashblocksError { + #[error("Missing base payload for initial flashblock")] + MissingBasePayload, + #[error("Unexpected base payload for non-initial flashblock")] + UnexpectedBasePayload, + #[error("Missing delta for flashblock")] + MissingDelta, + #[error("Invalid index for flashblock")] + InvalidIndex, + #[error("Missing payload")] + MissingPayload, + #[error("invalid authorizer signature")] + InvalidAuthorizerSig, +} + +// Simplify actor messages to just handle shutdown +#[derive(Debug)] +enum FlashblocksEngineMessage { + OpFlashblockPayload(OpFlashblockPayload), +} + +#[derive(Clone, Debug, Default)] +pub struct FlashblockBuilder { + base: Option, + flashblocks: Vec, +} + +impl FlashblockBuilder { + pub fn new() -> Self { + Self::default() + } + + pub fn extend(&mut self, payload: OpFlashblockPayload) -> Result<(), FlashblocksError> { + tracing::debug!(message = "Extending 
payload", payload_id = %payload.payload_id, index = payload.index, has_base=payload.base.is_some()); + + // Validate the index is contiguous + if payload.index != self.flashblocks.len() as u64 { + return Err(FlashblocksError::InvalidIndex); + } + + // Check base payload rules + if payload.index == 0 { + if let Some(base) = payload.base { + self.base = Some(base) + } else { + return Err(FlashblocksError::MissingBasePayload); + } + } else if payload.base.is_some() { + return Err(FlashblocksError::UnexpectedBasePayload); + } + + // Update latest diff and accumulate transactions and withdrawals + self.flashblocks.push(payload.diff); + + Ok(()) + } + + pub fn into_envelope( + self, + version: PayloadVersion, + ) -> Result { + self.build_envelope(version) + } + + pub fn build_envelope( + &self, + version: PayloadVersion, + ) -> Result { + let base = self.base.as_ref().ok_or(FlashblocksError::MissingPayload)?; + + // There must be at least one delta + let diff = self.flashblocks.last().ok_or(FlashblocksError::MissingDelta)?; + + let (transactions, withdrawals) = self.flashblocks.iter().fold( + (Vec::new(), Vec::new()), + |(mut transactions, mut withdrawals), delta| { + transactions.extend(delta.transactions.clone()); + withdrawals.extend(delta.withdrawals.clone()); + (transactions, withdrawals) + }, + ); + + let withdrawals_root = diff.withdrawals_root; + + let execution_payload = ExecutionPayloadV3 { + blob_gas_used: diff.blob_gas_used.unwrap_or(0), + excess_blob_gas: 0, + payload_inner: ExecutionPayloadV2 { + withdrawals, + payload_inner: ExecutionPayloadV1 { + parent_hash: base.parent_hash, + fee_recipient: base.fee_recipient, + state_root: diff.state_root, + receipts_root: diff.receipts_root, + logs_bloom: diff.logs_bloom, + prev_randao: base.prev_randao, + block_number: base.block_number, + gas_limit: base.gas_limit, + gas_used: diff.gas_used, + timestamp: base.timestamp, + extra_data: base.extra_data.clone(), + base_fee_per_gas: base.base_fee_per_gas, + block_hash: 
diff.block_hash, + transactions, + }, + }, + }; + + match version { + PayloadVersion::V3 => { + Ok(OpExecutionPayloadEnvelope::V3(OpExecutionPayloadEnvelopeV3 { + parent_beacon_block_root: base.parent_beacon_block_root, + block_value: U256::ZERO, + blobs_bundle: BlobsBundleV1::default(), + should_override_builder: false, + execution_payload, + })) + } + PayloadVersion::V4 => { + Ok(OpExecutionPayloadEnvelope::V4(OpExecutionPayloadEnvelopeV4 { + parent_beacon_block_root: base.parent_beacon_block_root, + block_value: U256::ZERO, + blobs_bundle: BlobsBundleV1::default(), + should_override_builder: false, + execution_payload: OpExecutionPayloadV4 { + withdrawals_root, + payload_inner: execution_payload, + }, + execution_requests: vec![], + })) + } + } + } +} + +#[derive(Clone, Debug)] +pub struct FlashblocksService { + client: RpcClient, + + /// Current payload ID we're processing. Set from local payload calculation and updated from + /// external source. + /// None means rollup-boost has not served FCU with attributes yet. + current_payload_id: Arc>>, + + /// The current Flashblock's payload being constructed. + best_payload: Arc>, + + /// Websocket publisher for sending valid pre-confirmations to clients. + ws_pub: Arc, + + /// Metrics + metrics: FlashblocksServiceMetrics, + + /// Atomic to track absolute maximum number of flashblocks used is block building. + /// This used to measures the reduction in flashblocks issued. 
+ max_flashblocks: Arc, +} + +impl FlashblocksService { + pub fn new(client: RpcClient, outbound_addr: SocketAddr) -> io::Result { + let ws_pub = WebSocketPublisher::new(outbound_addr)?.into(); + + Ok(Self { + client, + current_payload_id: Arc::new(RwLock::new(None)), + best_payload: Arc::new(RwLock::new(FlashblockBuilder::new())), + ws_pub, + metrics: Default::default(), + max_flashblocks: Arc::new(AtomicU64::new(0)), + }) + } + + pub async fn get_best_payload( + &self, + version: PayloadVersion, + payload_id: PayloadId, + ) -> Result { + // Check that we have flashblocks for correct payload + if *self.current_payload_id.read().await != Some(payload_id) { + // We have outdated `current_payload_id` so we should fallback to get_payload + // Clearing best_payload in here would cause situation when old `get_payload` would + // clear currently built correct flashblocks. + // This will self-heal on the next FCU. + return Err(FlashblocksError::MissingPayload); + } + // consume the best payload and reset the builder + let payload = { + let mut builder = self.best_payload.write().await; + let flashblocks_number = builder.flashblocks.len() as u64; + let max_flashblocks = self + .max_flashblocks + .fetch_max(flashblocks_number, Ordering::Relaxed) + .max(flashblocks_number); + self.metrics.record_flashblocks(flashblocks_number, max_flashblocks); + tracing::Span::current().record("flashblocks_count", flashblocks_number); + // Take payload and place new one in its place in one go to avoid double locking + std::mem::replace(&mut *builder, FlashblockBuilder::new()).into_envelope(version)? 
+ }; + + Ok(payload) + } + + pub async fn set_current_payload_id(&self, payload_id: PayloadId) { + tracing::debug!(message = "Setting current payload ID", payload_id = %payload_id); + *self.current_payload_id.write().await = Some(payload_id); + // Current state won't be useful anymore because chain progressed + *self.best_payload.write().await = FlashblockBuilder::new(); + } + + async fn on_event(&mut self, event: FlashblocksEngineMessage) { + match event { + FlashblocksEngineMessage::OpFlashblockPayload(payload) => { + self.metrics.messages_processed.increment(1); + + tracing::debug!( + message = "Received flashblock payload", + payload_id = %payload.payload_id, + index = payload.index + ); + + // Make sure the payload id matches the current payload id + // If local payload id is non then boost is not service FCU with attributes + match *self.current_payload_id.read().await { + Some(payload_id) => { + if payload_id != payload.payload_id { + self.metrics.current_payload_id_mismatch.increment(1); + error!( + message = "Payload ID mismatch", + payload_id = %payload.payload_id, + local_payload_id = %payload_id, + index = payload.index, + ); + return; + } + } + None => { + // We haven't served FCU with attributes yet, just ignore flashblocks + debug!( + message = "Received flashblocks, but no FCU with attributes was sent", + payload_id = %payload.payload_id, + index = payload.index, + ); + return; + } + } + + if let Err(e) = self.best_payload.write().await.extend(payload.clone()) { + self.metrics.extend_payload_errors.increment(1); + error!( + message = "Failed to extend payload", + error = %e, + payload_id = %payload.payload_id, + index = payload.index, + ); + } else { + // Broadcast the valid message + if let Err(e) = self.ws_pub.publish(&payload) { + error!( + message = "Failed to broadcast payload", + error = %e, + payload_id = %payload.payload_id, + index = payload.index, + ); + } + } + } + } + } + + pub async fn run(&mut self, mut stream: mpsc::Receiver) { + 
while let Some(event) = stream.recv().await { + self.on_event(FlashblocksEngineMessage::OpFlashblockPayload(event)).await; + } + } +} + +#[async_trait] +impl EngineApiExt for FlashblocksService { + async fn fork_choice_updated_v3( + &self, + fork_choice_state: ForkchoiceState, + payload_attributes: Option, + ) -> ClientResult { + // Calculate and set expected payload_id + if let Some(attr) = &payload_attributes { + let payload_id = attr.payload_id(&fork_choice_state.head_block_hash, 3); + self.set_current_payload_id(payload_id).await; + } + + let resp = + self.client.fork_choice_updated_v3(fork_choice_state, payload_attributes).await?; + + if let Some(payload_id) = resp.payload_id { + let current_payload = *self.current_payload_id.read().await; + if current_payload != Some(payload_id) { + tracing::error!( + message = "Payload id returned by builder differs from calculated. Using builder payload id", + builder_payload_id = %payload_id, + calculated_payload_id = %current_payload.unwrap_or_default(), + ); + self.set_current_payload_id(payload_id).await; + } else { + tracing::debug!(message = "Forkchoice updated", payload_id = %payload_id); + } + } else { + tracing::debug!(message = "Forkchoice updated with no payload ID"); + } + Ok(resp) + } + + async fn new_payload(&self, new_payload: NewPayload) -> ClientResult { + self.client.new_payload(new_payload).await + } + + async fn get_payload( + &self, + payload_id: PayloadId, + version: PayloadVersion, + ) -> ClientResult { + // First try to get the best flashblocks payload from the builder if it exists + + match self.get_best_payload(version, payload_id).await { + Ok(payload) => { + info!(message = "Returning fb payload"); + // This will finalise block building in builder. 
+ let client = self.client.clone(); + tokio::spawn(async move { + if let Err(e) = client.get_payload(payload_id, version).await { + error!( + message = "Failed to send finalising getPayload to builder", + error = %e, + ); + } + }); + Ok(payload) + } + Err(e) => { + error!(message = "Error getting fb best payload, falling back on client", error = %e); + info!(message = "Falling back to get_payload on client", payload_id = %payload_id); + let result = self.client.get_payload(payload_id, version).await?; + Ok(result) + } + } + } + + async fn get_block_by_number( + &self, + number: BlockNumberOrTag, + full: bool, + ) -> ClientResult { + self.client.get_block_by_number(number, full).await + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::server::tests::{MockEngineServer, spawn_server}; + use alloy_rpc_types_engine::JwtSecret; + use http::Uri; + use rollup_boost_types::payload::PayloadSource; + use std::str::FromStr; + + /// Test that we fallback to the getPayload method if the flashblocks payload is not available + #[tokio::test] + async fn test_flashblocks_fallback_to_get_payload() -> eyre::Result<()> { + let builder_mock: MockEngineServer = MockEngineServer::new(); + let (_fallback_server, fallback_server_addr) = spawn_server(builder_mock.clone()).await; + let jwt_secret = JwtSecret::random(); + + let builder_auth_rpc = Uri::from_str(&format!("http://{fallback_server_addr}")).unwrap(); + let builder_client = + RpcClient::new(builder_auth_rpc.clone(), jwt_secret, 2000, PayloadSource::Builder)?; + + let service = + FlashblocksService::new(builder_client, "127.0.0.1:8000".parse().unwrap()).unwrap(); + + // by default, builder_mock returns a valid payload always + service.get_payload(PayloadId::default(), PayloadVersion::V3).await?; + + let get_payload_requests_builder = builder_mock.get_payload_requests.clone(); + assert_eq!(get_payload_requests_builder.lock().len(), 1); + + Ok(()) + } + + /// Test that we don't return block from flashblocks if payload_id 
is different + #[tokio::test] + async fn test_flashblocks_different_payload_id() -> eyre::Result<()> { + let builder_mock: MockEngineServer = MockEngineServer::new(); + let (_fallback_server, fallback_server_addr) = spawn_server(builder_mock.clone()).await; + let jwt_secret = JwtSecret::random(); + + let builder_auth_rpc = Uri::from_str(&format!("http://{fallback_server_addr}")).unwrap(); + let builder_client = + RpcClient::new(builder_auth_rpc.clone(), jwt_secret, 2000, PayloadSource::Builder)?; + + let service = + FlashblocksService::new(builder_client, "127.0.0.1:8001".parse().unwrap()).unwrap(); + + // Some "random" payload id + *service.current_payload_id.write().await = Some(PayloadId::new([1, 1, 1, 1, 1, 1, 1, 1])); + + // We ensure that request will skip rollup-boost and serve payload from backup if payload id + // don't match + service.get_payload(PayloadId::default(), PayloadVersion::V3).await?; + + let get_payload_requests_builder = builder_mock.get_payload_requests.clone(); + assert_eq!(get_payload_requests_builder.lock().len(), 1); + + Ok(()) + } + + #[tokio::test] + async fn test_flashblocks_builder() -> eyre::Result<()> { + let mut builder = FlashblockBuilder::new(); + + // Error: First payload must have a base + let result = builder.extend(OpFlashblockPayload { + payload_id: PayloadId::default(), + index: 0, + ..Default::default() + }); + assert!(result.is_err()); + assert_eq!(result.unwrap_err(), FlashblocksError::MissingBasePayload); + + // Ok: First payload is correct if it has base and index 0 + let result = builder.extend(OpFlashblockPayload { + payload_id: PayloadId::default(), + index: 0, + base: Some(OpFlashblockPayloadBase { ..Default::default() }), + ..Default::default() + }); + assert!(result.is_ok()); + + // Error: First payload must have index 0 + let result = builder.extend(OpFlashblockPayload { + payload_id: PayloadId::default(), + index: 1, + base: Some(OpFlashblockPayloadBase { ..Default::default() }), + ..Default::default() + }); + 
assert!(result.is_err()); + assert_eq!(result.unwrap_err(), FlashblocksError::UnexpectedBasePayload); + + // Error: Second payload must have a follow-up index + let result = builder.extend(OpFlashblockPayload { + payload_id: PayloadId::default(), + index: 2, + base: None, + ..Default::default() + }); + assert!(result.is_err()); + assert_eq!(result.unwrap_err(), FlashblocksError::InvalidIndex); + + // Ok: Second payload has the correct index + let result = builder.extend(OpFlashblockPayload { + payload_id: PayloadId::default(), + index: 1, + base: None, + ..Default::default() + }); + assert!(result.is_ok()); + + Ok(()) + } +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/health.rs b/rust/rollup-boost/crates/rollup-boost/src/health.rs new file mode 100644 index 0000000000000..fec56c8d7eadc --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/health.rs @@ -0,0 +1,579 @@ +use std::{ + sync::Arc, + time::{Duration, SystemTime, UNIX_EPOCH}, +}; + +use alloy_rpc_types_eth::BlockNumberOrTag; +use parking_lot::Mutex; +use tokio::{ + task::JoinHandle, + time::{Instant, sleep_until}, +}; +use tracing::warn; + +use crate::{EngineApiExt, ExecutionMode, Health, Probes}; + +pub struct HealthHandle { + pub probes: Arc, + pub execution_mode: Arc>, + pub l2_client: Arc, + pub builder_client: Arc, + pub health_check_interval: Duration, + pub max_unsafe_interval: u64, +} + +impl HealthHandle { + /// Creates a new instance of [`HealthHandle`]. + pub fn new( + probes: Arc, + execution_mode: Arc>, + l2_client: Arc, + builder_client: Arc, + health_check_interval: Duration, + max_unsafe_interval: u64, + ) -> Self { + Self { + probes, + execution_mode, + l2_client, + builder_client, + health_check_interval, + max_unsafe_interval, + } + } + + /// Periodically checks that the latest unsafe block timestamp is not older than the + /// the current time minus the max_unsafe_interval. 
+ pub fn spawn(self) -> JoinHandle<()> { + tokio::spawn(async move { + let mut timestamp = MonotonicTimestamp::new(); + + loop { + let t = timestamp.tick(); + + // Check L2 client health. If its unhealthy, set the health status to + // ServiceUnavailable If in disabled or dry run execution mode, set + // the health status to Healthy if the l2 client is healthy + match self.l2_client.get_block_by_number(BlockNumberOrTag::Latest, false).await { + Ok(block) => { + if t.saturating_sub(block.header.timestamp).gt(&self.max_unsafe_interval) { + warn!(target: "rollup_boost::health", curr_unix = %t, unsafe_unix = %block.header.timestamp, "L2 client - unsafe block timestamp is too old, updating health status to ServiceUnavailable"); + self.probes.set_health(Health::ServiceUnavailable); + sleep_until(Instant::now() + self.health_check_interval).await; + continue; + } else if self.execution_mode.lock().is_disabled() || + self.execution_mode.lock().is_dry_run() + { + self.probes.set_health(Health::Healthy); + sleep_until(Instant::now() + self.health_check_interval).await; + continue; + } + } + Err(e) => { + warn!(target: "rollup_boost::health", "L2 client - Failed to get unsafe block {} - updating health status", e); + self.probes.set_health(Health::ServiceUnavailable); + sleep_until(Instant::now() + self.health_check_interval).await; + continue; + } + }; + + if self.execution_mode.lock().is_enabled() { + // Only check builder client health if execution mode is enabled + // If its unhealthy, set the health status to PartialContent + match self + .builder_client + .get_block_by_number(BlockNumberOrTag::Latest, false) + .await + { + Ok(block) => { + if t.saturating_sub(block.header.timestamp) + .gt(&self.max_unsafe_interval) + { + warn!(target: "rollup_boost::health", curr_unix = %t, unsafe_unix = %block.header.timestamp, "Builder client - unsafe block timestamp is too old updating health status"); + self.probes.set_health(Health::PartialContent); + } else { + 
self.probes.set_health(Health::Healthy); + } + } + Err(e) => { + warn!(target: "rollup_boost::health", "Builder client - Failed to get unsafe block {} - updating health status", e); + self.probes.set_health(Health::PartialContent); + } + }; + } + sleep_until(Instant::now() + self.health_check_interval).await; + } + }) + } +} + +/// A monotonic wall-clock timestamp tracker that resists system clock changes. +/// +/// This struct provides a way to generate wall-clock-like timestamps that are +/// guaranteed to be monotonic (i.e., never go backward), even if the system +/// time is adjusted (e.g., via NTP, manual clock changes, or suspend/resume). +/// +/// - It tracks elapsed time using `Instant` to ensure monotonic progression. +/// - It produces a synthetic wall-clock timestamp that won't regress. +pub struct MonotonicTimestamp { + /// The last known UNIX timestamp in seconds. + pub last_unix: u64, + + /// The last monotonic time reference. + pub last_instant: Instant, +} + +impl Default for MonotonicTimestamp { + fn default() -> Self { + Self::new() + } +} + +impl MonotonicTimestamp { + pub fn new() -> Self { + let last_unix = + SystemTime::now().duration_since(UNIX_EPOCH).expect("Time went backwards").as_secs(); + let last_instant = Instant::now(); + Self { last_unix, last_instant } + } + + fn tick(&mut self) -> u64 { + let elapsed = self.last_instant.elapsed().as_secs(); + self.last_unix += elapsed; + self.last_instant = Instant::now(); + self.last_unix + } +} + +#[cfg(test)] +mod tests { + use std::net::SocketAddr; + + use alloy_consensus::Header; + use alloy_rpc_types_engine::JwtSecret; + use alloy_rpc_types_eth::{Block, Header as EthHeader, Transaction}; + + use crate::RpcClient; + use http::Uri; + use http_body_util::BodyExt; + use hyper::service::service_fn; + use hyper_util::rt::TokioIo; + use serde_json::json; + use tokio::net::TcpListener; + + use super::*; + use crate::Probes; + use rollup_boost_types::{self, payload::PayloadSource}; + use 
serial_test::serial; + + pub struct MockHttpServer { + addr: SocketAddr, + join_handle: JoinHandle<()>, + } + + impl Drop for MockHttpServer { + fn drop(&mut self) { + self.join_handle.abort(); + } + } + + impl MockHttpServer { + async fn serve( + f: fn(hyper::Request, timestamp: u64) -> S, + timestamp: u64, + ) -> eyre::Result + where + S: Future, hyper::Error>> + + Send + + Sync + + 'static, + { + { + let listener = TcpListener::bind("127.0.0.1:0").await?; + let addr = listener.local_addr()?; + + let handle = tokio::spawn(async move { + loop { + match listener.accept().await { + Ok((stream, _)) => { + let io = TokioIo::new(stream); + tokio::spawn(async move { + if let Err(err) = hyper::server::conn::http1::Builder::new() + .serve_connection( + io, + service_fn(move |req| f(req, timestamp)), + ) + .await + { + eprintln!("Error serving connection: {err}"); + } + }); + } + Err(e) => eprintln!("Error accepting connection: {e}"), + } + } + }); + + Ok(Self { addr, join_handle: handle }) + } + } + } + + async fn handler( + req: hyper::Request, + block_timestamp: u64, + ) -> Result, hyper::Error> { + let body_bytes = match req.into_body().collect().await { + Ok(buf) => buf.to_bytes(), + Err(_) => { + let error_response = json!({ + "jsonrpc": "2.0", + "error": { "code": -32700, "message": "Failed to read request body" }, + "id": null + }); + return Ok(hyper::Response::new(error_response.to_string())); + } + }; + + let request_body: serde_json::Value = match serde_json::from_slice(&body_bytes) { + Ok(json) => json, + Err(_) => { + let error_response = json!({ + "jsonrpc": "2.0", + "error": { "code": -32700, "message": "Invalid JSON format" }, + "id": null + }); + return Ok(hyper::Response::new(error_response.to_string())); + } + }; + + let method = request_body["method"].as_str().unwrap_or_default(); + + let mock_block = Block:: { + header: EthHeader { + inner: Header { timestamp: block_timestamp, ..Default::default() }, + ..Default::default() + }, + ..Default::default() + 
}; + + let response = match method { + "eth_getBlockByNumber" => json!({ + "jsonrpc": "2.0", + "result": mock_block, + "id": request_body["id"] + }), + _ => { + let error_response = json!({ + "jsonrpc": "2.0", + "error": { "code": -32601, "message": "Method not found" }, + "id": request_body["id"] + }); + return Ok(hyper::Response::new(error_response.to_string())); + } + }; + + Ok(hyper::Response::new(response.to_string())) + } + + #[serial] + #[tokio::test] + async fn test_health_check_healthy() -> eyre::Result<()> { + let probes = Arc::new(Probes::default()); + let now = + SystemTime::now().duration_since(UNIX_EPOCH).expect("Time went backwards").as_secs(); + + let l2 = MockHttpServer::serve(handler, now).await.unwrap(); + let l2_client = Arc::new(RpcClient::new( + format!("http://{}", l2.addr).parse::()?, + JwtSecret::random(), + 100, + PayloadSource::L2, + )?); + + let builder = MockHttpServer::serve(handler, now).await.unwrap(); + let builder_client = Arc::new(RpcClient::new( + format!("http://{}", builder.addr).parse::()?, + JwtSecret::random(), + 100, + PayloadSource::Builder, + )?); + + let health_handle = HealthHandle { + probes: probes.clone(), + execution_mode: Arc::new(Mutex::new(ExecutionMode::Enabled)), + l2_client: l2_client.clone(), + builder_client: builder_client.clone(), + health_check_interval: Duration::from_secs(60), + max_unsafe_interval: 5, + }; + + health_handle.spawn(); + tokio::time::sleep(Duration::from_secs(2)).await; + assert!(matches!(probes.health(), Health::Healthy)); + Ok(()) + } + + #[serial] + #[tokio::test] + async fn test_health_check_builder_exceeds_max_unsafe_interval() -> eyre::Result<()> { + let probes = Arc::new(Probes::default()); + let now = + SystemTime::now().duration_since(UNIX_EPOCH).expect("Time went backwards").as_secs(); + + // L2 healthy + let l2 = MockHttpServer::serve(handler, now).await.unwrap(); + let l2_client = Arc::new(RpcClient::new( + format!("http://{}", l2.addr).parse::()?, + JwtSecret::random(), + 
100, + PayloadSource::L2, + )?); + + // Builder unhealthy + let builder = MockHttpServer::serve(handler, now - 10).await.unwrap(); + let builder_client = Arc::new(RpcClient::new( + format!("http://{}", builder.addr).parse::()?, + JwtSecret::random(), + 100, + PayloadSource::Builder, + )?); + + let health_handle = HealthHandle { + probes: probes.clone(), + execution_mode: Arc::new(Mutex::new(ExecutionMode::Enabled)), + l2_client: l2_client.clone(), + builder_client: builder_client.clone(), + health_check_interval: Duration::from_secs(60), + max_unsafe_interval: 5, + }; + + health_handle.spawn(); + tokio::time::sleep(Duration::from_secs(2)).await; + assert!(matches!(probes.health(), Health::PartialContent)); + Ok(()) + } + + #[serial] + #[tokio::test] + async fn test_health_check_l2_exceeds_max_unsafe_interval() -> eyre::Result<()> { + let probes = Arc::new(Probes::default()); + let now = + SystemTime::now().duration_since(UNIX_EPOCH).expect("Time went backwards").as_secs(); + + // L2 healthy unhealth + let l2 = MockHttpServer::serve(handler, now - 10).await.unwrap(); + let l2_client = Arc::new(RpcClient::new( + format!("http://{}", l2.addr).parse::()?, + JwtSecret::random(), + 100, + PayloadSource::L2, + )?); + + // Builder healthy + let builder = MockHttpServer::serve(handler, now).await.unwrap(); + let builder_client = Arc::new(RpcClient::new( + format!("http://{}", builder.addr).parse::()?, + JwtSecret::random(), + 100, + PayloadSource::Builder, + )?); + + let health_handle = HealthHandle { + probes: probes.clone(), + execution_mode: Arc::new(Mutex::new(ExecutionMode::Enabled)), + l2_client: l2_client.clone(), + builder_client: builder_client.clone(), + health_check_interval: Duration::from_secs(60), + max_unsafe_interval: 5, + }; + + health_handle.spawn(); + tokio::time::sleep(Duration::from_secs(2)).await; + assert!(matches!(probes.health(), Health::ServiceUnavailable)); + Ok(()) + } + + #[serial] + #[tokio::test] + async fn 
test_health_check_exceeds_max_unsafe_interval_execution_mode_disabled() + -> eyre::Result<()> { + let probes = Arc::new(Probes::default()); + let now = + SystemTime::now().duration_since(UNIX_EPOCH).expect("Time went backwards").as_secs(); + // L2 healthy + let l2 = MockHttpServer::serve(handler, now).await.unwrap(); + let l2_client = Arc::new(RpcClient::new( + format!("http://{}", l2.addr).parse::()?, + JwtSecret::random(), + 100, + PayloadSource::L2, + )?); + let builder = MockHttpServer::serve(handler, now - 10).await.unwrap(); + + let builder_client = Arc::new(RpcClient::new( + format!("http://{}", builder.addr).parse::()?, + JwtSecret::random(), + 100, + PayloadSource::Builder, + )?); + + let health_handle = HealthHandle { + probes: probes.clone(), + execution_mode: Arc::new(Mutex::new(ExecutionMode::Disabled)), + l2_client: l2_client.clone(), + builder_client: builder_client.clone(), + health_check_interval: Duration::from_secs(60), + max_unsafe_interval: 5, + }; + + health_handle.spawn(); + tokio::time::sleep(Duration::from_secs(2)).await; + assert!(matches!(probes.health(), Health::Healthy)); + Ok(()) + } + + #[serial] + #[tokio::test] + async fn test_health_check_exceeds_max_unsafe_interval_execution_mode_dryrun() + -> eyre::Result<()> { + let probes = Arc::new(Probes::default()); + let now = + SystemTime::now().duration_since(UNIX_EPOCH).expect("Time went backwards").as_secs(); + // L2 healthy + let l2 = MockHttpServer::serve(handler, now).await.unwrap(); + let l2_client = Arc::new(RpcClient::new( + format!("http://{}", l2.addr).parse::()?, + JwtSecret::random(), + 100, + PayloadSource::L2, + )?); + let builder = MockHttpServer::serve(handler, now - 10).await.unwrap(); + + let builder_client = Arc::new(RpcClient::new( + format!("http://{}", builder.addr).parse::()?, + JwtSecret::random(), + 100, + PayloadSource::Builder, + )?); + + let health_handle = HealthHandle { + probes: probes.clone(), + execution_mode: Arc::new(Mutex::new(ExecutionMode::DryRun)), + 
l2_client: l2_client.clone(), + builder_client: builder_client.clone(), + health_check_interval: Duration::from_secs(60), + max_unsafe_interval: 5, + }; + + health_handle.spawn(); + tokio::time::sleep(Duration::from_secs(2)).await; + assert!(matches!(probes.health(), Health::Healthy)); + Ok(()) + } + + #[serial] + #[tokio::test] + async fn test_health_check_service_builder_unavailable() -> eyre::Result<()> { + let probes = Arc::new(Probes::default()); + let now = + SystemTime::now().duration_since(UNIX_EPOCH).expect("Time went backwards").as_secs(); + // L2 healthy + let l2 = MockHttpServer::serve(handler, now).await.unwrap(); + let l2_client = Arc::new(RpcClient::new( + format!("http://{}", l2.addr).parse::()?, + JwtSecret::random(), + 100, + PayloadSource::L2, + )?); + + // Builder unhealthy + let builder_client = Arc::new(RpcClient::new( + "http://127.0.0.1:6000".parse::()?, + JwtSecret::random(), + 100, + PayloadSource::Builder, + )?); + + let health_handle = HealthHandle { + probes: probes.clone(), + execution_mode: Arc::new(Mutex::new(ExecutionMode::Enabled)), + l2_client: l2_client.clone(), + builder_client: builder_client.clone(), + health_check_interval: Duration::from_secs(60), + max_unsafe_interval: 5, + }; + + health_handle.spawn(); + tokio::time::sleep(Duration::from_secs(2)).await; + assert!(matches!(probes.health(), Health::PartialContent)); + Ok(()) + } + + #[serial] + #[tokio::test] + async fn test_health_check_service_l2_unavailable() -> eyre::Result<()> { + let probes = Arc::new(Probes::default()); + let now = + SystemTime::now().duration_since(UNIX_EPOCH).expect("Time went backwards").as_secs(); + + // L2 returns an error + let l2_client = Arc::new(RpcClient::new( + "http://127.0.0.1:6000".parse::()?, + JwtSecret::random(), + 100, + PayloadSource::L2, + )?); + + // Builder healthy + let builder = MockHttpServer::serve(handler, now).await.unwrap(); + let builder_client = Arc::new(RpcClient::new( + format!("http://{}", builder.addr).parse::()?, + 
JwtSecret::random(), + 100, + PayloadSource::Builder, + )?); + + let health_handle = HealthHandle { + probes: probes.clone(), + execution_mode: Arc::new(Mutex::new(ExecutionMode::Enabled)), + l2_client: l2_client.clone(), + builder_client: builder_client.clone(), + health_check_interval: Duration::from_secs(60), + max_unsafe_interval: 5, + }; + + health_handle.spawn(); + tokio::time::sleep(Duration::from_secs(2)).await; + assert!(matches!(probes.health(), Health::ServiceUnavailable)); + Ok(()) + } + + #[serial] + #[tokio::test] + async fn tick_advances_after_sleep() { + let mut ts: MonotonicTimestamp = MonotonicTimestamp::new(); + let t1 = ts.tick(); + tokio::time::sleep(Duration::from_secs(1)).await; + let t2 = ts.tick(); + + assert!(t2 >= t1 + 1,); + } + + #[serial] + #[tokio::test] + async fn tick_matches_system_clock() { + let mut ts = MonotonicTimestamp::new(); + let unix = + SystemTime::now().duration_since(UNIX_EPOCH).expect("Time went backwards").as_secs(); + + assert_eq!(ts.last_unix, unix); + + std::thread::sleep(Duration::from_secs(5)); + + let t1 = ts.tick(); + let unix = + SystemTime::now().duration_since(UNIX_EPOCH).expect("Time went backwards").as_secs(); + assert_eq!(t1, unix); + } +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/lib.rs b/rust/rollup-boost/crates/rollup-boost/src/lib.rs new file mode 100644 index 0000000000000..9a261992e5b93 --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/lib.rs @@ -0,0 +1,43 @@ +#![allow(clippy::complexity)] + +mod client; +pub use client::{auth::*, http::*, rpc::*}; + +mod cli; +pub use cli::*; + +mod debug_api; +pub use debug_api::*; + +mod metrics; +pub use metrics::*; + +mod proxy; +pub use proxy::*; + +mod server; +pub use server::*; + +mod flashblocks; +pub use flashblocks::*; + +mod tracing; +pub use tracing::*; + +mod probe; +pub use probe::*; + +mod health; +pub use health::*; + +#[cfg(all(test, feature = "integration"))] +pub mod tests; + +mod selection; +pub use selection::*; + 
+mod engine_api; +pub use engine_api::*; + +mod version; +pub use version::*; diff --git a/rust/rollup-boost/crates/rollup-boost/src/metrics.rs b/rust/rollup-boost/crates/rollup-boost/src/metrics.rs new file mode 100644 index 0000000000000..12ffc32ba488a --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/metrics.rs @@ -0,0 +1,75 @@ +use std::net::SocketAddr; + +use eyre::Result; +use metrics::gauge; +use metrics_exporter_prometheus::PrometheusBuilder; +use metrics_util::layers::{PrefixLayer, Stack}; +use tokio::net::TcpListener; +use tracing::{error, info}; + +use http::StatusCode; +use hyper::{Request, Response, server::conn::http1, service::service_fn}; +use hyper_util::rt::TokioIo; +use jsonrpsee::http_client::HttpBody; +use metrics_exporter_prometheus::PrometheusHandle; + +use crate::{ExecutionMode, cli::RollupBoostServiceArgs}; + +pub fn init_metrics(args: &RollupBoostServiceArgs) -> Result<()> { + if args.metrics { + let recorder = PrometheusBuilder::new().build_recorder(); + let handle = recorder.handle(); + + Stack::new(recorder).push(PrefixLayer::new("rollup-boost")).install()?; + + // Start the metrics server + let metrics_addr = format!("{}:{}", args.metrics_host, args.metrics_port); + let addr: SocketAddr = metrics_addr.parse()?; + tokio::spawn(init_metrics_server(addr, handle)); // Run the metrics server in a separate task + } + Ok(()) +} + +async fn init_metrics_server(addr: SocketAddr, handle: PrometheusHandle) -> eyre::Result<()> { + let listener = TcpListener::bind(addr).await?; + info!("Metrics server running on {}", addr); + + loop { + match listener.accept().await { + Ok((stream, _)) => { + let handle = handle.clone(); // Clone the handle for each connection + tokio::task::spawn(async move { + let service = service_fn(move |_req: Request| { + let response = match _req.uri().path() { + "/metrics" => Response::builder() + .header("content-type", "text/plain") + .body(HttpBody::from(handle.render())) + .expect("Failed to create metrics 
response"), + _ => Response::builder() + .status(StatusCode::NOT_FOUND) + .body(HttpBody::empty()) + .expect("Failed to create not found response"), + }; + + async { Ok::<_, hyper::Error>(response) } + }); + + let io = TokioIo::new(stream); + + if let Err(err) = http1::Builder::new().serve_connection(io, service).await { + error!(message = "Error serving metrics connection", error = %err); + } + }); + } + Err(e) => { + error!(message = "Error accepting connection", error = %e); + } + } + } +} + +/// Update the execution_mode prometheus metric +pub fn update_execution_mode_gauge(execution_mode: ExecutionMode) { + let gauge = gauge!("rollup_boost_execution_mode"); + gauge.set(execution_mode.to_metric_value()); +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/probe.rs b/rust/rollup-boost/crates/rollup-boost/src/probe.rs new file mode 100644 index 0000000000000..ac289684283fd --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/probe.rs @@ -0,0 +1,142 @@ +use std::{ + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; + +use futures::FutureExt as _; +use jsonrpsee::{ + core::BoxError, + http_client::{HttpRequest, HttpResponse}, + server::HttpBody, +}; +use parking_lot::Mutex; +use tower::{Layer, Service}; +use tracing::info; + +use crate::{Request, Response}; + +#[derive(Copy, Clone, Debug, Default)] +pub enum Health { + /// Indicates that the builder is building blocks + #[default] + Healthy, + /// Indicates that the l2 is building blocks, but the builder is not + PartialContent, + /// Indicates that blocks are not being built by either the l2 or the builder + /// + /// Service starts out unavailable until the first blocks are built + ServiceUnavailable, +} + +impl From for Response { + fn from(health: Health) -> Self { + match health { + Health::Healthy => ok(), + Health::PartialContent => partial_content(), + Health::ServiceUnavailable => service_unavailable(), + } + } +} + +#[derive(Debug, Default)] +pub struct Probes { + health: Mutex, +} + 
+impl Probes { + pub fn set_health(&self, value: Health) { + info!(target: "rollup_boost::probe", "Updating health probe to to {:?}", value); + *self.health.lock() = value; + } + + pub fn health(&self) -> Health { + *self.health.lock() + } +} + +/// A [`Layer`] that adds probe endpoints to a service. +#[derive(Clone, Debug)] +pub struct ProbeLayer { + probes: Arc, +} + +impl ProbeLayer { + pub fn new() -> (Self, Arc) { + let probes = Arc::new(Probes::default()); + (Self { probes: probes.clone() }, probes) + } +} + +impl Layer for ProbeLayer { + type Service = ProbeService; + + fn layer(&self, inner: S) -> Self::Service { + ProbeService { inner, probes: self.probes.clone() } + } +} + +#[derive(Clone, Debug)] +pub struct ProbeService { + inner: S, + probes: Arc, +} + +impl Service for ProbeService +where + S: Service + Send + Sync + Clone + 'static, + S::Response: 'static, + S::Error: Into + 'static, + S::Future: Send + 'static, +{ + type Response = Response; + type Error = BoxError; + type Future = + Pin> + Send + 'static>>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx).map_err(Into::into) + } + + fn call(&mut self, request: HttpRequest) -> Self::Future { + // See https://github.com/tower-rs/tower/blob/abb375d08cf0ba34c1fe76f66f1aba3dc4341013/tower-service/src/lib.rs#L276 + // for an explanation of this pattern + let mut service = self.clone(); + service.inner = std::mem::replace(&mut self.inner, service.inner); + + async move { + match request.uri().path() { + // Return health status + "/healthz" => Ok(service.probes.health().into()), + // Service is responding, and therefor ready + "/readyz" => Ok(ok()), + // Service is responding, and therefor live + "/livez" => Ok(ok()), + // Forward the request to the inner service + _ => service.inner.call(request).await.map_err(|e| e.into()), + } + } + .boxed() + } +} + +fn ok() -> Response { + HttpResponse::builder() + .status(200) + .body(HttpBody::from("OK")) + .expect("Failed to 
create OK response") +} + +fn partial_content() -> Response { + HttpResponse::builder() + .status(206) + .body(HttpBody::from("Partial Content")) + .expect("Failed to create partial content response") +} + +fn service_unavailable() -> Response { + HttpResponse::builder() + .status(503) + .body(HttpBody::from("Service Unavailable")) + .expect("Failed to create service unavailable response") +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/proxy.rs b/rust/rollup-boost/crates/rollup-boost/src/proxy.rs new file mode 100644 index 0000000000000..9fa328a1b9923 --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/proxy.rs @@ -0,0 +1,948 @@ +use crate::{ + Request, Response, client::http::HttpClient, from_buffered_request, into_buffered_request, +}; +use http_body_util::BodyExt as _; +use jsonrpsee::{core::BoxError, server::HttpBody}; +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; +use tower::{Layer, Service}; +use tracing::info; + +const ENGINE_METHOD: &str = "engine_"; + +/// Requests that should be forwarded to both the builder and default execution client +const FORWARD_REQUESTS: [&str; 6] = [ + "eth_sendRawTransaction", + "eth_sendRawTransactionConditional", + "miner_setExtra", + "miner_setGasPrice", + "miner_setGasLimit", + "miner_setMaxDASize", +]; + +#[derive(Debug, Clone)] +pub struct ProxyLayer { + l2_client: HttpClient, + builder_client: HttpClient, +} + +impl ProxyLayer { + pub fn new(l2_client: HttpClient, builder_client: HttpClient) -> Self { + ProxyLayer { l2_client, builder_client } + } +} + +impl Layer for ProxyLayer { + type Service = ProxyService; + + fn layer(&self, inner: S) -> Self::Service { + ProxyService { + inner, + l2_client: self.l2_client.clone(), + builder_client: self.builder_client.clone(), + } + } +} + +#[derive(Clone)] +pub struct ProxyService { + inner: S, + l2_client: HttpClient, + builder_client: HttpClient, +} + +// Consider using `RpcServiceT` when 
https://github.com/paritytech/jsonrpsee/pull/1521 is merged +impl Service for ProxyService +where + S: Service + Send + Sync + Clone + 'static, + S::Response: 'static, + S::Error: Into + 'static, + S::Future: Send + 'static, +{ + type Response = Response; + type Error = BoxError; + type Future = + Pin> + Send + 'static>>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx).map_err(Into::into) + } + + fn call(&mut self, req: Request) -> Self::Future { + #[derive(serde::Deserialize, Debug)] + struct RpcRequest<'a> { + #[serde(borrow)] + method: &'a str, + } + + // See https://github.com/tower-rs/tower/blob/abb375d08cf0ba34c1fe76f66f1aba3dc4341013/tower-service/src/lib.rs#L276 + // for an explanation of this pattern + let mut service = self.clone(); + service.inner = std::mem::replace(&mut self.inner, service.inner); + + let fut = async move { + let buffered = into_buffered_request(req).await?; + let body_bytes = buffered.clone().collect().await?.to_bytes(); + + // Deserialize the bytes to find the method + let method = serde_json::from_slice::(&body_bytes)?.method.to_string(); + + // If the request is an Engine API method, call the inner RollupBoostServer + if method.starts_with(ENGINE_METHOD) { + info!(target: "proxy::call", message = "proxying request to rollup-boost server", ?method); + return service + .inner + .call(from_buffered_request(buffered)) + .await + .map_err(|e| e.into()); + } + + if FORWARD_REQUESTS.contains(&method.as_str()) { + // If the request should be forwarded, send to both the + // default execution client and the builder + let method_clone = method.clone(); + let buffered_clone = buffered.clone(); + let mut builder_client = service.builder_client.clone(); + + // Fire and forget the builder request + tokio::spawn(async move { + let _ = builder_client.forward(buffered_clone, method_clone).await; + }); + } + + // Return the response from the L2 client + service.l2_client.forward(buffered, 
method).await.map(|res| res.map(HttpBody::new)) + }; + + Box::pin(fut) + } +} + +#[cfg(test)] +mod tests { + use crate::{ClientArgs, probe::ProbeLayer}; + use rollup_boost_types::payload::PayloadSource; + + use super::*; + use alloy_primitives::{B256, Bytes, U64, U128, hex}; + use alloy_rpc_types_engine::JwtSecret; + use alloy_rpc_types_eth::erc4337::TransactionConditional; + use http::{StatusCode, Uri}; + use http_body_util::{BodyExt, Full}; + use hyper::service::service_fn; + use hyper_util::{ + client::legacy::{Client, connect::HttpConnector}, + rt::{TokioExecutor, TokioIo}, + }; + use jsonrpsee::{ + RpcModule, + core::{ClientError, client::ClientT}, + http_client::HttpClient, + rpc_params, + server::{Server, ServerBuilder, ServerHandle}, + types::{ErrorCode, ErrorObject}, + }; + use serde_json::json; + use std::{net::SocketAddr, sync::Arc}; + use tokio::{net::TcpListener, task::JoinHandle}; + + // A JSON-RPC error is retriable if error.code ∉ (-32700, -32600] + fn is_retriable_code(code: i32) -> bool { + !(-32700..=-32600).contains(&code) + } + + struct TestHarness { + builder: MockHttpServer, + l2: MockHttpServer, + server_handle: ServerHandle, + proxy_client: HttpClient, + } + + impl Drop for TestHarness { + fn drop(&mut self) { + self.server_handle.stop().unwrap(); + } + } + + impl TestHarness { + async fn new() -> eyre::Result { + let builder = MockHttpServer::serve().await?; + let l2 = MockHttpServer::serve().await?; + let middleware = tower::ServiceBuilder::new().layer(ProxyLayer::new( + ClientArgs { + url: format!("http://{}:{}", l2.addr.ip(), l2.addr.port()).parse::()?, + jwt_token: Some(JwtSecret::random()), + jwt_path: None, + timeout: 1, + } + .new_http_client(PayloadSource::L2) + .unwrap(), + ClientArgs { + url: format!("http://{}:{}", builder.addr.ip(), builder.addr.port()) + .parse::()?, + jwt_token: Some(JwtSecret::random()), + jwt_path: None, + timeout: 1, + } + .new_http_client(PayloadSource::Builder)?, + )); + + let temp_listener = 
TcpListener::bind("127.0.0.1:0").await?; + let server_addr = temp_listener.local_addr()?; + drop(temp_listener); + + let server = Server::builder() + .set_http_middleware(middleware.clone()) + .build(server_addr) + .await?; + + let server_addr = server.local_addr()?; + let proxy_client: HttpClient = HttpClient::builder().build(format!( + "http://{}:{}", + server_addr.ip(), + server_addr.port() + ))?; + + let server_handle = server.start(RpcModule::new(())); + + Ok(Self { builder, l2, server_handle, proxy_client }) + } + } + + struct MockHttpServer { + addr: SocketAddr, + requests: Arc>>, + join_handle: JoinHandle<()>, + shutdown_tx: Option>, + connections: Arc>>>, + } + + impl Drop for MockHttpServer { + fn drop(&mut self) { + // Send shutdown signal if available + if let Some(tx) = self.shutdown_tx.take() { + let _ = tx.send(()); + } + // Abort active connections to simulate a crash closing open sockets + if let Ok(mut conns) = self.connections.try_lock() { + for handle in conns.drain(..) { + handle.abort(); + } + } + self.join_handle.abort(); + } + } + + impl MockHttpServer { + async fn serve() -> eyre::Result { + let listener = TcpListener::bind("127.0.0.1:0").await?; + let addr = listener.local_addr()?; + Self::serve_with_listener(listener, addr).await + } + + async fn serve_on_addr(addr: SocketAddr) -> eyre::Result { + let listener = TcpListener::bind(addr).await?; + let actual_addr = listener.local_addr()?; + Self::serve_with_listener(listener, actual_addr).await + } + + async fn serve_with_listener( + listener: TcpListener, + addr: SocketAddr, + ) -> eyre::Result { + let requests = Arc::new(tokio::sync::Mutex::new(vec![])); + let (shutdown_tx, mut shutdown_rx) = tokio::sync::oneshot::channel(); + let connections: Arc>>> = + Arc::new(tokio::sync::Mutex::new(Vec::new())); + + let requests_clone = requests.clone(); + let connections_clone = connections.clone(); + let handle = tokio::spawn(async move { + loop { + tokio::select! 
{ + _ = &mut shutdown_rx => { + // Shutdown signal received + break; + } + result = listener.accept() => { + match result { + Ok((stream, _)) => { + let io = TokioIo::new(stream); + let requests = requests_clone.clone(); + + let conn_task = tokio::spawn(async move { + if let Err(err) = hyper::server::conn::http1::Builder::new() + .serve_connection( + io, + service_fn(move |req| { + Self::handle_request(req, requests.clone()) + }), + ) + .await + { + eprintln!("Error serving connection: {err}"); + } + }); + // Track the connection task so we can abort on crash + connections_clone.lock().await.push(conn_task); + } + Err(e) => eprintln!("Error accepting connection: {e}"), + } + } + } + } + }); + + Ok(Self { + addr, + requests, + join_handle: handle, + shutdown_tx: Some(shutdown_tx), + connections, + }) + } + + async fn handle_request( + req: hyper::Request, + requests: Arc>>, + ) -> Result, hyper::Error> { + let body_bytes = match req.into_body().collect().await { + Ok(buf) => buf.to_bytes(), + Err(_) => { + let error_response = json!({ + "jsonrpc": "2.0", + "error": { "code": -32700, "message": "Failed to read request body" }, + "id": null + }); + return Ok(hyper::Response::new(error_response.to_string())); + } + }; + + let request_body: serde_json::Value = match serde_json::from_slice(&body_bytes) { + Ok(json) => json, + Err(_) => { + let error_response = json!({ + "jsonrpc": "2.0", + "error": { "code": -32700, "message": "Invalid JSON format" }, + "id": null + }); + return Ok(hyper::Response::new(error_response.to_string())); + } + }; + + // spawn and await so that the requests will eventually be processed + // even after the request is cancelled + let request_body_clone = request_body.clone(); + tokio::spawn(async move { + requests.lock().await.push(request_body_clone); + }) + .await + .unwrap(); + + let method = request_body["method"].as_str().unwrap_or_default(); + + let response = match method { + "eth_sendRawTransaction" | "eth_sendRawTransactionConditional" 
=> json!({ + "jsonrpc": "2.0", + "result": format!("{}", B256::from([1; 32])), + "id": request_body["id"] + }), + "miner_setMaxDASize" | "miner_setGasLimit" | "miner_setGasPrice" | + "miner_setExtra" => { + json!({ + "jsonrpc": "2.0", + "result": true, + "id": request_body["id"] + }) + } + "mock_forwardedMethod" => { + json!({ + "jsonrpc": "2.0", + "result": "forwarded response", + "id": request_body["id"] + }) + } + _ => { + let error_response = json!({ + "jsonrpc": "2.0", + "error": { "code": -32601, "message": "Method not found" }, + "id": request_body["id"] + }); + return Ok(hyper::Response::new(error_response.to_string())); + } + }; + + Ok(hyper::Response::new(response.to_string())) + } + } + + #[cfg(test)] + #[ctor::ctor] + fn crypto_ring_init() { + rustls::crypto::ring::default_provider().install_default().unwrap(); + } + + #[tokio::test] + async fn test_proxy_service() { + proxy_success().await; + proxy_failure().await; + does_not_proxy_engine_method().await; + health_check().await; + } + + async fn proxy_success() { + let response = send_request("greet_melkor").await; + assert!(response.is_ok()); + assert_eq!(response.unwrap(), "You are the dark lord"); + } + + async fn proxy_failure() { + let response = send_request("non_existent_method").await; + assert!(response.is_err()); + let expected_error = ErrorObject::from(ErrorCode::MethodNotFound).into_owned(); + assert!(matches!( + response.unwrap_err(), + ClientError::Call(e) if e == expected_error + )); + } + + async fn does_not_proxy_engine_method() { + let response = send_request("engine_method").await; + assert!(response.is_ok()); + assert_eq!(response.unwrap(), "engine response"); + } + + async fn health_check() { + // Spawn a backend for the proxy to point to, and a proxy with dynamic port + let (backend_server, backend_addr) = spawn_server().await; + let (proxy_server, proxy_addr) = spawn_proxy_server_with_l2(backend_addr).await; + // Create a new HTTP client + let client: Client> = + 
Client::builder(TokioExecutor::new()).build_http(); + + // Test the health check endpoint + let health_check_url = format!("http://{proxy_addr}/healthz"); + let health_response = client.get(health_check_url.parse::().unwrap()).await; + assert!(health_response.is_ok()); + let status = health_response.unwrap().status(); + assert_eq!(status, StatusCode::OK); + + proxy_server.stop().unwrap(); + proxy_server.stopped().await; + backend_server.stop().unwrap(); + backend_server.stopped().await; + } + + async fn send_request(method: &str) -> Result { + let (backend_server, backend_addr) = spawn_server().await; + let (proxy_server, proxy_addr) = spawn_proxy_server_with_l2(backend_addr).await; + let proxy_client = HttpClient::builder().build(format!("http://{proxy_addr}")).unwrap(); + + let response = proxy_client.request::(method, rpc_params![]).await; + + backend_server.stop().unwrap(); + backend_server.stopped().await; + proxy_server.stop().unwrap(); + proxy_server.stopped().await; + + response + } + + async fn spawn_server() -> (ServerHandle, SocketAddr) { + let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); + let addr = listener.local_addr().unwrap(); + drop(listener); + let server = ServerBuilder::default().build(addr).await.unwrap(); + + // Create a mock rpc module + let mut module = RpcModule::new(()); + module.register_method("greet_melkor", |_, _, _| "You are the dark lord").unwrap(); + + (server.start(module), addr) + } + + /// Spawn a new RPC server with a proxy layer pointing to a provided L2 address. 
+ async fn spawn_proxy_server_with_l2(l2_addr: SocketAddr) -> (ServerHandle, SocketAddr) { + let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); + let proxy_addr = listener.local_addr().unwrap(); + drop(listener); + + let jwt = JwtSecret::random(); + let l2_auth_uri = format!("http://{l2_addr}").parse::().unwrap(); + + let (probe_layer, _) = ProbeLayer::new(); + let proxy_layer = ProxyLayer::new( + ClientArgs { + url: l2_auth_uri.clone(), + jwt_token: Some(jwt), + jwt_path: None, + timeout: 1, + } + .new_http_client(PayloadSource::L2) + .unwrap(), + ClientArgs { + url: l2_auth_uri.clone(), + jwt_token: Some(jwt), + jwt_path: None, + timeout: 1, + } + .new_http_client(PayloadSource::Builder) + .unwrap(), + ); + + // Create a layered server + let server = ServerBuilder::default() + .set_http_middleware(tower::ServiceBuilder::new().layer(probe_layer).layer(proxy_layer)) + .build(proxy_addr) + .await + .unwrap(); + + // Create a mock rpc module + let mut module = RpcModule::new(()); + module.register_method("engine_method", |_, _, _| "engine response").unwrap(); + module + .register_method("eth_sendRawTransaction", |_, _, _| "raw transaction response") + .unwrap(); + module.register_method("non_existent_method", |_, _, _| "no proxy response").unwrap(); + + (server.start(module), proxy_addr) + } + + #[tokio::test] + async fn test_forward_set_max_da_size() -> eyre::Result<()> { + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + let test_harness = TestHarness::new().await?; + + let max_tx_size = U64::MAX; + let max_block_size = U64::MAX; + + test_harness + .proxy_client + .request::("miner_setMaxDASize", (max_tx_size, max_block_size)) + .await?; + + tokio::time::sleep(tokio::time::Duration::from_secs(2)).await; + + let expected_method = "miner_setMaxDASize"; + let expected_tx_size = json!(max_tx_size); + let expected_block_size = json!(max_block_size); + + // Assert the builder received the correct payload + let builder = &test_harness.builder; 
+ let builder_requests = builder.requests.lock().await; + let builder_req = builder_requests.first().unwrap(); + assert_eq!(builder_requests.len(), 1); + assert_eq!(builder_req["method"], expected_method); + assert_eq!(builder_req["params"][0], expected_tx_size); + assert_eq!(builder_req["params"][1], expected_block_size); + + // Assert the l2 received the correct payload + let l2 = &test_harness.l2; + let l2_requests = l2.requests.lock().await; + let l2_req = l2_requests.first().unwrap(); + assert_eq!(l2_requests.len(), 1); + assert_eq!(l2_req["method"], expected_method); + assert_eq!(l2_req["params"][0], expected_tx_size); + assert_eq!(builder_req["params"][1], expected_block_size); + + Ok(()) + } + + #[tokio::test] + async fn test_forward_eth_send_raw_transaction() -> eyre::Result<()> { + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + let test_harness = TestHarness::new().await?; + + let expected_tx: Bytes = hex!("1234").into(); + let expected_method = "eth_sendRawTransaction"; + + test_harness + .proxy_client + .request::(expected_method, (expected_tx.clone(),)) + .await?; + + let expected_tx = json!(expected_tx); + + // Assert the builder received the correct payload + let builder = &test_harness.builder; + let builder_requests = builder.requests.lock().await; + let builder_req = builder_requests.first().unwrap(); + assert_eq!(builder_requests.len(), 1); + assert_eq!(builder_req["method"], expected_method); + assert_eq!(builder_req["params"][0], expected_tx); + + // Assert the l2 received the correct payload + let l2 = &test_harness.l2; + let l2_requests = l2.requests.lock().await; + let l2_req = l2_requests.first().unwrap(); + assert_eq!(l2_requests.len(), 1); + assert_eq!(l2_req["method"], expected_method); + assert_eq!(l2_req["params"][0], expected_tx); + + Ok(()) + } + + #[tokio::test] + async fn test_forward_eth_send_raw_transaction_conditional() -> eyre::Result<()> { + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + let 
test_harness = TestHarness::new().await?; + + let expected_tx: Bytes = hex!("1234").into(); + let expected_method = "eth_sendRawTransactionConditional"; + let transact_conditionals = TransactionConditional::default(); + test_harness + .proxy_client + .request::( + expected_method, + (expected_tx.clone(), transact_conditionals.clone()), + ) + .await?; + + let expected_tx = json!(expected_tx); + let expected_conditionals = json!(transact_conditionals); + // Assert the builder received the correct payload + let builder = &test_harness.builder; + let builder_requests = builder.requests.lock().await; + let builder_req = builder_requests.first().unwrap(); + assert_eq!(builder_requests.len(), 1); + assert_eq!(builder_req["method"], expected_method); + assert_eq!(builder_req["params"][0], expected_tx); + assert_eq!(builder_req["params"][1], expected_conditionals); + + // Assert the l2 received the correct payload + let l2 = &test_harness.l2; + let l2_requests = l2.requests.lock().await; + let l2_req = l2_requests.first().unwrap(); + assert_eq!(l2_requests.len(), 1); + assert_eq!(l2_req["method"], expected_method); + assert_eq!(l2_req["params"][0], expected_tx); + assert_eq!(l2_req["params"][1], expected_conditionals); + + Ok(()) + } + + #[tokio::test] + async fn test_forward_miner_set_extra() -> eyre::Result<()> { + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + let test_harness = TestHarness::new().await?; + + let extra = Bytes::default(); + let expected_method = "miner_setExtra"; + + test_harness + .proxy_client + .request::(expected_method, (extra.clone(),)) + .await?; + + let expected_extra = json!(extra); + + // Assert the builder received the correct payload + let builder = &test_harness.builder; + let builder_requests = builder.requests.lock().await; + let builder_req = builder_requests.first().unwrap(); + assert_eq!(builder_requests.len(), 1); + assert_eq!(builder_req["method"], expected_method); + assert_eq!(builder_req["params"][0], 
expected_extra); + + // Assert the l2 received the correct payload + let l2 = &test_harness.l2; + let l2_requests = l2.requests.lock().await; + let l2_req = l2_requests.first().unwrap(); + assert_eq!(l2_requests.len(), 1); + assert_eq!(l2_req["method"], expected_method); + assert_eq!(l2_req["params"][0], expected_extra); + + Ok(()) + } + + #[tokio::test] + async fn test_forward_miner_set_gas_price() -> eyre::Result<()> { + let test_harness = TestHarness::new().await?; + + let gas_price = U128::ZERO; + let expected_method = "miner_setGasPrice"; + + test_harness + .proxy_client + .request::(expected_method, (gas_price,)) + .await?; + + let expected_price = json!(gas_price); + tokio::time::sleep(tokio::time::Duration::from_secs(3)).await; + + // Assert the builder received the correct payload + let builder = &test_harness.builder; + let builder_requests = builder.requests.lock().await; + let builder_req = builder_requests.first().unwrap(); + assert_eq!(builder_requests.len(), 1); + assert_eq!(builder_req["method"], expected_method); + assert_eq!(builder_req["params"][0], expected_price); + + // Assert the l2 received the correct payload + let l2 = &test_harness.l2; + let l2_requests = l2.requests.lock().await; + let l2_req = l2_requests.first().unwrap(); + assert_eq!(l2_requests.len(), 1); + assert_eq!(l2_req["method"], expected_method); + assert_eq!(l2_req["params"][0], expected_price); + + Ok(()) + } + + #[tokio::test] + async fn test_forward_miner_set_gas_limit() -> eyre::Result<()> { + let test_harness = TestHarness::new().await?; + + let gas_limit = U128::ZERO; + let expected_method = "miner_setGasLimit"; + + test_harness + .proxy_client + .request::(expected_method, (gas_limit,)) + .await?; + + let expected_price = json!(gas_limit); + + tokio::time::sleep(tokio::time::Duration::from_secs(2)).await; + + // Assert the builder received the correct payload + let builder = &test_harness.builder; + let builder_requests = builder.requests.lock().await; + let 
builder_req = builder_requests.first().unwrap(); + assert_eq!(builder_requests.len(), 1); + assert_eq!(builder_req["method"], expected_method); + assert_eq!(builder_req["params"][0], expected_price); + + // Assert the l2 received the correct payload + let l2 = &test_harness.l2; + let l2_requests = l2.requests.lock().await; + let l2_req = l2_requests.first().unwrap(); + assert_eq!(l2_requests.len(), 1); + assert_eq!(l2_req["method"], expected_method); + assert_eq!(l2_req["params"][0], expected_price); + + Ok(()) + } + + #[tokio::test] + async fn test_direct_forward_mock_request() -> eyre::Result<()> { + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + let test_harness = TestHarness::new().await?; + + let mock_data = U128::ZERO; + let expected_method = "mock_forwardedMethod"; + + test_harness + .proxy_client + .request::(expected_method, (mock_data,)) + .await?; + + let expected_price = json!(mock_data); + + // Assert the builder has not received the payload + let builder = &test_harness.builder; + let builder_requests = builder.requests.lock().await; + assert_eq!(builder_requests.len(), 0); + + // Assert the l2 auth received the correct payload + let l2 = &test_harness.l2; + let l2_requests = l2.requests.lock().await; + let l2_req = l2_requests.first().unwrap(); + assert_eq!(l2_requests.len(), 1); + assert_eq!(l2_req["method"], expected_method); + assert_eq!(l2_req["params"][0], expected_price); + + Ok(()) + } + + #[tokio::test] + async fn test_l2_server_recovery() -> eyre::Result<()> { + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + + // Step 1: Reserve a port for L2 by binding and then releasing it + let temp_listener = TcpListener::bind("127.0.0.1:0").await?; + let l2_addr = temp_listener.local_addr()?; + drop(temp_listener); + + // Wait for port to be fully released + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + + // Step 2: Create builder and proxy WITHOUT an L2 server running yet + let builder = 
MockHttpServer::serve().await?; + let builder_addr = builder.addr; + let jwt = JwtSecret::random(); + + // Create proxy layer with L2 client pointing to a non-existent server + // Use a short timeout to fail quickly + let proxy_layer = ProxyLayer::new( + ClientArgs { + url: format!("http://{}:{}", l2_addr.ip(), l2_addr.port()).parse::()?, + jwt_token: Some(jwt), + jwt_path: None, + timeout: 200, // Short timeout for faster failure + } + .new_http_client(PayloadSource::L2)?, + ClientArgs { + url: format!("http://{}:{}", builder_addr.ip(), builder_addr.port()) + .parse::()?, + jwt_token: Some(jwt), + jwt_path: None, + timeout: 200, + } + .new_http_client(PayloadSource::Builder)?, + ); + + // Start proxy server + let listener = TcpListener::bind("127.0.0.1:0").await?; + let proxy_addr = listener.local_addr()?; + drop(listener); + + let server = Server::builder() + .set_http_middleware(tower::ServiceBuilder::new().layer(proxy_layer)) + .build(proxy_addr) + .await?; + + let proxy_addr = server.local_addr()?; + let proxy_client: HttpClient = HttpClient::builder().build(format!( + "http://{}:{}", + proxy_addr.ip(), + proxy_addr.port() + ))?; + + let server_handle = server.start(RpcModule::new(())); + + // Step 3: Request should fail (connection refused) because L2 server doesn't exist + let mock_data = U128::from(42); + let result = proxy_client + .request::("mock_forwardedMethod", (mock_data,)) + .await; + assert!(result.is_err(), "Request should fail when L2 server is not running"); + println!("Request failed as expected (no server): {result:?}"); + + // Step 4: Start the L2 server + let l2 = MockHttpServer::serve_on_addr(l2_addr).await?; + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + + // Step 5: Request should now succeed (demonstrating auto-recovery) + let result = proxy_client + .request::("mock_forwardedMethod", (mock_data,)) + .await; + assert!( + result.is_ok(), + "Request should succeed after L2 server starts (auto-recovery): {result:?}" 
+ ); + println!("Request succeeded after server started: {result:?}"); + + // Step 6: Verify multiple subsequent requests work consistently + for i in 0..3 { + let result = proxy_client + .request::("mock_forwardedMethod", (mock_data,)) + .await; + assert!(result.is_ok(), "Request {i} should continue to succeed: {result:?}"); + } + + // Verify the server received requests + { + let l2_requests = l2.requests.lock().await; + assert!(!l2_requests.is_empty(), "L2 server should have received requests"); + assert_eq!(l2_requests[0]["method"], "mock_forwardedMethod"); + } + + // Cleanup + server_handle.stop()?; + drop(builder); + drop(l2); + + Ok(()) + } + + #[tokio::test] + async fn test_success_then_failure_then_success() -> eyre::Result<()> { + // Dynamically bind L2 and Proxy servers + let l2 = MockHttpServer::serve().await?; + let l2_addr = l2.addr; + + // Build proxy with short timeouts pointing to current L2 + let jwt = JwtSecret::random(); + let proxy_layer = ProxyLayer::new( + ClientArgs { + url: format!("http://{}:{}", l2_addr.ip(), l2_addr.port()).parse::()?, + jwt_token: Some(jwt), + jwt_path: None, + timeout: 200, + } + .new_http_client(PayloadSource::L2)?, + ClientArgs { + url: format!("http://{}:{}", l2_addr.ip(), l2_addr.port()).parse::()?, + jwt_token: Some(jwt), + jwt_path: None, + timeout: 200, + } + .new_http_client(PayloadSource::Builder)?, + ); + + // Start proxy on dynamic port + let listener = TcpListener::bind("127.0.0.1:0").await?; + let proxy_addr = listener.local_addr()?; + drop(listener); + + let server = Server::builder() + .set_http_middleware(tower::ServiceBuilder::new().layer(proxy_layer)) + .build(proxy_addr) + .await?; + let proxy_addr = server.local_addr()?; + let proxy_client: HttpClient = HttpClient::builder().build(format!( + "http://{}:{}", + proxy_addr.ip(), + proxy_addr.port() + ))?; + let server_handle = server.start(RpcModule::new(())); + + let mock_data = U128::from(7); + + // 1) Initial success + let res = proxy_client + 
.request::("mock_forwardedMethod", (mock_data,)) + .await; + assert!(res.is_ok(), "initial request should succeed: {res:?}"); + + // 2) Stop L2 -> subsequent failure + drop(l2); + tokio::time::sleep(tokio::time::Duration::from_millis(200)).await; + // Expect a JSON-RPC error object with code -32000 (retriable) + let res = proxy_client + .request::("mock_forwardedMethod", (mock_data,)) + .await; + match res { + Ok(v) => unreachable!("expected error when L2 down, got: {v:?}"), + Err(ClientError::Call(e)) => { + let code = e.code(); + assert!( + is_retriable_code(code), + "expected retriable code (not parse/invalid), got {code}" + ); + } + Err(_other) => { + // Transport or other non-Call errors are considered retriable + assert!(matches!(_other, ClientError::Transport(_)), "expected transport error"); + } + } + + // 3) Restart L2 -> subsequent success + let l2_restarted = MockHttpServer::serve_on_addr(l2_addr).await?; + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + let res = proxy_client + .request::("mock_forwardedMethod", (mock_data,)) + .await; + assert!(res.is_ok(), "request should succeed after L2 restart: {res:?}"); + + // Cleanup + server_handle.stop()?; + drop(l2_restarted); + + Ok(()) + } +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/selection.rs b/rust/rollup-boost/crates/rollup-boost/src/selection.rs new file mode 100644 index 0000000000000..dfea552282e4c --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/selection.rs @@ -0,0 +1,91 @@ +use rollup_boost_types::payload::{OpExecutionPayloadEnvelope, PayloadSource}; +use serde::{Deserialize, Serialize}; + +/// Defines the strategy for choosing between the builder block and the L2 client block +/// during block production. +#[derive(Serialize, Deserialize, Debug, Copy, Clone, PartialEq, clap::ValueEnum)] +pub enum BlockSelectionPolicy { + /// Selects the block based on gas usage. 
+ /// + /// If the builder block uses less than 10% of the gas used by the L2 client block, + /// the L2 block is selected instead. This prevents propagation of valid but empty + /// builder blocks and mitigates issues where the builder is not receiving enough + /// transactions due to networking or peering failures. + GasUsed, +} + +impl BlockSelectionPolicy { + pub fn select_block( + &self, + builder_payload: OpExecutionPayloadEnvelope, + l2_payload: OpExecutionPayloadEnvelope, + ) -> (OpExecutionPayloadEnvelope, PayloadSource) { + match self { + BlockSelectionPolicy::GasUsed => { + let builder_gas = builder_payload.gas_used() as f64; + let l2_gas = l2_payload.gas_used() as f64; + + // Select the L2 block if the builder block uses less than 10% of the gas. + // This avoids selecting empty or severely underfilled blocks, + if builder_gas < l2_gas * 0.1 { + (l2_payload, PayloadSource::L2) + } else { + (builder_payload, PayloadSource::Builder) + } + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use op_alloy_rpc_types_engine::OpExecutionPayloadEnvelopeV4; + + #[test] + fn test_gas_used_policy_select_l2_block() -> eyre::Result<()> { + let execution_payload = 
r#"{"executionPayload":{"parentHash":"0xe927a1448525fb5d32cb50ee1408461a945ba6c39bd5cf5621407d500ecc8de9","feeRecipient":"0x0000000000000000000000000000000000000000","stateRoot":"0x10f8a0830000e8edef6d00cc727ff833f064b1950afd591ae41357f97e543119","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prevRandao":"0xe0d8b4521a7da1582a713244ffb6a86aa1726932087386e2dc7973f43fc6cb24","blockNumber":"0x1","gasLimit":"0x2ffbd2","gasUsed":"0x0","timestamp":"0x1235","extraData":"0xd883010d00846765746888676f312e32312e30856c696e7578","baseFeePerGas":"0x342770c0","blockHash":"0x44d0fa5f2f73a938ebb96a2a21679eb8dea3e7b7dd8fd9f35aa756dda8bf0a8a","transactions":[],"withdrawals":[],"blobGasUsed":"0x0","excessBlobGas":"0x0","withdrawalsRoot":"0x123400000000000000000000000000000000000000000000000000000000babe"},"blockValue":"0x0","blobsBundle":{"commitments":[],"proofs":[],"blobs":[]},"shouldOverrideBuilder":false,"parentBeaconBlockRoot":"0xdead00000000000000000000000000000000000000000000000000000000beef","executionRequests":["0xdeadbeef"]}"#; + let mut builder_payload: OpExecutionPayloadEnvelopeV4 = + serde_json::from_str(execution_payload)?; + let mut l2_payload = builder_payload.clone(); + + let gas_used = 1000000000; + l2_payload.execution_payload.payload_inner.payload_inner.payload_inner.gas_used = gas_used; + + builder_payload.execution_payload.payload_inner.payload_inner.payload_inner.gas_used = + (gas_used as f64 * 0.09) as 
u64; + + let builder_payload = OpExecutionPayloadEnvelope::V4(builder_payload); + let l2_payload = OpExecutionPayloadEnvelope::V4(l2_payload); + + let selected_payload = + BlockSelectionPolicy::GasUsed.select_block(builder_payload, l2_payload); + + assert_eq!(selected_payload.1, PayloadSource::L2); + Ok(()) + } + + #[test] + fn test_gas_used_policy_select_builder_block() -> eyre::Result<()> { + let execution_payload = r#"{"executionPayload":{"parentHash":"0xe927a1448525fb5d32cb50ee1408461a945ba6c39bd5cf5621407d500ecc8de9","feeRecipient":"0x0000000000000000000000000000000000000000","stateRoot":"0x10f8a0830000e8edef6d00cc727ff833f064b1950afd591ae41357f97e543119","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prevRandao":"0xe0d8b4521a7da1582a713244ffb6a86aa1726932087386e2dc7973f43fc6cb24","blockNumber":"0x1","gasLimit":"0x2ffbd2","gasUsed":"0x0","timestamp":"0x1235","extraData":"0xd883010d00846765746888676f312e32312e30856c696e7578","baseFeePerGas":"0x342770c0","blockHash":"0x44d0fa5f2f73a938ebb96a2a21679eb8dea3e7b7dd8fd9f35aa756dda8bf0a8a","transactions":[],"withdrawals":[],"blobGasUsed":"0x0","excessBlobGas":"0x0","withdrawalsRoot":"0x123400000000000000000000000000000000000000000000000000000000babe"},"blockValue":"0x0","blobsBundle":{"commitments":[],"proofs":[],"blobs":[]},"shouldOverrideBuilder":false,"parentBeaconBlockRoot":"0xdead00000000000000000000000000000000000000000000000000000000beef","executionRequest
s":["0xdeadbeef"]}"#; + let mut builder_payload: OpExecutionPayloadEnvelopeV4 = + serde_json::from_str(execution_payload)?; + let mut l2_payload = builder_payload.clone(); + + let gas_used = 1000000000; + l2_payload.execution_payload.payload_inner.payload_inner.payload_inner.gas_used = gas_used; + + builder_payload.execution_payload.payload_inner.payload_inner.payload_inner.gas_used = + (gas_used as f64 * 0.1) as u64; + + let builder_payload = OpExecutionPayloadEnvelope::V4(builder_payload); + let l2_payload = OpExecutionPayloadEnvelope::V4(l2_payload); + + let selected_payload = + BlockSelectionPolicy::GasUsed.select_block(builder_payload, l2_payload); + + assert_eq!(selected_payload.1, PayloadSource::Builder); + Ok(()) + } +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/server.rs b/rust/rollup-boost/crates/rollup-boost/src/server.rs new file mode 100644 index 0000000000000..347edc48fee3e --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/server.rs @@ -0,0 +1,1406 @@ +use crate::{ + BlockSelectionPolicy, ClientArgs, EngineApiExt, FlasblocksP2PRpcClient, Flashblocks, + FlashblocksP2PKeys, RollupBoostLibArgs, + client::rpc::RpcClient, + debug_api::ExecutionMode, + health::HealthHandle, + probe::{Health, Probes}, + update_execution_mode_gauge, +}; +use alloy_primitives::{B256, Bytes, bytes}; +use alloy_rpc_types_engine::{ + ExecutionPayload, ExecutionPayloadV3, ForkchoiceState, ForkchoiceUpdated, PayloadId, + PayloadStatus, +}; +use alloy_rpc_types_eth::{Block, BlockNumberOrTag}; +use dashmap::DashMap; +use http_body_util::{BodyExt, Full}; +use jsonrpsee::{ + RpcModule, + core::{BoxError, RegisterMethodError, RpcResult, async_trait}, + proc_macros::rpc, + server::{HttpBody, HttpRequest, HttpResponse}, + types::{ErrorObject, error::INVALID_REQUEST_CODE}, +}; +use metrics::counter; +use op_alloy_rpc_types_engine::{ + OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4, OpExecutionPayloadV4, + OpPayloadAttributes, +}; +use 
opentelemetry::trace::SpanKind; +use parking_lot::Mutex; +use rollup_boost_types::{ + authorization::Authorization, + payload::{ + NewPayload, NewPayloadV3, NewPayloadV4, OpExecutionPayloadEnvelope, PayloadSource, + PayloadTraceContext, PayloadVersion, + }, +}; +use std::{ + net::{IpAddr, SocketAddr}, + str::FromStr, + sync::Arc, + time::Duration, +}; +use tokio::task::JoinHandle; +use tracing::{debug, error, info, instrument}; + +pub type Request = HttpRequest; +pub type Response = HttpResponse; +pub type BufferedRequest = http::Request>; +pub type BufferedResponse = http::Response>; + +#[derive(Debug)] +pub struct BuilderPayloadResult { + pub payload: Option, + pub builder_api_failed: bool, +} + +pub type BuilderResult = Result>; + +#[derive(Clone, Debug)] +pub struct RollupBoostServer { + pub l2_client: Arc, + pub builder_client: Arc, + pub payload_trace_context: Arc, + pub execution_mode: Arc>, + block_selection_policy: Option, + external_state_root: bool, + ignore_unhealthy_builders: bool, + probes: Arc, + payload_to_fcu_request: DashMap)>, +} + +impl RollupBoostServer { + pub fn new_from_args( + rollup_boost_args: RollupBoostLibArgs, + probes: Arc, + ) -> eyre::Result { + let l2_client_args: ClientArgs = rollup_boost_args.l2_client.into(); + let builder_client_args: ClientArgs = rollup_boost_args.builder.into(); + + let l2_client = l2_client_args.new_rpc_client(PayloadSource::L2)?; + let builder_client = builder_client_args.new_rpc_client(PayloadSource::Builder)?; + let execution_mode = Arc::new(Mutex::new(rollup_boost_args.execution_mode.clone())); + + let builder_client: Arc = + if rollup_boost_args.flashblocks_ws.flashblocks_ws { + let flashblocks_ws = rollup_boost_args.flashblocks_ws; + let inbound_url = flashblocks_ws.flashblocks_builder_url; + let outbound_addr = SocketAddr::new( + IpAddr::from_str(&flashblocks_ws.flashblocks_host)?, + flashblocks_ws.flashblocks_port, + ); + + Arc::new(Flashblocks::run( + builder_client.clone(), + inbound_url, + 
outbound_addr, + flashblocks_ws.flashblocks_ws_config, + )?) + } else if let Some(flashblocks_p2p) = rollup_boost_args.flashblocks_p2p.clone() { + Arc::new(FlasblocksP2PRpcClient { + inner: builder_client, + flashblocks_p2p_keys: FlashblocksP2PKeys { + authorization_sk: flashblocks_p2p.authorizer_sk.clone(), + builder_vk: flashblocks_p2p.builder_vk.clone(), + }, + execution_mode: execution_mode.clone(), + }) + } else { + Arc::new(builder_client) + }; + + Ok(RollupBoostServer::new( + l2_client, + builder_client, + execution_mode, + rollup_boost_args.block_selection_policy, + probes.clone(), + rollup_boost_args.external_state_root, + rollup_boost_args.ignore_unhealthy_builders, + )) + } +} + +impl RollupBoostServer { + pub fn new( + l2_client: RpcClient, + builder_client: Arc, + execution_mode: Arc>, + block_selection_policy: Option, + probes: Arc, + external_state_root: bool, + ignore_unhealthy_builders: bool, + ) -> Self { + update_execution_mode_gauge(*execution_mode.lock()); + Self { + l2_client: Arc::new(l2_client), + builder_client, + block_selection_policy, + payload_trace_context: Arc::new(PayloadTraceContext::new()), + execution_mode, + probes, + external_state_root, + ignore_unhealthy_builders, + payload_to_fcu_request: DashMap::new(), + } + } + + pub fn probes(&self) -> Arc { + self.probes.clone() + } + + pub fn spawn_health_check( + &self, + health_check_interval: u64, + max_unsafe_interval: u64, + ) -> JoinHandle<()> { + let handle = HealthHandle::new( + self.probes.clone(), + self.execution_mode.clone(), + self.l2_client.clone(), + self.builder_client.clone(), + Duration::from_secs(health_check_interval), + max_unsafe_interval, + ); + + handle.spawn() + } + + pub fn set_execution_mode(&self, execution_mode: ExecutionMode) { + *self.execution_mode.lock() = execution_mode; + } + + pub fn get_execution_mode(&self) -> ExecutionMode { + *self.execution_mode.lock() + } + + async fn new_payload(&self, new_payload: NewPayload) -> RpcResult { + let 
execution_payload = ExecutionPayload::from(new_payload.clone()); + let block_hash = execution_payload.block_hash(); + let parent_hash = execution_payload.parent_hash(); + info!(message = "received new_payload", "block_hash" = %block_hash, "version" = new_payload.version().as_str()); + + if let Some(causes) = + self.payload_trace_context.trace_ids_from_parent_hash(&parent_hash).await + { + causes.iter().for_each(|cause| { + tracing::Span::current().follows_from(cause); + }); + } + + self.payload_trace_context.remove_by_parent_hash(&parent_hash).await; + + // async call to builder to sync the builder node + if !self.execution_mode.lock().is_disabled() && !self.should_skip_unhealthy_builder() { + let builder = self.builder_client.clone(); + let new_payload_clone = new_payload.clone(); + tokio::spawn(async move { builder.new_payload(new_payload_clone).await }); + } + Ok(self.l2_client.new_payload(new_payload).await?) + } + + async fn get_payload( + &self, + payload_id: PayloadId, + version: PayloadVersion, + ) -> RpcResult { + let l2_fut = self.l2_client.get_payload(payload_id, version); + + // If execution mode is disabled, return the l2 payload without sending + // the request to the builder + if self.execution_mode.lock().is_disabled() { + return match l2_fut.await { + Ok(payload) => { + self.probes.set_health(Health::Healthy); + let context = PayloadSource::L2; + tracing::Span::current().record("payload_source", context.to_string()); + counter!("rpc.blocks_created", "source" => context.to_string()).increment(1); + + let execution_payload = ExecutionPayload::from(payload.clone()); + info!( + message = "returning block", + "hash" = %execution_payload.block_hash(), + "number" = %execution_payload.block_number(), + %context, + %payload_id, + // Add an extra label to know that this is the disabled execution mode path + "execution_mode" = "disabled", + ); + + Ok(payload) + } + + Err(e) => { + self.probes.set_health(Health::ServiceUnavailable); + Err(e.into()) + } + }; + 
} + + // Forward the get payload request to the builder + let builder_fut = async { + if let Some(cause) = self.payload_trace_context.trace_id(&payload_id).await { + tracing::Span::current().follows_from(cause); + } + if !self.payload_trace_context.has_builder_payload(&payload_id).await { + info!(message = "builder has no payload, skipping get_payload call to builder"); + tracing::Span::current().record("builder_has_payload", false); + return BuilderResult::Ok(BuilderPayloadResult { + payload: None, + builder_api_failed: true, + }); + } + + // Get payload and validate with the local l2 client + tracing::Span::current().record("builder_has_payload", true); + info!(message = "builder has payload, calling get_payload on builder"); + + let payload = match self.builder_client.get_payload(payload_id, version).await { + Ok(payload) => payload, + Err(e) => { + error!(message = "error getting payload from builder", error = %e); + return BuilderResult::Ok(BuilderPayloadResult { + payload: None, + builder_api_failed: true, + }); + } + }; + + if !self.external_state_root { + let _ = self.l2_client.new_payload(NewPayload::from(payload.clone())).await?; + + return BuilderResult::Ok(BuilderPayloadResult { + payload: Some(payload), + builder_api_failed: false, + }); + } + + let external_payload = + self.calculate_external_state_root(payload, payload_id, version).await?; + BuilderResult::Ok(BuilderPayloadResult { + payload: external_payload, + builder_api_failed: false, + }) + }; + + let (l2_payload, builder_payload) = tokio::join!(l2_fut, builder_fut); + + // Evaluate the builder and l2 response and select the final payload + let (payload, context) = { + let l2_payload = + l2_payload.inspect_err(|_| self.probes.set_health(Health::ServiceUnavailable))?; + self.probes.set_health(Health::Healthy); + + // Convert Result> to Option by extracting the inner Option. + // If there's an error, log it and return None instead. 
+ let (builder_payload, builder_api_failed) = match builder_payload { + Ok(result) => (result.payload, result.builder_api_failed), + Err(e) => { + error!(message = "error getting payload from builder", error = %e); + (None, true) + } + }; + + if let Some(builder_payload) = builder_payload { + // Record the delta (gas and txn) between the builder and l2 payload + let span = tracing::Span::current(); + // use i64 to cover case when l2 builder has more gas/txs + span.record( + "gas_delta", + (builder_payload.gas_used() as i64 - l2_payload.gas_used() as i64).to_string(), + ); + span.record( + "tx_count_delta", + (builder_payload.tx_count() as i64 - l2_payload.tx_count() as i64).to_string(), + ); + + // If execution mode is set to DryRun, fallback to the l2_payload, + // otherwise prefer the builder payload + if self.execution_mode.lock().is_dry_run() { + (l2_payload, PayloadSource::L2) + } else if let Some(selection_policy) = &self.block_selection_policy { + selection_policy.select_block(builder_payload, l2_payload) + } else { + (builder_payload, PayloadSource::Builder) + } + } else { + // Only update the health status if the builder payload fails + // and execution mode is enabled + if self.execution_mode.lock().is_enabled() && builder_api_failed { + self.probes.set_health(Health::PartialContent); + } + (l2_payload, PayloadSource::L2) + } + }; + + tracing::Span::current().record("payload_source", context.to_string()); + // To maintain backwards compatibility with old metrics, we need to record blocks built + // This is temporary until we migrate to the new metrics + counter!("rpc.blocks_created", "source" => context.to_string()).increment(1); + + let inner_payload = ExecutionPayload::from(payload.clone()); + let block_hash = inner_payload.block_hash(); + let block_number = inner_payload.block_number(); + let state_root = inner_payload.as_v1().state_root; + + // Note: This log message is used by integration tests to track payload context. 
+ // While not ideal to rely on log parsing, it provides a reliable way to verify behavior. + // Happy to consider an alternative approach later on. + info!( + message = "returning block", + "hash" = %block_hash, + "number" = %block_number, + "state_root" = %state_root, + %context, + %payload_id, + ); + Ok(payload) + } + + fn should_skip_unhealthy_builder(&self) -> bool { + self.ignore_unhealthy_builders && !matches!(self.probes.health(), Health::Healthy) + } + + async fn calculate_external_state_root( + &self, + builder_payload: OpExecutionPayloadEnvelope, + payload_id: PayloadId, + version: PayloadVersion, + ) -> Result, ErrorObject<'static>> { + let fcu_info = self.payload_to_fcu_request.remove(&payload_id).unwrap().1; + + let new_payload_attrs = match fcu_info.1.as_ref() { + Some(attrs) => OpPayloadAttributes { + payload_attributes: attrs.payload_attributes.clone(), + transactions: Some(builder_payload.transactions()), + no_tx_pool: Some(true), + gas_limit: attrs.gas_limit, + eip_1559_params: attrs.eip_1559_params, + min_base_fee: attrs.min_base_fee, + }, + None => OpPayloadAttributes { + payload_attributes: builder_payload.payload_attributes(), + transactions: Some(builder_payload.transactions()), + no_tx_pool: Some(true), + gas_limit: None, + eip_1559_params: None, + min_base_fee: None, + }, + }; + + let l2_result = + self.l2_client.fork_choice_updated_v3(fcu_info.0, Some(new_payload_attrs)).await?; + + if let Some(new_payload_id) = l2_result.payload_id { + debug!( + message = "sent FCU to l2 to calculate new state root", + "returned_payload_id" = %new_payload_id, + "old_payload_id" = %payload_id, + ); + let l2_payload = self.l2_client.get_payload(new_payload_id, version).await; + + match l2_payload { + Ok(new_payload) => { + debug!( + message = "received new state root payload from l2", + payload = ?new_payload, + builder_payload = ?builder_payload, + ); + return Ok(Some(new_payload)); + } + + Err(e) => { + error!(message = "error getting new state root 
payload from l2", error = %e); + return Ok(None); + } + } + } + + Ok(None) + } +} + +impl TryInto> for RollupBoostServer { + type Error = RegisterMethodError; + + fn try_into(self) -> Result, Self::Error> { + let mut module: RpcModule<()> = RpcModule::new(()); + module.merge(EngineApiServer::into_rpc(self))?; + + for method in module.method_names() { + info!(?method, "method registered"); + } + + Ok(module) + } +} + +#[rpc(client)] +pub trait FlashblocksEngineApi { + /// When flashblocks p2p is enabled + /// we add an additional parameter `authorization` to the FCU + #[method(name = "flashblocks_forkchoiceUpdatedV3")] + async fn flashblocks_fork_choice_updated_v3( + &self, + fork_choice_state: ForkchoiceState, + payload_attributes: Option, + authorization: Option, + ) -> RpcResult; +} + +#[rpc(server, client)] +pub trait EngineApi { + #[method(name = "engine_forkchoiceUpdatedV3")] + async fn fork_choice_updated_v3( + &self, + fork_choice_state: ForkchoiceState, + payload_attributes: Option, + ) -> RpcResult; + + #[method(name = "engine_getPayloadV3")] + async fn get_payload_v3( + &self, + payload_id: PayloadId, + ) -> RpcResult; + + #[method(name = "engine_newPayloadV3")] + async fn new_payload_v3( + &self, + payload: ExecutionPayloadV3, + versioned_hashes: Vec, + parent_beacon_block_root: B256, + ) -> RpcResult; + + #[method(name = "engine_getPayloadV4")] + async fn get_payload_v4( + &self, + payload_id: PayloadId, + ) -> RpcResult; + + #[method(name = "engine_newPayloadV4")] + async fn new_payload_v4( + &self, + payload: OpExecutionPayloadV4, + versioned_hashes: Vec, + parent_beacon_block_root: B256, + execution_requests: Vec, + ) -> RpcResult; + + #[method(name = "eth_getBlockByNumber")] + async fn get_block_by_number(&self, number: BlockNumberOrTag, full: bool) -> RpcResult; +} + +#[async_trait] +impl EngineApiServer for RollupBoostServer { + #[instrument( + skip_all, + err, + fields( + otel.kind = ?SpanKind::Server, + head_block_hash = 
%fork_choice_state.head_block_hash, + timestamp = ?payload_attributes.as_ref().map(|attrs| attrs.payload_attributes.timestamp), + payload_id + ) + )] + async fn fork_choice_updated_v3( + &self, + fork_choice_state: ForkchoiceState, + payload_attributes: Option, + ) -> RpcResult { + // Send the FCU to the default l2 client + let l2_fut = + self.l2_client.fork_choice_updated_v3(fork_choice_state, payload_attributes.clone()); + + // If execution mode is disabled, return the l2 client response immediately + if self.execution_mode.lock().is_disabled() { + return Ok(l2_fut.await?); + } + + // If traffic to the unhealthy builder is not allowed and the builder is unhealthy, + if self.should_skip_unhealthy_builder() { + info!(message = "builder is unhealthy, skipping FCU to builder"); + return Ok(l2_fut.await?); + } + + let span = tracing::Span::current(); + // If the fcu contains payload attributes and the tx pool is disabled, + // only forward the FCU to the default l2 client + if let Some(attrs) = payload_attributes.as_ref() { + if attrs.no_tx_pool.unwrap_or_default() { + let l2_response = l2_fut.await?; + if let Some(payload_id) = l2_response.payload_id { + info!( + message = "block building started", + "payload_id" = %payload_id, + "builder_building" = false, + ); + + self.payload_trace_context + .store(payload_id, fork_choice_state.head_block_hash, false, span.id()) + .await; + } + + // We always return the value from the l2 client + return Ok(l2_response); + } else { + // If the tx pool is enabled, forward the fcu + // to both the builder and the default l2 client + let builder_fut = self + .builder_client + .fork_choice_updated_v3(fork_choice_state, payload_attributes.clone()); + + let (l2_result, builder_result) = tokio::join!(l2_fut, builder_fut); + let l2_response = l2_result?; + + if let Some(payload_id) = l2_response.payload_id { + info!( + message = "block building started", + "payload_id" = %payload_id, + "builder_building" = builder_result.is_ok(), + ); + + 
self.payload_trace_context + .store( + payload_id, + fork_choice_state.head_block_hash, + builder_result.is_ok(), + span.id(), + ) + .await; + + if self.external_state_root { + self.payload_to_fcu_request + .insert(payload_id, (fork_choice_state, payload_attributes)); + } + } + + return Ok(l2_response); + } + } else { + // If the FCU does not contain payload attributes + // forward the fcu to the builder to keep it synced and immediately return the l2 + // response without awaiting the builder + let builder_client = self.builder_client.clone(); + let attrs_clone = payload_attributes.clone(); + tokio::spawn(async move { + // It is not critical to wait for the builder response here + // During moments of high load, Op-node can send hundreds of FCU requests + // and we want to ensure that we don't block the main thread in those scenarios + builder_client.fork_choice_updated_v3(fork_choice_state, attrs_clone).await + }); + let l2_response = l2_fut.await?; + #[allow(clippy::collapsible_if)] + if let Some(payload_id) = l2_response.payload_id { + if self.external_state_root { + self.payload_to_fcu_request + .insert(payload_id, (fork_choice_state, payload_attributes)); + } + } + + return Ok(l2_response); + } + } + + #[instrument( + skip_all, + err, + fields( + otel.kind = ?SpanKind::Server, + %payload_id, + payload_source, + gas_delta, + tx_count_delta, + builder_has_payload, + flashblocks_count, + ) + )] + async fn get_payload_v3( + &self, + payload_id: PayloadId, + ) -> RpcResult { + info!("received get_payload_v3"); + + match self.get_payload(payload_id, PayloadVersion::V3).await? 
{ + OpExecutionPayloadEnvelope::V3(v3) => Ok(v3), + OpExecutionPayloadEnvelope::V4(_) => Err(ErrorObject::owned( + INVALID_REQUEST_CODE, + "Payload version 4 not supported", + None::, + )), + } + } + + #[instrument( + skip_all, + err, + fields( + otel.kind = ?SpanKind::Server, + ) + )] + async fn new_payload_v3( + &self, + payload: ExecutionPayloadV3, + versioned_hashes: Vec, + parent_beacon_block_root: B256, + ) -> RpcResult { + info!("received new_payload_v3"); + + self.new_payload(NewPayload::V3(NewPayloadV3 { + payload, + versioned_hashes, + parent_beacon_block_root, + })) + .await + } + + #[instrument( + skip_all, + err, + fields( + otel.kind = ?SpanKind::Server, + %payload_id, + payload_source, + gas_delta, + tx_count_delta, + ) + )] + async fn get_payload_v4( + &self, + payload_id: PayloadId, + ) -> RpcResult { + info!("received get_payload_v4"); + + match self.get_payload(payload_id, PayloadVersion::V4).await? { + OpExecutionPayloadEnvelope::V4(v4) => Ok(v4), + OpExecutionPayloadEnvelope::V3(_) => Err(ErrorObject::owned( + INVALID_REQUEST_CODE, + "Payload version 4 not supported", + None::, + )), + } + } + + #[instrument( + skip_all, + err, + fields( + otel.kind = ?SpanKind::Server, + ) + )] + async fn new_payload_v4( + &self, + payload: OpExecutionPayloadV4, + versioned_hashes: Vec, + parent_beacon_block_root: B256, + execution_requests: Vec, + ) -> RpcResult { + info!("received new_payload_v4"); + + self.new_payload(NewPayload::V4(NewPayloadV4 { + payload, + versioned_hashes, + parent_beacon_block_root, + execution_requests, + })) + .await + } + + async fn get_block_by_number(&self, number: BlockNumberOrTag, full: bool) -> RpcResult { + Ok(self.l2_client.get_block_by_number(number, full).await?) 
+ } +} + +pub async fn into_buffered_request(req: HttpRequest) -> Result { + let (parts, body) = req.into_parts(); + let bytes = body.collect().await?.to_bytes(); + let full = Full::::from(bytes.clone()); + Ok(http::Request::from_parts(parts, full)) +} + +pub fn from_buffered_request(req: BufferedRequest) -> HttpRequest { + req.map(HttpBody::new) +} + +#[cfg(test)] +#[allow(clippy::complexity)] +pub(crate) mod tests { + use super::*; + use crate::{probe::ProbeLayer, proxy::ProxyLayer}; + use alloy_primitives::{FixedBytes, U256, hex}; + use alloy_rpc_types_engine::{ + BlobsBundleV1, ExecutionPayloadV1, ExecutionPayloadV2, JwtSecret, PayloadStatusEnum, + }; + use http::{StatusCode, Uri}; + use jsonrpsee::{ + RpcModule, + http_client::HttpClient, + server::{Server, ServerBuilder, ServerHandle}, + }; + use parking_lot::Mutex; + use std::{net::SocketAddr, str::FromStr, sync::Arc}; + use tokio::time::sleep; + + #[derive(Debug, Clone)] + pub struct MockEngineServer { + fcu_requests: Arc)>>>, + pub get_payload_requests: Arc>>, + new_payload_requests: Arc, B256)>>>, + fcu_response: RpcResult, + get_payload_responses: Vec>, + new_payload_response: RpcResult, + + pub override_payload_id: Option, + } + + impl MockEngineServer { + pub fn new() -> Self { + Self { + fcu_requests: Arc::new(Mutex::new(vec![])), + get_payload_requests: Arc::new(Mutex::new(vec![])), + new_payload_requests: Arc::new(Mutex::new(vec![])), + fcu_response: Ok(ForkchoiceUpdated::new(PayloadStatus::from_status(PayloadStatusEnum::Valid))), + get_payload_responses: vec![Ok(OpExecutionPayloadEnvelopeV3{ + execution_payload: ExecutionPayloadV3 { + payload_inner: ExecutionPayloadV2 { + payload_inner: ExecutionPayloadV1 { + base_fee_per_gas: U256::from(7u64), + block_number: 0xa946u64, + block_hash: hex!("a5ddd3f286f429458a39cafc13ffe89295a7efa8eb363cf89a1a4887dbcf272b").into(), + logs_bloom: 
hex!("00200004000000000000000080000000000200000000000000000000000000000000200000000000000000000000000000000000800000000200000000000000000000000000000000000008000000200000000000000000000001000000000000000000000000000000800000000000000000000100000000000030000000000000000040000000000000000000000000000000000800080080404000000000000008000000000008200000000000200000000000000000000000000000000000000002000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000100000000000000000000").into(), + extra_data: hex!("d883010d03846765746888676f312e32312e31856c696e7578").into(), + gas_limit: 0x1c9c380, + gas_used: 0x1f4a9, + timestamp: 0x651f35b8, + fee_recipient: hex!("f97e180c050e5ab072211ad2c213eb5aee4df134").into(), + parent_hash: hex!("d829192799c73ef28a7332313b3c03af1f2d5da2c36f8ecfafe7a83a3bfb8d1e").into(), + prev_randao: hex!("753888cc4adfbeb9e24e01c84233f9d204f4a9e1273f0e29b43c4c148b2b8b7e").into(), + receipts_root: hex!("4cbc48e87389399a0ea0b382b1c46962c4b8e398014bf0cc610f9c672bee3155").into(), + state_root: hex!("017d7fa2b5adb480f5e05b2c95cb4186e12062eed893fc8822798eed134329d1").into(), + transactions: vec![], + }, + withdrawals: vec![], + }, + blob_gas_used: 0xc0000, + excess_blob_gas: 0x580000, + }, + block_value: U256::from(0), + blobs_bundle: BlobsBundleV1{ + commitments: vec![], + proofs: vec![], + blobs: vec![], + }, + should_override_builder: false, + parent_beacon_block_root: B256::ZERO, + })], + override_payload_id: None, + new_payload_response: Ok(PayloadStatus::from_status(PayloadStatusEnum::Valid)), + } + } + } + + struct TestHarness { + l2_server: ServerHandle, + l2_mock: MockEngineServer, + builder_server: ServerHandle, + builder_mock: MockEngineServer, + server: ServerHandle, + server_addr: SocketAddr, + rpc_client: HttpClient, + http_client: reqwest::Client, + } + + impl TestHarness { + async fn new( + l2_mock: Option, + builder_mock: Option, + ) -> Self { + Self::new_with_external_state_root(l2_mock, builder_mock, 
false).await + } + + async fn new_with_external_state_root( + l2_mock: Option, + builder_mock: Option, + external_state_root: bool, + ) -> Self { + let jwt_secret = JwtSecret::random(); + + let l2_mock = l2_mock.unwrap_or(MockEngineServer::new()); + let builder_mock = builder_mock.unwrap_or(MockEngineServer::new()); + let (l2_server, l2_server_addr) = spawn_server(l2_mock.clone()).await; + let (builder_server, builder_server_addr) = spawn_server(builder_mock.clone()).await; + + // Build l2 clients + let l2_client_args = ClientArgs { + url: format!("http://{l2_server_addr}") + .parse() + .expect("l2 server address is valid url"), + jwt_token: Some(jwt_secret), + jwt_path: None, + timeout: 2000, + }; + let l2_rpc_client = l2_client_args.new_rpc_client(PayloadSource::L2).unwrap(); + let l2_http_client = l2_client_args.new_http_client(PayloadSource::L2).unwrap(); + + // Build builder clients + let builder_client_args = ClientArgs { + url: Uri::from_str(&format!("http://{builder_server_addr}")).unwrap(), + jwt_token: Some(jwt_secret), + jwt_path: None, + timeout: 2000, + }; + let builder_rpc_client = + Arc::new(builder_client_args.new_rpc_client(PayloadSource::Builder).unwrap()); + let builder_http_client = + builder_client_args.new_http_client(PayloadSource::Builder).unwrap(); + + let (probe_layer, probes) = ProbeLayer::new(); + + // For tests, set initial health to Healthy since we don't run health checks + probes.set_health(Health::Healthy); + + let rollup_boost = RollupBoostServer::new( + l2_rpc_client, + builder_rpc_client, + Arc::new(Mutex::new(ExecutionMode::Enabled)), + None, + probes.clone(), + external_state_root, + true, + ); + + let module: RpcModule<()> = rollup_boost.try_into().unwrap(); + + let http_middleware = tower::ServiceBuilder::new() + .layer(probe_layer) + .layer(ProxyLayer::new(l2_http_client, builder_http_client)); + + let server = Server::builder() + .set_http_middleware(http_middleware) + .build("127.0.0.1:0".parse::().unwrap()) + .await + 
.unwrap(); + + let server_addr = server.local_addr().expect("missing server address"); + + let server = server.start(module); + + let rpc_client = HttpClient::builder().build(format!("http://{server_addr}")).unwrap(); + let http_client = reqwest::Client::new(); + + TestHarness { + l2_server, + l2_mock, + builder_server, + builder_mock, + server, + server_addr, + rpc_client, + http_client, + } + } + + async fn get(&self, path: &str) -> reqwest::Response { + self.http_client + .get(format!("http://{}/{}", self.server_addr, path)) + .send() + .await + .unwrap() + } + + async fn cleanup(self) { + self.l2_server.stop().unwrap(); + self.l2_server.stopped().await; + self.builder_server.stop().unwrap(); + self.builder_server.stopped().await; + self.server.stop().unwrap(); + self.server.stopped().await; + } + } + + #[tokio::test] + async fn engine_success() { + let test_harness = TestHarness::new(None, None).await; + + // Since no blocks have been created, the service should be unavailable + let health = test_harness.get("healthz").await; + assert_eq!(health.status(), StatusCode::OK); + + // test fork_choice_updated_v3 success + let fcu = ForkchoiceState { + head_block_hash: FixedBytes::random(), + safe_block_hash: FixedBytes::random(), + finalized_block_hash: FixedBytes::random(), + }; + let fcu_response = test_harness.rpc_client.fork_choice_updated_v3(fcu, None).await; + assert!(fcu_response.is_ok()); + let fcu_requests = test_harness.l2_mock.fcu_requests.clone(); + { + let fcu_requests_mu = fcu_requests.lock(); + let fcu_requests_builder = test_harness.builder_mock.fcu_requests.clone(); + let fcu_requests_builder_mu = fcu_requests_builder.lock(); + assert_eq!(fcu_requests_mu.len(), 1); + assert_eq!(fcu_requests_builder_mu.len(), 1); + let req: &(ForkchoiceState, Option) = + fcu_requests_mu.first().unwrap(); + assert_eq!(req.0, fcu); + assert_eq!(req.1, None); + } + + // test new_payload_v3 success + let new_payload_response = test_harness + .rpc_client + .new_payload_v3( 
+ test_harness.l2_mock.get_payload_responses[0] + .clone() + .unwrap() + .execution_payload + .clone(), + vec![], + B256::ZERO, + ) + .await; + assert!(new_payload_response.is_ok()); + let new_payload_requests = test_harness.l2_mock.new_payload_requests.clone(); + { + let new_payload_requests_mu = new_payload_requests.lock(); + let new_payload_requests_builder = + test_harness.builder_mock.new_payload_requests.clone(); + let new_payload_requests_builder_mu = new_payload_requests_builder.lock(); + assert_eq!(new_payload_requests_mu.len(), 1); + assert_eq!(new_payload_requests_builder_mu.len(), 1); + let req: &(ExecutionPayloadV3, Vec>, B256) = + new_payload_requests_mu.first().unwrap(); + assert_eq!( + req.0, + test_harness.l2_mock.get_payload_responses[0] + .clone() + .unwrap() + .execution_payload + .clone() + ); + assert_eq!(req.1, Vec::>::new()); + assert_eq!(req.2, B256::ZERO); + } + + // test get_payload_v3 success + let get_payload_response = + test_harness.rpc_client.get_payload_v3(PayloadId::new([0, 0, 0, 0, 0, 0, 0, 1])).await; + assert!(get_payload_response.is_ok()); + let get_payload_requests = test_harness.l2_mock.get_payload_requests.clone(); + { + let get_payload_requests_mu = get_payload_requests.lock(); + let get_payload_requests_builder = + test_harness.builder_mock.get_payload_requests.clone(); + let get_payload_requests_builder_mu = get_payload_requests_builder.lock(); + let new_payload_requests = test_harness.l2_mock.new_payload_requests.clone(); + let new_payload_requests_mu = new_payload_requests.lock(); + assert_eq!(get_payload_requests_builder_mu.len(), 0); + assert_eq!(get_payload_requests_mu.len(), 1); + assert_eq!(new_payload_requests_mu.len(), 1); + let req: &PayloadId = get_payload_requests_mu.first().unwrap(); + assert_eq!(*req, PayloadId::new([0, 0, 0, 0, 0, 0, 0, 1])); + } + + // Now that a block has been produced by the l2 but not the builder + // the health status should be Partial Content + let health = 
test_harness.get("healthz").await; + assert_eq!(health.status(), StatusCode::PARTIAL_CONTENT); + + test_harness.cleanup().await; + } + + #[tokio::test] + async fn builder_payload_err() { + let mut l2_mock = MockEngineServer::new(); + l2_mock.new_payload_response = l2_mock.new_payload_response.clone().map(|mut status| { + status.status = PayloadStatusEnum::Invalid { validation_error: "test".to_string() }; + status + }); + l2_mock.get_payload_responses[0] = + l2_mock.get_payload_responses[0].clone().map(|mut payload| { + payload.block_value = U256::from(10); + payload + }); + let test_harness = TestHarness::new(Some(l2_mock), None).await; + + // test get_payload_v3 return l2 payload if builder payload is invalid + let get_payload_response = + test_harness.rpc_client.get_payload_v3(PayloadId::new([0, 0, 0, 0, 0, 0, 0, 0])).await; + assert!(get_payload_response.is_ok()); + assert_eq!(get_payload_response.unwrap().block_value, U256::from(10)); + + test_harness.cleanup().await; + } + + pub async fn spawn_server(mock_engine_server: MockEngineServer) -> (ServerHandle, SocketAddr) { + let server = ServerBuilder::default().build("127.0.0.1:0").await.unwrap(); + let server_addr = server.local_addr().expect("Missing local address"); + + let mut module: RpcModule<()> = RpcModule::new(()); + + module + .register_method("engine_forkchoiceUpdatedV3", move |params, _, _| { + let params: (ForkchoiceState, Option) = params.parse()?; + let mut fcu_requests = mock_engine_server.fcu_requests.lock(); + fcu_requests.push(params); + + let mut response = mock_engine_server.fcu_response.clone(); + if let Ok(ref mut fcu_response) = response { + if let Some(override_id) = mock_engine_server.override_payload_id { + fcu_response.payload_id = Some(override_id); + } + } + + response + }) + .unwrap(); + + module + .register_method("engine_getPayloadV3", move |params, _, _| { + let params: (PayloadId,) = params.parse()?; + let mut get_payload_requests = 
mock_engine_server.get_payload_requests.lock(); + get_payload_requests.push(params.0); + + // Return the response based on the call index, or the last one if we exceed the + // list + let response_index = get_payload_requests.len().saturating_sub(1); + if response_index < mock_engine_server.get_payload_responses.len() { + mock_engine_server.get_payload_responses[response_index].clone() + } else { + // If we have more calls than responses, use the last response + mock_engine_server.get_payload_responses.last().cloned().unwrap_or_else(|| { + Err(ErrorObject::owned( + INVALID_REQUEST_CODE, + "No response configured", + None::, + )) + }) + } + }) + .unwrap(); + + module + .register_method("engine_newPayloadV3", move |params, _, _| { + let params: (ExecutionPayloadV3, Vec, B256) = params.parse()?; + let mut new_payload_requests = mock_engine_server.new_payload_requests.lock(); + new_payload_requests.push(params); + + mock_engine_server.new_payload_response.clone() + }) + .unwrap(); + + (server.start(module), server_addr) + } + + #[tokio::test] + async fn test_local_external_payload_ids_same() { + let same_id: PayloadId = PayloadId::new([0, 0, 0, 0, 0, 0, 0, 42]); + + let mut l2_mock = MockEngineServer::new(); + l2_mock.fcu_response = + Ok(ForkchoiceUpdated::new(PayloadStatus::from_status(PayloadStatusEnum::Valid)) + .with_payload_id(same_id)); + + let mut builder_mock = MockEngineServer::new(); + builder_mock.override_payload_id = Some(same_id); + + let test_harness = + TestHarness::new(Some(l2_mock.clone()), Some(builder_mock.clone())).await; + + // Test FCU call + let fcu = ForkchoiceState { + head_block_hash: FixedBytes::random(), + safe_block_hash: FixedBytes::random(), + finalized_block_hash: FixedBytes::random(), + }; + let fcu_response = test_harness.rpc_client.fork_choice_updated_v3(fcu, None).await; + assert!(fcu_response.is_ok()); + + // wait for builder to observe the FCU call + sleep(std::time::Duration::from_millis(100)).await; + + { + let builder_fcu_req = 
builder_mock.fcu_requests.lock(); + assert_eq!(builder_fcu_req.len(), 1); + assert_eq!(l2_mock.fcu_requests.lock().len(), 1); + } + + // Test getPayload call + let get_res = test_harness.rpc_client.get_payload_v3(same_id).await; + assert!(get_res.is_ok()); + + // wait for builder to observe the getPayload call + sleep(std::time::Duration::from_millis(100)).await; + + { + let builder_gp_reqs = builder_mock.get_payload_requests.lock(); + assert_eq!(builder_gp_reqs.len(), 0); + } + + { + let local_gp_reqs = l2_mock.get_payload_requests.lock(); + assert_eq!(local_gp_reqs.len(), 1); + assert_eq!(local_gp_reqs[0], same_id); + } + + test_harness.cleanup().await; + } + + #[tokio::test] + async fn has_builder_payload() { + let payload_id: PayloadId = PayloadId::new([0, 0, 0, 0, 0, 0, 0, 42]); + let mut l2_mock = MockEngineServer::new(); + l2_mock.fcu_response = + Ok(ForkchoiceUpdated::new(PayloadStatus::from_status(PayloadStatusEnum::Valid)) + .with_payload_id(payload_id)); + l2_mock.get_payload_responses[0] = + l2_mock.get_payload_responses[0].clone().map(|mut payload| { + payload.block_value = U256::from(10); + payload + }); + + let mut builder_mock = MockEngineServer::new(); + builder_mock.fcu_response = + Ok(ForkchoiceUpdated::new(PayloadStatus::from_status(PayloadStatusEnum::Syncing)) + .with_payload_id(payload_id)); + builder_mock.get_payload_responses[0] = + builder_mock.get_payload_responses[0].clone().map(|mut payload| { + payload.block_value = U256::from(15); + payload + }); + + let test_harness = TestHarness::new(Some(l2_mock), Some(builder_mock)).await; + let fcu = ForkchoiceState { + head_block_hash: FixedBytes::random(), + safe_block_hash: FixedBytes::random(), + finalized_block_hash: FixedBytes::random(), + }; + let mut payload_attributes = + OpPayloadAttributes { gas_limit: Some(1000000), ..Default::default() }; + let fcu_response = test_harness + .rpc_client + .fork_choice_updated_v3(fcu, Some(payload_attributes.clone())) + .await; + fcu_response.unwrap(); 
+ + // no tx pool is false so should return the builder payload + let get_payload_response = test_harness.rpc_client.get_payload_v3(payload_id).await; + assert!(get_payload_response.is_ok()); + assert_eq!(get_payload_response.unwrap().block_value, U256::from(15)); + + payload_attributes.no_tx_pool = Some(true); + let fcu_response = + test_harness.rpc_client.fork_choice_updated_v3(fcu, Some(payload_attributes)).await; + assert!(fcu_response.is_ok()); + + // no tx pool is true so should return the l2 payload + let get_payload_response = test_harness.rpc_client.get_payload_v3(payload_id).await; + assert!(get_payload_response.is_ok()); + assert_eq!(get_payload_response.unwrap().block_value, U256::from(10)); + + test_harness.cleanup().await; + } + + #[tokio::test] + async fn l2_client_fails_fcu() { + // If the canonical l2 client fails the FCU call, it does not matter what the builder + // returns the FCU call should fail + let mut l2_mock = MockEngineServer::new(); + l2_mock.fcu_response = Err(ErrorObject::owned( + INVALID_REQUEST_CODE, + "Payload version 4 not supported", + None::, + )); + + let test_harness = TestHarness::new(Some(l2_mock), None).await; + + let fcu = ForkchoiceState { + head_block_hash: FixedBytes::random(), + safe_block_hash: FixedBytes::random(), + finalized_block_hash: FixedBytes::random(), + }; + let fcu_response = test_harness.rpc_client.fork_choice_updated_v3(fcu, None).await; + assert!(fcu_response.is_err()); + + let payload_attributes = + OpPayloadAttributes { gas_limit: Some(1000000), ..Default::default() }; + let fcu_response = + test_harness.rpc_client.fork_choice_updated_v3(fcu, Some(payload_attributes)).await; + assert!(fcu_response.is_err()); + } + + // Helper function to create mock servers and run a test scenario + async fn run_health_test_scenario( + l2_mock: MockEngineServer, + builder_mock: Option, + expected_health: StatusCode, + external_state_root: bool, + expect_l2_get_payload_success: bool, + ) { + let test_harness = 
TestHarness::new_with_external_state_root( + Some(l2_mock), + builder_mock, + external_state_root, + ) + .await; + + let response = test_harness + .rpc_client + .fork_choice_updated_v3( + ForkchoiceState { + head_block_hash: FixedBytes::random(), + safe_block_hash: FixedBytes::random(), + finalized_block_hash: FixedBytes::random(), + }, + Some(OpPayloadAttributes { gas_limit: Some(1000000), ..Default::default() }), + ) + .await; + assert!(response.is_ok()); + + let payload_id = response.unwrap().payload_id.unwrap(); + let get_payload_response = test_harness.rpc_client.get_payload_v3(payload_id).await; + if expect_l2_get_payload_success { + assert!(get_payload_response.is_ok()); + } else { + assert!(get_payload_response.is_err()); + } + + let health = test_harness.get("healthz").await; + assert_eq!(health.status(), expected_health); + + test_harness.cleanup().await; + } + + #[tokio::test] + async fn builder_api_failure_vs_processing_failure() { + let payload_id = PayloadId::new([0, 0, 0, 0, 0, 0, 0, 1]); + let valid_fcu = + ForkchoiceUpdated::new(PayloadStatus::from_status(PayloadStatusEnum::Valid)) + .with_payload_id(payload_id); + + // Test 1: Builder API failure should mark as unhealthy + { + let mut l2_mock = MockEngineServer::new(); + l2_mock.fcu_response = Ok(valid_fcu.clone()); + + let mut builder_mock = MockEngineServer::new(); + builder_mock.fcu_response = Ok(valid_fcu.clone()); + builder_mock.get_payload_responses[0] = + Err(ErrorObject::owned(INVALID_REQUEST_CODE, "Builder API failed", None::)); + + run_health_test_scenario( + l2_mock, + Some(builder_mock), + StatusCode::PARTIAL_CONTENT, + false, + true, + ) + .await; + } + + // Test 2: L2 validation failure + { + let mut l2_mock = MockEngineServer::new(); + l2_mock.fcu_response = Ok(valid_fcu.clone()); + l2_mock.new_payload_response = Err(ErrorObject::owned( + INVALID_REQUEST_CODE, + "L2 validation failed", + None::, + )); + + let mut builder_mock = MockEngineServer::new(); + builder_mock.fcu_response = 
Ok(valid_fcu.clone()); + builder_mock.get_payload_responses[0] = + builder_mock.get_payload_responses[0].clone().map(|mut p| { + p.block_value = U256::from(15); + p + }); + + // L2 validation failure still marks as unhealthy + run_health_test_scenario( + l2_mock.clone(), + Some(builder_mock.clone()), + StatusCode::PARTIAL_CONTENT, + false, + true, + ) + .await; + } + + // Test 3: Both APIs succeed - should remain healthy + { + let mut l2_mock = MockEngineServer::new(); + l2_mock.fcu_response = Ok(valid_fcu.clone()); + l2_mock.get_payload_responses[0] = + l2_mock.get_payload_responses[0].clone().map(|mut p| { + p.block_value = U256::from(5); + p + }); + + let mut builder_mock = MockEngineServer::new(); + builder_mock.fcu_response = Ok(valid_fcu.clone()); + builder_mock.get_payload_responses[0] = + builder_mock.get_payload_responses[0].clone().map(|mut p| { + p.block_value = U256::from(20); + p + }); + + run_health_test_scenario(l2_mock, Some(builder_mock), StatusCode::OK, false, true) + .await; + } + + // Test 4: Builder FCU fails (no payload tracked) - should mark as unhealthy + { + let mut l2_mock = MockEngineServer::new(); + l2_mock.fcu_response = Ok(valid_fcu.clone()); + l2_mock.get_payload_responses[0] = + l2_mock.get_payload_responses[0].clone().map(|mut p| { + p.block_value = U256::from(8); + p + }); + + let mut builder_mock = MockEngineServer::new(); + builder_mock.fcu_response = + Err(ErrorObject::owned(INVALID_REQUEST_CODE, "Builder FCU failed", None::)); + + run_health_test_scenario( + l2_mock, + Some(builder_mock), + StatusCode::PARTIAL_CONTENT, + false, + true, + ) + .await; + } + + // Test 5: External state root - L2 second call fails but builder API succeeded + { + let mut l2_mock = MockEngineServer::new(); + l2_mock.fcu_response = Ok(valid_fcu.clone()); + // First L2 get_payload call succeeds + l2_mock.get_payload_responses[0] = + l2_mock.get_payload_responses[0].clone().map(|mut p| { + p.block_value = U256::from(5); + p + }); + // Second L2 
get_payload call (for external state root) fails + l2_mock.get_payload_responses.push(Err(ErrorObject::owned( + INVALID_REQUEST_CODE, + "L2 external state root failed", + None::, + ))); + + let mut builder_mock = MockEngineServer::new(); + builder_mock.fcu_response = Ok(valid_fcu.clone()); + builder_mock.get_payload_responses[0] = + builder_mock.get_payload_responses[0].clone().map(|mut p| { + p.block_value = U256::from(30); + p + }); + + run_health_test_scenario(l2_mock, Some(builder_mock), StatusCode::OK, true, true).await; + } + } +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/tests/builder_full_delay.rs b/rust/rollup-boost/crates/rollup-boost/src/tests/builder_full_delay.rs new file mode 100644 index 0000000000000..38d97cd0ce66a --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/tests/builder_full_delay.rs @@ -0,0 +1,68 @@ +use super::common::{RollupBoostTestHarnessBuilder, proxy::BuilderProxyHandler}; +use futures::FutureExt; +use serde_json::Value; +use std::{ + pin::Pin, + sync::{Arc, Mutex}, + time::Duration, +}; + +// Create a dynamic handler that delays all the calls by 2 seconds +struct DelayHandler { + delay: Arc>, +} + +impl BuilderProxyHandler for DelayHandler { + fn handle( + &self, + _method: String, + _params: Value, + _result: Value, + ) -> Pin> + Send>> { + let delay = *self.delay.lock().unwrap(); + async move { + tokio::time::sleep(delay).await; + None + } + .boxed() + } +} + +#[tokio::test] +async fn builder_full_delay() -> eyre::Result<()> { + let delay = Arc::new(Mutex::new(Duration::from_secs(0))); + + let handler = Arc::new(DelayHandler { delay: delay.clone() }); + + // This integration test checks that if the builder has a general delay in processing ANY of the + // requests, rollup-boost does not stop building blocks. 
+ let harness = RollupBoostTestHarnessBuilder::new("builder_full_delay") + .proxy_handler(handler) + .build() + .await?; + + let mut block_generator = harness.block_generator().await?; + + // create 3 blocks that are processed by the builder + for _ in 0..3 { + let (_block, block_creator) = block_generator.generate_block(false).await?; + assert!(block_creator.is_builder(), "Block creator should be the builder"); + } + + // create 3 blocks that are processed by the builder + for _ in 0..3 { + let (_block, block_creator) = block_generator.generate_block(false).await?; + assert!(block_creator.is_builder(), "Block creator should be the builder"); + } + + // add the delay + *delay.lock().unwrap() = Duration::from_secs(5); + + // create 3 blocks that are processed by the builder + for _ in 0..3 { + let (_block, block_creator) = block_generator.generate_block(false).await?; + assert!(block_creator.is_l2(), "Block creator should be the l2"); + } + + Ok(()) +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/tests/builder_returns_incorrect_block.rs b/rust/rollup-boost/crates/rollup-boost/src/tests/builder_returns_incorrect_block.rs new file mode 100644 index 0000000000000..5052de3795a24 --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/tests/builder_returns_incorrect_block.rs @@ -0,0 +1,63 @@ +use std::{pin::Pin, sync::Arc}; + +use super::common::{RollupBoostTestHarnessBuilder, proxy::BuilderProxyHandler}; +use alloy_primitives::B256; +use futures::FutureExt as _; +use serde_json::Value; + +use op_alloy_rpc_types_engine::OpExecutionPayloadEnvelopeV3; + +struct Handler; + +impl BuilderProxyHandler for Handler { + fn handle( + &self, + method: String, + _params: Value, + _result: Value, + ) -> Pin> + Send>> { + async move { + if method != "engine_getPayloadV3" { + return None; + } + + let mut payload = + serde_json::from_value::(_result).unwrap(); + + // modify the state root field + payload.execution_payload.payload_inner.payload_inner.state_root = 
B256::ZERO; + + let result = serde_json::to_value(&payload).unwrap(); + Some(result) + } + .boxed() + } +} + +#[tokio::test] +async fn builder_returns_incorrect_block() -> eyre::Result<()> { + // Test that the builder returns a block with an incorrect state root and that rollup-boost + // does not process it. + let harness = RollupBoostTestHarnessBuilder::new("builder_returns_incorrect_block") + .proxy_handler(Arc::new(Handler)) + .build() + .await?; + + let mut block_generator = harness.block_generator().await?; + + // create 3 blocks that are processed by the builder + for _ in 0..3 { + let (_block, block_creator) = block_generator.generate_block(false).await?; + assert!(block_creator.is_l2(), "Block creator should be the l2"); + } + // check that at some point we had the log "builder payload was not valid" which signals + // that the builder returned a payload that was not valid and rollup-boost did not process it. + // read lines + let logs = std::fs::read_to_string(harness.rollup_boost.args().log_file.clone().unwrap())?; + assert!( + logs.contains("Invalid payload"), + "Logs should contain the message 'builder payload was not valid'" + ); + + Ok(()) +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/tests/common/mod.rs b/rust/rollup-boost/crates/rollup-boost/src/tests/common/mod.rs new file mode 100644 index 0000000000000..640c0fecfc0dd --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/tests/common/mod.rs @@ -0,0 +1,632 @@ +#![allow(dead_code)] +use crate::{AuthLayer, AuthService, DebugClient, EngineApiClient}; +use alloy_eips::Encodable2718; +use alloy_primitives::{B256, Bytes, TxKind, U256, address, hex}; +use alloy_rpc_types_engine::{ + ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, JwtSecret, PayloadAttributes, PayloadId, + PayloadStatus, PayloadStatusEnum, +}; +use alloy_rpc_types_eth::BlockNumberOrTag; +use bytes::BytesMut; +use eyre::{Context, ContextCompat}; +use futures::{FutureExt, future::BoxFuture}; +use jsonrpsee::{ + 
core::middleware::layer::RpcLogger, + http_client::{HttpClient, RpcService, transport::HttpBackend}, + proc_macros::rpc, +}; +use op_alloy_consensus::TxDeposit; +use op_alloy_rpc_types_engine::OpPayloadAttributes; +use parking_lot::Mutex; +use proxy::{BuilderProxyHandler, start_proxy_server}; +use rollup_boost_types::payload::{ + NewPayload, OpExecutionPayloadEnvelope, PayloadSource, PayloadVersion, +}; +use serde_json::Value; +use services::{ + op_reth::{AUTH_RPC_PORT, OpRethConfig, OpRethImage, OpRethMethods, P2P_PORT}, + rollup_boost::{RollupBoost, RollupBoostConfig}, +}; +use std::{ + collections::HashSet, + fs::File, + io::BufReader, + net::TcpListener, + path::PathBuf, + str::FromStr, + sync::{Arc, LazyLock}, + time::{Duration, SystemTime, UNIX_EPOCH}, +}; +use testcontainers::{ + ContainerAsync, ImageExt, + core::{ + ContainerPort, + client::docker_client_instance, + logs::{LogFrame, consumer::LogConsumer}, + }, + runners::AsyncRunner, +}; +use time::{OffsetDateTime, format_description}; +use tokio::io::AsyncWriteExt as _; +use tower_http::sensitive_headers::SetSensitiveRequestHeaders; + +/// Default JWT token for testing purposes +pub const JWT_SECRET: &str = "688f5d737bad920bdfb2fc2f488d6b6209eebda1dae949a8de91398d932c517a"; +pub const L2_P2P_ENODE: &str = "3479db4d9217fb5d7a8ed4d61ac36e120b05d36c2eefb795dc42ff2e971f251a2315f5649ea1833271e020b9adc98d5db9973c7ed92d6b2f1f2223088c3d852f"; +pub static TEST_DATA: LazyLock = + LazyLock::new(|| format!("{}/src/tests/common/test_data", env!("CARGO_MANIFEST_DIR"))); + +pub mod proxy; +pub mod services; + +pub struct LoggingConsumer { + target: String, + log_file: tokio::sync::Mutex, +} + +impl LogConsumer for LoggingConsumer { + fn accept<'a>(&'a self, record: &'a LogFrame) -> BoxFuture<'a, ()> { + async move { + match record { + testcontainers::core::logs::LogFrame::StdOut(bytes) => { + self.log_file.lock().await.write_all(bytes).await.unwrap(); + } + testcontainers::core::logs::LogFrame::StdErr(bytes) => { + 
self.log_file.lock().await.write_all(bytes).await.unwrap(); + } + } + } + .boxed() + } +} + +pub struct EngineApi { + pub engine_api_client: + HttpClient>>>>, +} + +// TODO: Use client/rpc.rs instead +impl EngineApi { + pub fn new(url: &str, secret: &str) -> eyre::Result { + let secret_layer = AuthLayer::new(JwtSecret::from_str(secret)?); + let middleware = tower::ServiceBuilder::default().layer(secret_layer); + let client = jsonrpsee::http_client::HttpClientBuilder::default() + .set_http_middleware(middleware) + .build(url) + .context("Failed to create http client")?; + + Ok(Self { engine_api_client: client }) + } + + pub async fn get_payload( + &self, + version: PayloadVersion, + payload_id: PayloadId, + ) -> eyre::Result { + match version { + PayloadVersion::V3 => Ok(OpExecutionPayloadEnvelope::V3( + EngineApiClient::get_payload_v3(&self.engine_api_client, payload_id).await?, + )), + PayloadVersion::V4 => Ok(OpExecutionPayloadEnvelope::V4( + EngineApiClient::get_payload_v4(&self.engine_api_client, payload_id).await?, + )), + } + } + + pub async fn new_payload(&self, payload: NewPayload) -> eyre::Result { + match payload { + NewPayload::V3(new_payload) => Ok(EngineApiClient::new_payload_v3( + &self.engine_api_client, + new_payload.payload, + new_payload.versioned_hashes, + new_payload.parent_beacon_block_root, + ) + .await?), + NewPayload::V4(new_payload) => Ok(EngineApiClient::new_payload_v4( + &self.engine_api_client, + new_payload.payload, + new_payload.versioned_hashes, + new_payload.parent_beacon_block_root, + new_payload.execution_requests, + ) + .await?), + } + } + + pub async fn update_forkchoice( + &self, + current_head: B256, + new_head: B256, + payload_attributes: Option, + ) -> eyre::Result { + Ok(EngineApiClient::fork_choice_updated_v3( + &self.engine_api_client, + ForkchoiceState { + head_block_hash: new_head, + safe_block_hash: current_head, + finalized_block_hash: current_head, + }, + payload_attributes, + ) + .await?) 
+ } + + pub async fn latest(&self) -> eyre::Result> { + Ok(BlockApiClient::get_block_by_number( + &self.engine_api_client, + BlockNumberOrTag::Latest, + false, + ) + .await?) + } + + pub async fn set_max_da_size(&self, max_da_size: u64, max_da_gas: u64) -> eyre::Result { + Ok(MinerApiClient::set_max_da_size(&self.engine_api_client, max_da_size, max_da_gas) + .await?) + } +} + +#[rpc(client, namespace = "eth")] +pub trait BlockApi { + #[method(name = "getBlockByNumber")] + async fn get_block_by_number( + &self, + block_number: BlockNumberOrTag, + include_txs: bool, + ) -> RpcResult>; +} + +#[rpc(client, namespace = "miner")] +pub trait MinerApi { + #[method(name = "setMaxDASize")] + async fn set_max_da_size(&self, max_da_size: u64, max_da_gas: u64) -> RpcResult; +} + +#[derive(Clone)] +pub struct Genesis { + pub timestamp: u64, + pub isthmus_block: Option, + pub block_time: u64, +} + +impl Genesis { + fn to_string(&self) -> eyre::Result { + let file = File::open(PathBuf::from(format!("{}/genesis.json", *TEST_DATA))).unwrap(); + let reader = BufReader::new(file); + let mut genesis: Value = serde_json::from_reader(reader).unwrap(); + + if let Some(config) = genesis.as_object_mut() { + // Assuming timestamp is at the root level - adjust path as needed + config["timestamp"] = Value::String(format!("0x{:x}", self.timestamp)); + + if let Some(isthmus_block) = self.isthmus_block { + // In the genesis file the Isthmus fork is represented not as a block number + // but as a timestamp. + let isthmus_time = self.timestamp + isthmus_block * self.block_time; + if let Some(config_obj) = config.get_mut("config").and_then(|c| c.as_object_mut()) { + // you need to enable also the Prague hardfork since it is the one that + // enables the engine v4 API. + // In the test_data/genesis.json file they are set as '100000000000000' which + // represents that they are not enabled. 
+ config_obj["isthmusTime"] = + Value::Number(serde_json::Number::from(isthmus_time)); + config_obj["pragueTime"] = + Value::Number(serde_json::Number::from(isthmus_time)); + } + } + } + + serde_json::to_string_pretty(&genesis) + .map_err(|e| eyre::eyre!("Failed to serialize genesis: {}", e)) + } +} + +/// Test flavor that sets up one Rollup-boost instance connected to two Reth nodes +pub struct RollupBoostTestHarness { + pub l2: ContainerAsync, + pub builder: ContainerAsync, + pub rollup_boost: RollupBoost, + pub genesis: Genesis, +} + +pub struct RollupBoostTestHarnessBuilder { + test_name: String, + proxy_handler: Option>, + isthmus_block: Option, + block_time: u64, + external_state_root: bool, + ignore_unhealthy_builders: Option, + max_unsafe_interval: Option, +} + +impl RollupBoostTestHarnessBuilder { + pub fn new(test_name: &str) -> Self { + Self { + test_name: test_name.to_string(), + proxy_handler: None, + isthmus_block: None, + block_time: 1, + external_state_root: false, + ignore_unhealthy_builders: None, + max_unsafe_interval: None, + } + } + + pub fn with_isthmus_block(mut self, isthmus_block: u64) -> Self { + self.isthmus_block = Some(isthmus_block); + self + } + + pub fn with_block_time(mut self, block_time: u64) -> Self { + self.block_time = block_time; + self + } + + pub fn file_path(&self, service_name: &str) -> eyre::Result { + let dt: OffsetDateTime = SystemTime::now().into(); + let format = format_description::parse("[year]_[month]_[day]_[hour]_[minute]_[second]")?; + let timestamp = dt.format(&format)?; + + let dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("../../integration_logs") + .join(self.test_name.clone()) + .join(timestamp); + std::fs::create_dir_all(&dir)?; + + let file_name = format!("{service_name}.log"); + Ok(dir.join(file_name)) + } + + pub async fn async_log_file(&self, service_name: &str) -> eyre::Result { + let file_path = self.file_path(service_name)?; + 
Ok(tokio::fs::OpenOptions::new().append(true).create(true).open(file_path).await?) + } + + pub async fn log_consumer(&self, service_name: &str) -> eyre::Result { + let file = self.async_log_file(service_name).await?; + Ok(LoggingConsumer { + target: service_name.to_string(), + log_file: tokio::sync::Mutex::new(file), + }) + } + + pub fn proxy_handler(mut self, proxy_handler: Arc) -> Self { + self.proxy_handler = Some(proxy_handler); + self + } + + pub fn with_l2_state_root_computation(mut self, enabled: bool) -> Self { + self.external_state_root = enabled; + self + } + + pub fn with_ignore_unhealthy_builders(mut self, enabled: bool) -> Self { + self.ignore_unhealthy_builders = Some(enabled); + self + } + + pub fn with_max_unsafe_interval(mut self, interval_secs: u64) -> Self { + self.max_unsafe_interval = Some(interval_secs); + self + } + + pub async fn build(self) -> eyre::Result { + let network = rand::random::().to_string(); + let l2_log_consumer = self.log_consumer("l2").await?; + let builder_log_consumer = self.log_consumer("builder").await?; + let rollup_boost_log_file_path = self.file_path("rollup_boost")?; + + let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(); + + let genesis = + Genesis { timestamp, isthmus_block: self.isthmus_block, block_time: self.block_time }; + + let genesis_str = genesis.to_string()?; + + let l2_p2p_port = get_available_port(); + let l2 = OpRethConfig::default() + .set_p2p_secret(Some(PathBuf::from(format!("{}/p2p_secret.hex", *TEST_DATA)))) + .set_genesis(genesis_str.clone()) + .build()? 
+ .with_mapped_port(l2_p2p_port, ContainerPort::Tcp(P2P_PORT)) + .with_mapped_port(l2_p2p_port, ContainerPort::Udp(P2P_PORT)) + .with_mapped_port(get_available_port(), ContainerPort::Tcp(AUTH_RPC_PORT)) + .with_network(&network) + .with_log_consumer(l2_log_consumer) + .start() + .await?; + + let client = docker_client_instance().await?; + let res = client.inspect_container(l2.id(), None).await?; + let name = res.name.unwrap()[1..].to_string(); // remove the leading '/' + + let l2_enode = format!("enode://{L2_P2P_ENODE}@{name}:{P2P_PORT}"); + + let builder_p2p_port = get_available_port(); + let builder = OpRethConfig::default() + .set_trusted_peers(vec![l2_enode]) + .set_genesis(genesis_str) + .build()? + .with_mapped_port(builder_p2p_port, ContainerPort::Tcp(P2P_PORT)) + .with_mapped_port(builder_p2p_port, ContainerPort::Udp(P2P_PORT)) + .with_mapped_port(get_available_port(), ContainerPort::Tcp(AUTH_RPC_PORT)) + .with_network(&network) + .with_log_consumer(builder_log_consumer) + .start() + .await?; + + println!("l2 authrpc: {}", l2.auth_rpc().await?); + println!("builder authrpc: {}", builder.auth_rpc().await?); + + // run a proxy in between the builder and the rollup-boost if the proxy_handler is set + let mut builder_authrpc_port = builder.auth_rpc_port().await?; + if let Some(proxy_handler) = self.proxy_handler { + println!("starting proxy server"); + let proxy_port = get_available_port(); + start_proxy_server(proxy_handler, proxy_port, builder_authrpc_port).await?; + builder_authrpc_port = proxy_port + }; + let builder_url = format!("http://localhost:{builder_authrpc_port}/"); + println!("proxy authrpc: {builder_url}"); + + // Start Rollup-boost instance + let mut rollup_boost = RollupBoostConfig::default(); + rollup_boost.args.lib.l2_client.l2_url = l2.auth_rpc().await?; + rollup_boost.args.lib.builder.builder_url = builder_url.try_into().unwrap(); + rollup_boost.args.log_file = Some(rollup_boost_log_file_path); + rollup_boost.args.lib.external_state_root = 
self.external_state_root; + if let Some(allow_traffic) = self.ignore_unhealthy_builders { + rollup_boost.args.lib.ignore_unhealthy_builders = allow_traffic; + } + if let Some(interval) = self.max_unsafe_interval { + rollup_boost.args.lib.max_unsafe_interval = interval; + } + let rollup_boost = rollup_boost.start().await; + println!("rollup-boost authrpc: {}", rollup_boost.rpc_endpoint()); + println!("rollup-boost metrics: {}", rollup_boost.metrics_endpoint()); + + Ok(RollupBoostTestHarness { l2, builder, rollup_boost, genesis }) + } +} + +impl RollupBoostTestHarness { + pub async fn block_generator(&self) -> eyre::Result { + let validator = + BlockBuilderCreatorValidator::new(self.rollup_boost.args().log_file.clone().unwrap()); + + let engine_api = EngineApi::new(&self.rollup_boost.rpc_endpoint(), JWT_SECRET)?; + + let mut block_creator = + SimpleBlockGenerator::new(validator, engine_api, self.genesis.clone()); + block_creator.init().await?; + Ok(block_creator) + } + + pub fn engine_api(&self) -> eyre::Result { + EngineApi::new(&self.rollup_boost.rpc_endpoint(), JWT_SECRET) + } + + pub async fn debug_client(&self) -> DebugClient { + DebugClient::new(&self.rollup_boost.debug_endpoint()).unwrap() + } +} + +/// A simple system that continuously generates empty blocks using the engine API +pub struct SimpleBlockGenerator { + validator: BlockBuilderCreatorValidator, + engine_api: EngineApi, + latest_hash: B256, + timestamp: u64, + genesis: Genesis, + current_block_number: u64, + block_time: Duration, +} + +impl SimpleBlockGenerator { + pub fn new( + validator: BlockBuilderCreatorValidator, + engine_api: EngineApi, + genesis: Genesis, + ) -> Self { + Self { + validator, + engine_api, + latest_hash: B256::ZERO, // temporary value + timestamp: genesis.timestamp, + genesis, + current_block_number: 0, + block_time: Duration::from_secs(1), + } + } + + pub fn set_block_time(&mut self, block_time: Duration) { + self.block_time = block_time; + } + + /// Initialize the block 
generator by fetching the latest block + pub async fn init(&mut self) -> eyre::Result<()> { + let latest_block = self.engine_api.latest().await?.context("block not found")?; + self.latest_hash = latest_block.header.hash; + self.timestamp = latest_block.header.timestamp; + Ok(()) + } + + /// Generate a single new block and return its hash + pub async fn generate_block( + &mut self, + empty_blocks: bool, + ) -> eyre::Result<(B256, PayloadSource)> { + let timestamp = self.timestamp + self.genesis.block_time; + self.current_block_number += 1; + + let version = match self.genesis.isthmus_block { + Some(num) => { + if self.current_block_number < num { + PayloadVersion::V3 + } else { + PayloadVersion::V4 + } + } + None => PayloadVersion::V3, + }; + + let txns = match version { + PayloadVersion::V4 => { + // Starting on the Ishtmus hardfork, the payload attributes must include a + // "BlockInfo" transaction which is a deposit transaction with info + // about the gas fees on L1. Op-Reth will fail to process the block + // if the state resulting from executing this transaction is not set + // in REVM. 
+ let tx = create_deposit_tx(); + Some(vec![tx]) + } + _ => None, + }; + + // Submit forkchoice update with payload attributes for the next block + let result = self + .engine_api + .update_forkchoice( + self.latest_hash, + self.latest_hash, + Some(OpPayloadAttributes { + payload_attributes: PayloadAttributes { + withdrawals: Some(vec![]), + parent_beacon_block_root: Some(B256::ZERO), + timestamp, + prev_randao: B256::ZERO, + suggested_fee_recipient: Default::default(), + }, + transactions: txns, + no_tx_pool: Some(empty_blocks), + gas_limit: Some(10000000000), + eip_1559_params: None, + min_base_fee: None, + }), + ) + .await?; + + let payload_id = result.payload_id.context("missing payload id")?; + + if !empty_blocks { + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + } + + let payload = self.engine_api.get_payload(version, payload_id).await?; + + // Submit the new payload to the node + let validation_status = + self.engine_api.new_payload(NewPayload::from(payload.clone())).await?; + + if validation_status.status != PayloadStatusEnum::Valid { + return Err(eyre::eyre!("Invalid payload status")); + } + + let execution_payload = ExecutionPayload::from(payload); + let new_block_hash = execution_payload.block_hash(); + + // Update the chain's head + self.engine_api.update_forkchoice(self.latest_hash, new_block_hash, None).await?; + + // Update internal state + self.latest_hash = new_block_hash; + self.timestamp = execution_payload.timestamp(); + + // Check who built the block in the rollup-boost logs + let block_creator = self + .validator + .get_block_creator(new_block_hash) + .await? 
+ .context("block creator not found")?; + + Ok((new_block_hash, block_creator)) + } + + pub async fn generate_builder_blocks(&mut self, num_blocks: u64) -> eyre::Result<()> { + for _ in 0..num_blocks { + let (_block, block_creator) = self.generate_block(false).await?; + if !block_creator.is_builder() { + eyre::bail!("Block creator should be the builder"); + } + } + Ok(()) + } +} + +pub struct BlockBuilderCreatorValidator { + file: PathBuf, +} + +impl BlockBuilderCreatorValidator { + pub fn new(file: PathBuf) -> Self { + Self { file } + } +} + +impl BlockBuilderCreatorValidator { + pub async fn get_block_creator(&self, block_hash: B256) -> eyre::Result> { + let contents = std::fs::read_to_string(&self.file)?; + + let search_query = format!("returning block hash={block_hash:#x}"); + + // Find the log line containing the block hash + for line in contents.lines() { + if line.contains(&search_query) { + // Extract the context=X part + if let Some(context_start) = line.find("context=") { + let context = line[context_start..] + .split_whitespace() + .next() + .ok_or(eyre::eyre!("no context found"))? 
+ .split('=') + .nth(1) + .ok_or(eyre::eyre!("no context found"))?; + + match context { + "builder" => return Ok(Some(PayloadSource::Builder)), + "l2" => return Ok(Some(PayloadSource::L2)), + _ => panic!("Unknown context: {context}"), + } + } else { + panic!("no context found"); + } + } + } + + Ok(None) + } +} + +fn create_deposit_tx() -> Bytes { + // Extracted from a Isthmus enabled chain running in builder-playground + const ISTHMUS_DATA: &[u8] = &hex!( + "098999be00000558000c5fc500000000000000030000000067a9f765000000000000002900000000000000000000000000000000000000000000000000000000006a6d09000000000000000000000000000000000000000000000000000000000000000172fcc8e8886636bdbe96ba0e4baab67ea7e7811633f52b52e8cf7a5123213b6f000000000000000000000000d3f2c5afb2d76f5579f326b0cd7da5f5a4126c3500004e2000000000000001f4" + ); + + let deposit_tx = TxDeposit { + source_hash: B256::default(), + from: address!("DeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001"), + to: TxKind::Call(address!("4200000000000000000000000000000000000015")), + mint: 0, + value: U256::default(), + gas_limit: 210000, + is_system_transaction: true, + input: ISTHMUS_DATA.into(), + }; + + let mut buffer_without_header = BytesMut::new(); + deposit_tx.encode_2718(&mut buffer_without_header); + + buffer_without_header.to_vec().into() +} + +pub fn get_available_port() -> u16 { + static CLAIMED_PORTS: LazyLock>> = + LazyLock::new(|| Mutex::new(HashSet::new())); + loop { + let port: u16 = rand::random_range(1000..20000); + if TcpListener::bind(("127.0.0.1", port)).is_ok() && CLAIMED_PORTS.lock().insert(port) { + return port; + } + } +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/tests/common/proxy.rs b/rust/rollup-boost/crates/rollup-boost/src/tests/common/proxy.rs new file mode 100644 index 0000000000000..960225a053212 --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/tests/common/proxy.rs @@ -0,0 +1,140 @@ +use bytes::Bytes; +use http::header; +use http_body_util::{BodyExt, Full, combinators::BoxBody}; 
+use hyper::{ + Request, Response, client::conn::http1::Builder, server::conn::http1, service::service_fn, +}; +use hyper_util::rt::TokioIo; +use serde::{Deserialize, Serialize}; +use serde_json::Value; +use std::{net::SocketAddr, pin::Pin, sync::Arc}; +use tokio::net::{TcpListener, TcpStream}; + +#[derive(Debug, Serialize, Deserialize, Clone)] +struct JsonRpcRequest { + jsonrpc: String, + method: String, + params: Value, + id: Value, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +struct JsonRpcResponse { + jsonrpc: String, + #[serde(default)] + result: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + error: Option, + id: Value, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct JsonRpcError { + code: i32, + message: String, + data: Option, +} + +pub trait BuilderProxyHandler: Send + Sync + 'static { + fn handle( + &self, + method: String, + params: Value, + result: Value, + ) -> Pin> + Send>>; +} + +// Structure to hold the target address that we'll pass to the proxy function +#[derive(Clone)] +struct ProxyConfig { + target_addr: SocketAddr, + handler: Arc, +} + +async fn proxy( + config: ProxyConfig, + req: Request, +) -> Result>, Box> { + let (parts, body) = req.into_parts(); + let bytes = body.collect().await?.to_bytes(); + + let json_rpc_request = serde_json::from_slice::(&bytes).unwrap(); + let req = Request::from_parts(parts, Full::new(bytes)); + + let stream = TcpStream::connect(config.target_addr).await?; + let io = TokioIo::new(stream); + + let (mut sender, conn) = + Builder::new().preserve_header_case(true).title_case_headers(true).handshake(io).await?; + + tokio::task::spawn(async move { + if let Err(err) = conn.await { + println!("Connection failed: {err:?}"); + } + }); + + let resp = sender.send_request(req).await?; + + let (parts, body) = resp.into_parts(); + let bytes = body.collect().await?.to_bytes(); + + let json_rpc_response = serde_json::from_slice::(&bytes).unwrap(); + let bytes = if let 
Some(result) = json_rpc_response.clone().result { + let value = + config.handler.handle(json_rpc_request.method, json_rpc_request.params, result).await; + if let Some(value) = value { + // If the handler returns a value, we replace the result with the new value + // The callback only returns the result of the jsonrpc request so we have to wrap it up + // again in a JsonRpcResponse + let mut new_json_rpc_resp = json_rpc_response; + new_json_rpc_resp.result = Some(value); + Bytes::from(serde_json::to_vec(&new_json_rpc_resp).unwrap()) + } else { + // If the handler returns None, we return the original response + bytes + } + } else { + bytes + }; + + let bytes_len = bytes.len(); + let mut resp = Response::from_parts(parts, Full::new(bytes).map_err(|_| unreachable!())); + + // We have to update the content length to the new bytes length + resp.headers_mut().insert(header::CONTENT_LENGTH, bytes_len.into()); + + Ok(resp.map(|b| b.boxed())) +} + +pub async fn start_proxy_server( + handler: Arc, + listen_port: u16, + target_port: u16, +) -> eyre::Result<()> { + let listen_addr = SocketAddr::from(([127, 0, 0, 1], listen_port)); + let target_addr = SocketAddr::from(([127, 0, 0, 1], target_port)); + + let config = ProxyConfig { target_addr, handler }; + let listener = TcpListener::bind(listen_addr).await?; + + tokio::spawn(async move { + loop { + let (stream, _) = listener.accept().await.unwrap(); + let io = TokioIo::new(stream); + let config = config.clone(); + + tokio::task::spawn(async move { + if let Err(err) = http1::Builder::new() + .preserve_header_case(true) + .title_case_headers(true) + .serve_connection(io, service_fn(move |req| proxy(config.clone(), req))) + .await + { + println!("Failed to serve connection: {err:?}"); + } + }); + } + }); + + Ok(()) +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/tests/common/services/mod.rs b/rust/rollup-boost/crates/rollup-boost/src/tests/common/services/mod.rs new file mode 100644 index 0000000000000..41ef9049e0542 --- 
/dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/tests/common/services/mod.rs @@ -0,0 +1,2 @@ +pub mod op_reth; +pub mod rollup_boost; diff --git a/rust/rollup-boost/crates/rollup-boost/src/tests/common/services/op_reth.rs b/rust/rollup-boost/crates/rollup-boost/src/tests/common/services/op_reth.rs new file mode 100644 index 0000000000000..85bd7f668e287 --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/tests/common/services/op_reth.rs @@ -0,0 +1,176 @@ +use http::Uri; +use std::{borrow::Cow, collections::HashMap, path::PathBuf}; +use testcontainers::{ + ContainerAsync, CopyToContainer, Image, + core::{ContainerPort, WaitFor}, +}; + +use crate::tests::common::TEST_DATA; + +const NAME: &str = "ghcr.io/paradigmxyz/op-reth"; +const TAG: &str = "v1.3.12"; + +pub const AUTH_RPC_PORT: u16 = 8551; +pub const P2P_PORT: u16 = 30303; + +#[derive(Debug, Clone)] +pub struct OpRethConfig { + jwt_secret: PathBuf, + p2p_secret: Option, + pub trusted_peers: Vec, + pub color: String, + pub ipcdisable: bool, + pub env_vars: HashMap, + pub genesis: Option, +} + +impl Default for OpRethConfig { + fn default() -> Self { + Self { + jwt_secret: PathBuf::from(format!("{}/jwt_secret.hex", *TEST_DATA)), + p2p_secret: None, + trusted_peers: vec![], + color: "never".to_string(), + ipcdisable: true, + env_vars: Default::default(), + genesis: None, + } + } +} + +impl OpRethConfig { + pub fn set_trusted_peers(mut self, trusted_peers: Vec) -> Self { + self.trusted_peers = trusted_peers; + self + } + + pub fn set_jwt_secret(mut self, jwt_secret: PathBuf) -> Self { + self.jwt_secret = jwt_secret; + self + } + + pub fn set_p2p_secret(mut self, p2p_secret: Option) -> Self { + self.p2p_secret = p2p_secret; + self + } + + pub fn set_genesis(mut self, genesis: String) -> Self { + self.genesis = Some(genesis); + self + } + + pub fn build(self) -> eyre::Result { + let genesis = + self.genesis.clone().ok_or_else(|| eyre::eyre!("Genesis configuration not found"))?; + + let mut 
copy_to_sources = vec![ + CopyToContainer::new( + std::fs::read_to_string(&self.jwt_secret)?.into_bytes(), + "/jwt_secret.hex".to_string(), + ), + CopyToContainer::new(genesis.into_bytes(), "/genesis.json".to_string()), + ]; + + if let Some(p2p_secret) = &self.p2p_secret { + let p2p_string = std::fs::read_to_string(p2p_secret).unwrap().replace("\n", ""); + copy_to_sources + .push(CopyToContainer::new(p2p_string.into_bytes(), "/p2p_secret.hex".to_string())); + } + + let expose_ports = vec![]; + + Ok(OpRethImage { config: self, copy_to_sources, expose_ports }) + } +} + +impl OpRethImage { + pub fn config(&self) -> &OpRethConfig { + &self.config + } +} + +#[derive(Debug, Clone)] +pub struct OpRethImage { + config: OpRethConfig, + copy_to_sources: Vec, + expose_ports: Vec, +} + +impl Image for OpRethImage { + fn name(&self) -> &str { + NAME + } + + fn tag(&self) -> &str { + TAG + } + + fn ready_conditions(&self) -> Vec { + vec![WaitFor::message_on_stdout("Starting consensus")] + } + + fn env_vars( + &self, + ) -> impl IntoIterator>, impl Into>)> { + &self.config.env_vars + } + + fn copy_to_sources(&self) -> impl IntoIterator { + self.copy_to_sources.iter() + } + + fn cmd(&self) -> impl IntoIterator>> { + let mut cmd = vec![ + "node".to_string(), + "--port=30303".to_string(), + "--addr=0.0.0.0".to_string(), + "--http".to_string(), + "--http.addr=0.0.0.0".to_string(), + "--http.api=eth,net,web3,debug,miner".to_string(), + "--authrpc.port=8551".to_string(), + "--authrpc.addr=0.0.0.0".to_string(), + "--authrpc.jwtsecret=/jwt_secret.hex".to_string(), + "--chain=/genesis.json".to_string(), + "--log.stdout.filter=trace".to_string(), + "-vvvvv".to_string(), + "--disable-discovery".to_string(), + "--color".to_string(), + self.config.color.clone(), + ]; + if self.config.p2p_secret.is_some() { + cmd.push("--p2p-secret-key=/p2p_secret.hex".to_string()); + } + if !self.config.trusted_peers.is_empty() { + println!("Trusted peers: {:?}", self.config.trusted_peers); + 
cmd.extend(["--trusted-peers".to_string(), self.config.trusted_peers.join(",")]); + } + if self.config.ipcdisable { + cmd.push("--ipcdisable".to_string()); + } + cmd + } + + fn expose_ports(&self) -> &[ContainerPort] { + &self.expose_ports + } +} + +pub trait OpRethMethods { + async fn auth_rpc(&self) -> eyre::Result; + async fn auth_rpc_port(&self) -> eyre::Result; +} + +impl OpRethMethods for ContainerAsync { + async fn auth_rpc_port(&self) -> eyre::Result { + Ok(self.get_host_port_ipv4(AUTH_RPC_PORT).await?) + } + + async fn auth_rpc(&self) -> eyre::Result { + Ok(format!( + "http://{}:{}", + self.get_host().await?, + self.get_host_port_ipv4(AUTH_RPC_PORT).await? + ) + .parse()?) + } +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/tests/common/services/rollup_boost.rs b/rust/rollup-boost/crates/rollup-boost/src/tests/common/services/rollup_boost.rs new file mode 100644 index 0000000000000..b4cfbe20293f6 --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/tests/common/services/rollup_boost.rs @@ -0,0 +1,94 @@ +use std::{fs::File, time::Duration}; + +use crate::RollupBoostServiceArgs; +use tokio::task::JoinHandle; +use tracing::subscriber::DefaultGuard; +use tracing_subscriber::fmt; + +use crate::tests::common::{TEST_DATA, get_available_port}; + +#[derive(Debug)] +pub struct RollupBoost { + args: RollupBoostServiceArgs, + pub _handle: JoinHandle>, + pub _tracing_guard: DefaultGuard, +} + +impl RollupBoost { + pub fn args(&self) -> &RollupBoostServiceArgs { + &self.args + } + + pub fn rpc_endpoint(&self) -> String { + format!("http://localhost:{}", self.args.rpc_port) + } + + pub fn metrics_endpoint(&self) -> String { + format!("http://localhost:{}", self.args.metrics_port) + } + + pub fn debug_endpoint(&self) -> String { + format!("http://localhost:{}", self.args.debug_server_port) + } + + pub async fn get_metrics(&self) -> eyre::Result { + let response = reqwest::get(self.metrics_endpoint() + "/metrics").await?; + let body = 
response.text().await?; + Ok(body) + } +} + +#[derive(Clone, Debug)] +pub struct RollupBoostConfig { + pub args: RollupBoostServiceArgs, +} + +impl Default for RollupBoostConfig { + fn default() -> Self { + let mut args = ::parse_from([ + "rollup-boost", + &format!("--l2-jwt-path={}/jwt_secret.hex", *TEST_DATA), + &format!("--builder-jwt-path={}/jwt_secret.hex", *TEST_DATA), + "--log-level=trace", + "--health-check-interval=1", // Set health check interval to 1 second for tests + "--max-unsafe-interval=60", // Increase max unsafe interval for tests + ]); + + args.rpc_port = get_available_port(); + args.metrics_port = get_available_port(); + args.debug_server_port = get_available_port(); + + Self { args } + } +} + +impl RollupBoostConfig { + pub async fn start(self) -> RollupBoost { + let args = self.args.clone(); + + // Create a custom log subscriber only for this task + let log_file = args.log_file.as_ref().unwrap(); + let file = File::create(log_file).unwrap(); + + let subscriber = fmt::Subscriber::builder() + .with_writer(file) + .with_max_level(tracing::Level::DEBUG) + .with_ansi(false) + .finish(); + + let guard = tracing::subscriber::set_default(subscriber); + + let _handle = tokio::spawn(async move { + let res = args.clone().run().await; + if let Err(e) = &res { + eprintln!("Error: {e:?}"); + } + res + }); + + // Allow some time for the app to startup + tokio::time::sleep(Duration::from_secs(4)).await; + + RollupBoost { args: self.args, _handle, _tracing_guard: guard } + } +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/tests/common/test_data/genesis.json b/rust/rollup-boost/crates/rollup-boost/src/tests/common/test_data/genesis.json new file mode 100644 index 0000000000000..e9510b99888ec --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/tests/common/test_data/genesis.json @@ -0,0 +1,898 @@ +{ + "alloc": { + "0000000000000000000000000000000000000000": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000001": { + "balance": 
"0x1" + }, + "0000000000000000000000000000000000000002": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000003": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000004": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000005": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000006": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000007": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000008": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000009": { + "balance": "0x1" + }, + "000000000000000000000000000000000000000a": { + "balance": "0x1" + }, + "000000000000000000000000000000000000000b": { + "balance": "0x1" + }, + "000000000000000000000000000000000000000c": { + "balance": "0x1" + }, + "000000000000000000000000000000000000000d": { + "balance": "0x1" + }, + "000000000000000000000000000000000000000e": { + "balance": "0x1" + }, + "000000000000000000000000000000000000000f": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000010": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000011": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000012": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000013": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000014": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000015": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000016": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000017": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000018": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000019": { + "balance": "0x1" + }, + "000000000000000000000000000000000000001a": { + "balance": "0x1" + }, + "000000000000000000000000000000000000001b": { + "balance": "0x1" + }, + "000000000000000000000000000000000000001c": { + "balance": "0x1" + }, + 
"000000000000000000000000000000000000001d": { + "balance": "0x1" + }, + "000000000000000000000000000000000000001e": { + "balance": "0x1" + }, + "000000000000000000000000000000000000001f": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000020": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000021": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000022": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000023": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000024": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000025": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000026": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000027": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000028": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000029": { + "balance": "0x1" + }, + "000000000000000000000000000000000000002a": { + "balance": "0x1" + }, + "000000000000000000000000000000000000002b": { + "balance": "0x1" + }, + "000000000000000000000000000000000000002c": { + "balance": "0x1" + }, + "000000000000000000000000000000000000002d": { + "balance": "0x1" + }, + "000000000000000000000000000000000000002e": { + "balance": "0x1" + }, + "000000000000000000000000000000000000002f": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000030": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000031": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000032": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000033": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000034": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000035": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000036": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000037": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000038": { + 
"balance": "0x1" + }, + "0000000000000000000000000000000000000039": { + "balance": "0x1" + }, + "000000000000000000000000000000000000003a": { + "balance": "0x1" + }, + "000000000000000000000000000000000000003b": { + "balance": "0x1" + }, + "000000000000000000000000000000000000003c": { + "balance": "0x1" + }, + "000000000000000000000000000000000000003d": { + "balance": "0x1" + }, + "000000000000000000000000000000000000003e": { + "balance": "0x1" + }, + "000000000000000000000000000000000000003f": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000040": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000041": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000042": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000043": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000044": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000045": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000046": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000047": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000048": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000049": { + "balance": "0x1" + }, + "000000000000000000000000000000000000004a": { + "balance": "0x1" + }, + "000000000000000000000000000000000000004b": { + "balance": "0x1" + }, + "000000000000000000000000000000000000004c": { + "balance": "0x1" + }, + "000000000000000000000000000000000000004d": { + "balance": "0x1" + }, + "000000000000000000000000000000000000004e": { + "balance": "0x1" + }, + "000000000000000000000000000000000000004f": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000050": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000051": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000052": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000053": { + "balance": "0x1" + }, + 
"0000000000000000000000000000000000000054": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000055": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000056": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000057": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000058": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000059": { + "balance": "0x1" + }, + "000000000000000000000000000000000000005a": { + "balance": "0x1" + }, + "000000000000000000000000000000000000005b": { + "balance": "0x1" + }, + "000000000000000000000000000000000000005c": { + "balance": "0x1" + }, + "000000000000000000000000000000000000005d": { + "balance": "0x1" + }, + "000000000000000000000000000000000000005e": { + "balance": "0x1" + }, + "000000000000000000000000000000000000005f": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000060": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000061": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000062": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000063": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000064": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000065": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000066": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000067": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000068": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000069": { + "balance": "0x1" + }, + "000000000000000000000000000000000000006a": { + "balance": "0x1" + }, + "000000000000000000000000000000000000006b": { + "balance": "0x1" + }, + "000000000000000000000000000000000000006c": { + "balance": "0x1" + }, + "000000000000000000000000000000000000006d": { + "balance": "0x1" + }, + "000000000000000000000000000000000000006e": { + "balance": "0x1" + }, + "000000000000000000000000000000000000006f": { + 
"balance": "0x1" + }, + "0000000000000000000000000000000000000070": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000071": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000072": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000073": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000074": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000075": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000076": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000077": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000078": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000079": { + "balance": "0x1" + }, + "000000000000000000000000000000000000007a": { + "balance": "0x1" + }, + "000000000000000000000000000000000000007b": { + "balance": "0x1" + }, + "000000000000000000000000000000000000007c": { + "balance": "0x1" + }, + "000000000000000000000000000000000000007d": { + "balance": "0x1" + }, + "000000000000000000000000000000000000007e": { + "balance": "0x1" + }, + "000000000000000000000000000000000000007f": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000080": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000081": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000082": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000083": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000084": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000085": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000086": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000087": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000088": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000089": { + "balance": "0x1" + }, + "000000000000000000000000000000000000008a": { + "balance": "0x1" + }, + 
"000000000000000000000000000000000000008b": { + "balance": "0x1" + }, + "000000000000000000000000000000000000008c": { + "balance": "0x1" + }, + "000000000000000000000000000000000000008d": { + "balance": "0x1" + }, + "000000000000000000000000000000000000008e": { + "balance": "0x1" + }, + "000000000000000000000000000000000000008f": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000090": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000091": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000092": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000093": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000094": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000095": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000096": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000097": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000098": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000099": { + "balance": "0x1" + }, + "000000000000000000000000000000000000009a": { + "balance": "0x1" + }, + "000000000000000000000000000000000000009b": { + "balance": "0x1" + }, + "000000000000000000000000000000000000009c": { + "balance": "0x1" + }, + "000000000000000000000000000000000000009d": { + "balance": "0x1" + }, + "000000000000000000000000000000000000009e": { + "balance": "0x1" + }, + "000000000000000000000000000000000000009f": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000a0": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000a1": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000a2": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000a3": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000a4": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000a5": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000a6": { + 
"balance": "0x1" + }, + "00000000000000000000000000000000000000a7": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000a8": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000a9": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000aa": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000ab": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000ac": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000ad": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000ae": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000af": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000b0": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000b1": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000b2": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000b3": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000b4": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000b5": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000b6": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000b7": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000b8": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000b9": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000ba": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000bb": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000bc": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000bd": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000be": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000bf": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000c0": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000c1": { + "balance": "0x1" + }, + 
"00000000000000000000000000000000000000c2": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000c3": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000c4": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000c5": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000c6": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000c7": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000c8": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000c9": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000ca": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000cb": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000cc": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000cd": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000ce": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000cf": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000d0": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000d1": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000d2": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000d3": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000d4": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000d5": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000d6": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000d7": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000d8": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000d9": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000da": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000db": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000dc": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000dd": { + 
"balance": "0x1" + }, + "00000000000000000000000000000000000000de": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000df": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000e0": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000e1": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000e2": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000e3": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000e4": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000e5": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000e6": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000e7": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000e8": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000e9": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000ea": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000eb": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000ec": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000ed": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000ee": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000ef": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000f0": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000f1": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000f2": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000f3": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000f4": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000f5": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000f6": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000f7": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000f8": { + "balance": "0x1" + }, + 
"00000000000000000000000000000000000000f9": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000fa": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000fb": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000fc": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000fd": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000fe": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000ff": { + "balance": "0x1" + }, + "000000000022d473030f116ddee9f6b43ac78ba3": { + "balance": "0x0", + "code": "0x6040608081526004908136101561001557600080fd5b600090813560e01c80630d58b1db1461126c578063137c29fe146110755780632a2d80d114610db75780632b67b57014610bde57806330f28b7a14610ade5780633644e51514610a9d57806336c7851614610a285780633ff9dcb1146109a85780634fe02b441461093f57806365d9723c146107ac57806387517c451461067a578063927da105146105c3578063cc53287f146104a3578063edd9444b1461033a5763fe8ec1a7146100c657600080fd5b346103365760c07ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc3601126103365767ffffffffffffffff833581811161033257610114903690860161164b565b60243582811161032e5761012b903690870161161a565b6101336114e6565b9160843585811161032a5761014b9036908a016115c1565b98909560a43590811161032657610164913691016115c1565b969095815190610173826113ff565b606b82527f5065726d697442617463685769746e6573735472616e7366657246726f6d285460208301527f6f6b656e5065726d697373696f6e735b5d207065726d69747465642c61646472838301527f657373207370656e6465722c75696e74323536206e6f6e63652c75696e74323560608301527f3620646561646c696e652c000000000000000000000000000000000000000000608083015282519a8b9181610222602085018096611f93565b918237018a8152039961025b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09b8c8101835282611437565b5190209085515161026b81611ebb565b908a5b8181106102f95750506102f6999a6102ed9183516102a081610294602082018095611f66565b03848101835282611437565b519020602089810151858b0151955191820196875260408201929092523360608201526
08081019190915260a081019390935260643560c08401528260e081015b03908101835282611437565b51902093611cf7565b80f35b8061031161030b610321938c5161175e565b51612054565b61031b828661175e565b52611f0a565b61026e565b8880fd5b8780fd5b8480fd5b8380fd5b5080fd5b5091346103365760807ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc3601126103365767ffffffffffffffff9080358281116103325761038b903690830161164b565b60243583811161032e576103a2903690840161161a565b9390926103ad6114e6565b9160643590811161049f576103c4913691016115c1565b949093835151976103d489611ebb565b98885b81811061047d5750506102f697988151610425816103f9602082018095611f66565b037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08101835282611437565b5190206020860151828701519083519260208401947ffcf35f5ac6a2c28868dc44c302166470266239195f02b0ee408334829333b7668652840152336060840152608083015260a082015260a081526102ed8161141b565b808b61031b8261049461030b61049a968d5161175e565b9261175e565b6103d7565b8680fd5b5082346105bf57602090817ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc3601126103325780359067ffffffffffffffff821161032e576104f49136910161161a565b929091845b848110610504578580f35b8061051a610515600193888861196c565b61197c565b61052f84610529848a8a61196c565b0161197c565b3389528385528589209173ffffffffffffffffffffffffffffffffffffffff80911692838b528652868a20911690818a5285528589207fffffffffffffffffffffffff000000000000000000000000000000000000000081541690558551918252848201527f89b1add15eff56b3dfe299ad94e01f2b52fbcb80ae1a3baea6ae8c04cb2b98a4853392a2016104f9565b8280fd5b50346103365760607ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261033657610676816105ff6114a0565b936106086114c3565b6106106114e6565b73ffffffffffffffffffffffffffffffffffffffff968716835260016020908152848420928816845291825283832090871683528152919020549251938316845260a083901c65ffffffffffff169084015260d09190911c604083015281906060820190565b0390f35b50346103365760807ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261033657610
6b26114a0565b906106bb6114c3565b916106c46114e6565b65ffffffffffff926064358481169081810361032a5779ffffffffffff0000000000000000000000000000000000000000947fda9fa7c1b00402c17d0161b249b1ab8bbec047c5a52207b9c112deffd817036b94338a5260016020527fffffffffffff0000000000000000000000000000000000000000000000000000858b209873ffffffffffffffffffffffffffffffffffffffff809416998a8d5260205283878d209b169a8b8d52602052868c209486156000146107a457504216925b8454921697889360a01b16911617179055815193845260208401523392a480f35b905092610783565b5082346105bf5760607ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc3601126105bf576107e56114a0565b906107ee6114c3565b9265ffffffffffff604435818116939084810361032a57338852602091600183528489209673ffffffffffffffffffffffffffffffffffffffff80911697888b528452858a20981697888a5283528489205460d01c93848711156109175761ffff9085840316116108f05750907f55eb90d810e1700b35a8e7e25395ff7f2b2259abd7415ca2284dfb1c246418f393929133895260018252838920878a528252838920888a5282528389209079ffffffffffffffffffffffffffffffffffffffffffffffffffff7fffffffffffff000000000000000000000000000000000000000000000000000083549260d01b16911617905582519485528401523392a480f35b84517f24d35a26000000000000000000000000000000000000000000000000000000008152fd5b5084517f756688fe000000000000000000000000000000000000000000000000000000008152fd5b503461033657807ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc360112610336578060209273ffffffffffffffffffffffffffffffffffffffff61098f6114a0565b1681528084528181206024358252845220549051908152f35b5082346105bf57817ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc3601126105bf577f3704902f963766a4e561bbaab6e6cdc1b1dd12f6e9e99648da8843b3f46b918d90359160243533855284602052818520848652602052818520818154179055815193845260208401523392a280f35b8234610a9a5760807ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc360112610a9a57610a606114a0565b610a686114c3565b610a706114e6565b6064359173ffffffffffffffffffffffffffffffffffffffff8316830361032e576102f
6936117a1565b80fd5b503461033657817ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261033657602090610ad7611b1e565b9051908152f35b508290346105bf576101007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc3601126105bf57610b1a3661152a565b90807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7c36011261033257610b4c611478565b9160e43567ffffffffffffffff8111610bda576102f694610b6f913691016115c1565b939092610b7c8351612054565b6020840151828501519083519260208401947f939c21a48a8dbe3a9a2404a1d46691e4d39f6583d6ec6b35714604c986d801068652840152336060840152608083015260a082015260a08152610bd18161141b565b51902091611c25565b8580fd5b509134610336576101007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261033657610c186114a0565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdc360160c08112610332576080855191610c51836113e3565b1261033257845190610c6282611398565b73ffffffffffffffffffffffffffffffffffffffff91602435838116810361049f578152604435838116810361049f57602082015265ffffffffffff606435818116810361032a5788830152608435908116810361049f576060820152815260a435938285168503610bda576020820194855260c4359087830182815260e43567ffffffffffffffff811161032657610cfe90369084016115c1565b929093804211610d88575050918591610d786102f6999a610d7e95610d238851611fbe565b90898c511690519083519260208401947ff3841cd1ff0085026a6327b620b67997ce40f282c88a8e905a7a5626e310f3d086528401526060830152608082015260808152610d70816113ff565b519020611bd9565b916120c7565b519251169161199d565b602492508a51917fcd21db4f000000000000000000000000000000000000000000000000000000008352820152fd5b5091346103365760607ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc93818536011261033257610df36114a0565b9260249081359267ffffffffffffffff9788851161032a578590853603011261049f578051978589018981108282111761104a578252848301358181116103265785019036602383011215610326578382013591610e50836115ef565b90610e5d85519283611437565b838252602093878584019160071b83010191368311611046578801905b828
210610fe9575050508a526044610e93868801611509565b96838c01978852013594838b0191868352604435908111610fe557610ebb90369087016115c1565b959096804211610fba575050508998995151610ed681611ebb565b908b5b818110610f9757505092889492610d7892610f6497958351610f02816103f98682018095611f66565b5190209073ffffffffffffffffffffffffffffffffffffffff9a8b8b51169151928551948501957faf1b0d30d2cab0380e68f0689007e3254993c596f2fdd0aaa7f4d04f794408638752850152830152608082015260808152610d70816113ff565b51169082515192845b848110610f78578580f35b80610f918585610f8b600195875161175e565b5161199d565b01610f6d565b80610311610fac8e9f9e93610fb2945161175e565b51611fbe565b9b9a9b610ed9565b8551917fcd21db4f000000000000000000000000000000000000000000000000000000008352820152fd5b8a80fd5b6080823603126110465785608091885161100281611398565b61100b85611509565b8152611018838601611509565b838201526110278a8601611607565b8a8201528d611037818701611607565b90820152815201910190610e7a565b8c80fd5b84896041867f4e487b7100000000000000000000000000000000000000000000000000000000835252fd5b5082346105bf576101407ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc3601126105bf576110b03661152a565b91807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7c360112610332576110e2611478565b67ffffffffffffffff93906101043585811161049f5761110590369086016115c1565b90936101243596871161032a57611125610bd1966102f6983691016115c1565b969095825190611134826113ff565b606482527f5065726d69745769746e6573735472616e7366657246726f6d28546f6b656e5060208301527f65726d697373696f6e73207065726d69747465642c6164647265737320737065848301527f6e6465722c75696e74323536206e6f6e63652c75696e7432353620646561646c60608301527f696e652c0000000000000000000000000000000000000000000000000000000060808301528351948591816111e3602085018096611f93565b918237018b8152039361121c7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe095868101835282611437565b5190209261122a8651612054565b6020878101518589015195519182019687526040820192909252336060820152608081019190915260a081019390935260e43560c0840152826
0e081016102e1565b5082346105bf576020807ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261033257813567ffffffffffffffff92838211610bda5736602383011215610bda5781013592831161032e576024906007368386831b8401011161049f57865b8581106112e5578780f35b80821b83019060807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdc83360301126103265761139288876001946060835161132c81611398565b611368608461133c8d8601611509565b9485845261134c60448201611509565b809785015261135d60648201611509565b809885015201611509565b918291015273ffffffffffffffffffffffffffffffffffffffff80808093169516931691166117a1565b016112da565b6080810190811067ffffffffffffffff8211176113b457604052565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6060810190811067ffffffffffffffff8211176113b457604052565b60a0810190811067ffffffffffffffff8211176113b457604052565b60c0810190811067ffffffffffffffff8211176113b457604052565b90601f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0910116810190811067ffffffffffffffff8211176113b457604052565b60c4359073ffffffffffffffffffffffffffffffffffffffff8216820361149b57565b600080fd5b6004359073ffffffffffffffffffffffffffffffffffffffff8216820361149b57565b6024359073ffffffffffffffffffffffffffffffffffffffff8216820361149b57565b6044359073ffffffffffffffffffffffffffffffffffffffff8216820361149b57565b359073ffffffffffffffffffffffffffffffffffffffff8216820361149b57565b7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc01906080821261149b576040805190611563826113e3565b8082941261149b57805181810181811067ffffffffffffffff8211176113b457825260043573ffffffffffffffffffffffffffffffffffffffff8116810361149b578152602435602082015282526044356020830152606435910152565b9181601f8401121561149b5782359167ffffffffffffffff831161149b576020838186019501011161149b57565b67ffffffffffffffff81116113b45760051b60200190565b359065ffffffffffff8216820361149b57565b9181601f8401121561149b5782359167ffffffffffffffff831161149b576020808501948460061b01011161149b57565b9
1909160608184031261149b576040805191611666836113e3565b8294813567ffffffffffffffff9081811161149b57830182601f8201121561149b578035611693816115ef565b926116a087519485611437565b818452602094858086019360061b8501019381851161149b579086899897969594939201925b8484106116e3575050505050855280820135908501520135910152565b90919293949596978483031261149b578851908982019082821085831117611730578a928992845261171487611509565b81528287013583820152815201930191908897969594936116c6565b602460007f4e487b710000000000000000000000000000000000000000000000000000000081526041600452fd5b80518210156117725760209160051b010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b92919273ffffffffffffffffffffffffffffffffffffffff604060008284168152600160205282828220961695868252602052818120338252602052209485549565ffffffffffff8760a01c16804211611884575082871696838803611812575b5050611810955016926118b5565b565b878484161160001461184f57602488604051907ff96fb0710000000000000000000000000000000000000000000000000000000082526004820152fd5b7fffffffffffffffffffffffff000000000000000000000000000000000000000084846118109a031691161790553880611802565b602490604051907fd81b2f2e0000000000000000000000000000000000000000000000000000000082526004820152fd5b9060006064926020958295604051947f23b872dd0000000000000000000000000000000000000000000000000000000086526004860152602485015260448401525af13d15601f3d116001600051141617161561190e57565b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601460248201527f5452414e534645525f46524f4d5f4641494c45440000000000000000000000006044820152fd5b91908110156117725760061b0190565b3573ffffffffffffffffffffffffffffffffffffffff8116810361149b5790565b9065ffffffffffff908160608401511673ffffffffffffffffffffffffffffffffffffffff908185511694826020820151169280866040809401511695169560009187835260016020528383208984526020528383209916988983526020528282209184835460d01c03611af5579185611ace94927fc6a377bfc4eb120024a8ac08eef205be16b817020812c73223e81d1bdb9708ec9
8979694508715600014611ad35779ffffffffffff00000000000000000000000000000000000000009042165b60a01b167fffffffffffff00000000000000000000000000000000000000000000000000006001860160d01b1617179055519384938491604091949373ffffffffffffffffffffffffffffffffffffffff606085019616845265ffffffffffff809216602085015216910152565b0390a4565b5079ffffffffffff000000000000000000000000000000000000000087611a60565b600484517f756688fe000000000000000000000000000000000000000000000000000000008152fd5b467f000000000000000000000000000000000000000000000000000000000000038503611b69577f48deb34b39fb4b41f5c195008940d5ef510cdd7853eba5807b2fa08dfd58647590565b60405160208101907f8cad95687ba82c2ce50e74f7b754645e5117c3a5bec8151c0726d5857980a86682527f9ac997416e8ff9d2ff6bebeb7149f65cdae5e32e2b90440b566bb3044041d36a604082015246606082015230608082015260808152611bd3816113ff565b51902090565b611be1611b1e565b906040519060208201927f190100000000000000000000000000000000000000000000000000000000000084526022830152604282015260428152611bd381611398565b9192909360a435936040840151804211611cc65750602084510151808611611c955750918591610d78611c6594611c60602088015186611e47565b611bd9565b73ffffffffffffffffffffffffffffffffffffffff809151511692608435918216820361149b57611810936118b5565b602490604051907f3728b83d0000000000000000000000000000000000000000000000000000000082526004820152fd5b602490604051907fcd21db4f0000000000000000000000000000000000000000000000000000000082526004820152fd5b959093958051519560409283830151804211611e175750848803611dee57611d2e918691610d7860209b611c608d88015186611e47565b60005b868110611d42575050505050505050565b611d4d81835161175e565b5188611d5a83878a61196c565b01359089810151808311611dbe575091818888886001968596611d84575b50505050505001611d31565b611db395611dad9273ffffffffffffffffffffffffffffffffffffffff6105159351169561196c565b916118b5565b803888888883611d78565b6024908651907f3728b83d0000000000000000000000000000000000000000000000000000000082526004820152fd5b600484517fff633a38000000000000000000000000000000000000000000000000000000008152fd5b602490855
1907fcd21db4f0000000000000000000000000000000000000000000000000000000082526004820152fd5b9073ffffffffffffffffffffffffffffffffffffffff600160ff83161b9216600052600060205260406000209060081c6000526020526040600020818154188091551615611e9157565b60046040517f756688fe000000000000000000000000000000000000000000000000000000008152fd5b90611ec5826115ef565b611ed26040519182611437565b8281527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0611f0082946115ef565b0190602036910137565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8114611f375760010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b805160208092019160005b828110611f7f575050505090565b835185529381019392810192600101611f71565b9081519160005b838110611fab575050016000815290565b8060208092840101518185015201611f9a565b60405160208101917f65626cad6cb96493bf6f5ebea28756c966f023ab9e8a83a7101849d5573b3678835273ffffffffffffffffffffffffffffffffffffffff8082511660408401526020820151166060830152606065ffffffffffff9182604082015116608085015201511660a082015260a0815260c0810181811067ffffffffffffffff8211176113b45760405251902090565b6040516020808201927f618358ac3db8dc274f0cd8829da7e234bd48cd73c4a740aede1adec9846d06a1845273ffffffffffffffffffffffffffffffffffffffff81511660408401520151606082015260608152611bd381611398565b919082604091031261149b576020823592013590565b6000843b61222e5750604182036121ac576120e4828201826120b1565b939092604010156117725760209360009360ff6040608095013560f81c5b60405194855216868401526040830152606082015282805260015afa156121a05773ffffffffffffffffffffffffffffffffffffffff806000511691821561217657160361214c57565b60046040517f815e1d64000000000000000000000000000000000000000000000000000000008152fd5b60046040517f8baa579f000000000000000000000000000000000000000000000000000000008152fd5b6040513d6000823e3d90fd5b60408203612204576121c0918101906120b1565b91601b7f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff84169360ff1c019060ff8211611f375760209360009360ff608094612102565
b60046040517f4be6321b000000000000000000000000000000000000000000000000000000008152fd5b929391601f928173ffffffffffffffffffffffffffffffffffffffff60646020957fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0604051988997889687947f1626ba7e000000000000000000000000000000000000000000000000000000009e8f8752600487015260406024870152816044870152868601378b85828601015201168101030192165afa9081156123a857829161232a575b507fffffffff000000000000000000000000000000000000000000000000000000009150160361230057565b60046040517fb0669cbc000000000000000000000000000000000000000000000000000000008152fd5b90506020813d82116123a0575b8161234460209383611437565b810103126103365751907fffffffff0000000000000000000000000000000000000000000000000000000082168203610a9a57507fffffffff0000000000000000000000000000000000000000000000000000000090386122d4565b3d9150612337565b6040513d84823e3d90fdfea164736f6c6343000811000a", + "nonce": "0x1" + }, + "0000000071727de22e5e9d8baf0edac6f37da032": { + "balance": "0x0", + "code": "0x60806040526004361015610024575b361561001957600080fd5b61002233612748565b005b60003560e01c806242dc5314611b0057806301ffc9a7146119ae5780630396cb60146116765780630bd28e3b146115fa5780631b2e01b814611566578063205c2878146113d157806322cdde4c1461136b57806335567e1a146112b35780635287ce12146111a557806370a0823114611140578063765e827f14610e82578063850aaf6214610dc35780639b249f6914610c74578063b760faf914610c3a578063bb9fe6bf14610a68578063c23a5cea146107c4578063dbed18e0146101a15763fc7e286d0361000e573461019c5760207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261019c5773ffffffffffffffffffffffffffffffffffffffff61013a61229f565b16600052600060205260a0604060002065ffffffffffff6001825492015460405192835260ff8116151560208401526dffffffffffffffffffffffffffff8160081c16604084015263ffffffff8160781c16606084015260981c166080820152f35b600080fd5b3461019c576101af36612317565b906101b86129bd565b60009160005b82811061056f57506101d08493612588565b6000805b8481106102fc5750507fbb47ee3e183a558b1a2ff0874b079f3fc5478b7454eacf2bf
c5af2ff5878f972600080a16000809360005b81811061024757610240868660007f575ff3acadd5ab348fe1855e217e0f3678f8d767d7494c9f9fefbee2e17cca4d8180a2613ba7565b6001600255005b6102a261025582848a612796565b73ffffffffffffffffffffffffffffffffffffffff6102766020830161282a565b167f575ff3acadd5ab348fe1855e217e0f3678f8d767d7494c9f9fefbee2e17cca4d600080a2806127d6565b906000915b8083106102b957505050600101610209565b909194976102f36102ed6001926102e78c8b6102e0826102da8e8b8d61269d565b9261265a565b5191613597565b90612409565b99612416565b950191906102a7565b6020610309828789612796565b61031f61031682806127d6565b9390920161282a565b9160009273ffffffffffffffffffffffffffffffffffffffff8091165b8285106103505750505050506001016101d4565b909192939561037f83610378610366848c61265a565b516103728b898b61269d565b856129f6565b9290613dd7565b9116840361050a576104a5576103958491613dd7565b9116610440576103b5576103aa600191612416565b96019392919061033c565b60a487604051907f220266b6000000000000000000000000000000000000000000000000000000008252600482015260406024820152602160448201527f41413332207061796d61737465722065787069726564206f72206e6f7420647560648201527f65000000000000000000000000000000000000000000000000000000000000006084820152fd5b608488604051907f220266b6000000000000000000000000000000000000000000000000000000008252600482015260406024820152601460448201527f41413334207369676e6174757265206572726f720000000000000000000000006064820152fd5b608488604051907f220266b6000000000000000000000000000000000000000000000000000000008252600482015260406024820152601760448201527f414132322065787069726564206f72206e6f74206475650000000000000000006064820152fd5b608489604051907f220266b6000000000000000000000000000000000000000000000000000000008252600482015260406024820152601460448201527f41413234207369676e6174757265206572726f720000000000000000000000006064820152fd5b61057a818487612796565b9361058585806127d6565b919095602073ffffffffffffffffffffffffffffffffffffffff6105aa82840161282a565b1697600192838a1461076657896105da575b5050505060019293949550906105d191612409565b939291016101be565b806040610
5e892019061284b565b918a3b1561019c57929391906040519485937f2dd8113300000000000000000000000000000000000000000000000000000000855288604486016040600488015252606490818601918a60051b8701019680936000915b8c83106106e657505050505050838392610684927ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc8560009803016024860152612709565b03818a5afa90816106d7575b506106c657602486604051907f86a9f7500000000000000000000000000000000000000000000000000000000082526004820152fd5b93945084936105d1600189806105bc565b6106e0906121bd565b88610690565b91939596977fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff9c908a9294969a0301865288357ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffee18336030181121561019c57836107538793858394016128ec565b9a0196019301909189979695949261063f565b606483604051907f08c379a00000000000000000000000000000000000000000000000000000000082526004820152601760248201527f4141393620696e76616c69642061676772656761746f720000000000000000006044820152fd5b3461019c576020807ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261019c576107fc61229f565b33600052600082526001604060002001908154916dffffffffffffffffffffffffffff8360081c16928315610a0a5765ffffffffffff8160981c1680156109ac57421061094e5760009373ffffffffffffffffffffffffffffffffffffffff859485947fffffffffffffff000000000000000000000000000000000000000000000000ff86951690556040517fb7c918e0e249f999e965cafeb6c664271b3f4317d296461500e71da39f0cbda33391806108da8786836020909392919373ffffffffffffffffffffffffffffffffffffffff60408201951681520152565b0390a2165af16108e8612450565b50156108f057005b606490604051907f08c379a00000000000000000000000000000000000000000000000000000000082526004820152601860248201527f6661696c656420746f207769746864726177207374616b6500000000000000006044820152fd5b606485604051907f08c379a00000000000000000000000000000000000000000000000000000000082526004820152601b60248201527f5374616b65207769746864726177616c206973206e6f742064756500000000006044820152fd5b606486604051907f08c379a000000000000000000000000000000
00000000000000000000000000082526004820152601d60248201527f6d7573742063616c6c20756e6c6f636b5374616b6528292066697273740000006044820152fd5b606485604051907f08c379a00000000000000000000000000000000000000000000000000000000082526004820152601460248201527f4e6f207374616b6520746f2077697468647261770000000000000000000000006044820152fd5b3461019c5760007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261019c573360005260006020526001604060002001805463ffffffff8160781c16908115610bdc5760ff1615610b7e5765ffffffffffff908142160191818311610b4f5780547fffffffffffffff000000000000ffffffffffffffffffffffffffffffffffff001678ffffffffffff00000000000000000000000000000000000000609885901b161790556040519116815233907ffa9b3c14cc825c412c9ed81b3ba365a5b459439403f18829e572ed53a4180f0a90602090a2005b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601160248201527f616c726561647920756e7374616b696e670000000000000000000000000000006044820152fd5b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600a60248201527f6e6f74207374616b6564000000000000000000000000000000000000000000006044820152fd5b60207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261019c57610022610c6f61229f565b612748565b3461019c5760207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261019c5760043567ffffffffffffffff811161019c576020610cc8610d1b9236906004016122c2565b919073ffffffffffffffffffffffffffffffffffffffff9260405194859283927f570e1a360000000000000000000000000000000000000000000000000000000084528560048501526024840191612709565b03816000857f000000000000000000000000efc2c1444ebcc4db75e7613d20c6a62ff67a167c165af1908115610db757602492600092610d86575b50604051917f6ca7b806000000000000000000000000000000000000000000000000000000008352166004820152fd5b610da991925060203d602011610db0575b610da181836121ed565b8101906126dd565b9083610d565
65b503d610d97565b6040513d6000823e3d90fd5b3461019c5760407ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261019c57610dfa61229f565b60243567ffffffffffffffff811161019c57600091610e1e839236906004016122c2565b90816040519283928337810184815203915af4610e39612450565b90610e7e6040519283927f99410554000000000000000000000000000000000000000000000000000000008452151560048401526040602484015260448301906123c6565b0390fd5b3461019c57610e9036612317565b610e9b9291926129bd565b610ea483612588565b60005b848110610f1c57506000927fbb47ee3e183a558b1a2ff0874b079f3fc5478b7454eacf2bfc5af2ff5878f972600080a16000915b858310610eec576102408585613ba7565b909193600190610f12610f0087898761269d565b610f0a888661265a565b519088613597565b0194019190610edb565b610f47610f40610f2e8385979561265a565b51610f3a84898761269d565b846129f6565b9190613dd7565b73ffffffffffffffffffffffffffffffffffffffff929183166110db5761107657610f7190613dd7565b911661101157610f8657600101929092610ea7565b60a490604051907f220266b6000000000000000000000000000000000000000000000000000000008252600482015260406024820152602160448201527f41413332207061796d61737465722065787069726564206f72206e6f7420647560648201527f65000000000000000000000000000000000000000000000000000000000000006084820152fd5b608482604051907f220266b6000000000000000000000000000000000000000000000000000000008252600482015260406024820152601460448201527f41413334207369676e6174757265206572726f720000000000000000000000006064820152fd5b608483604051907f220266b6000000000000000000000000000000000000000000000000000000008252600482015260406024820152601760448201527f414132322065787069726564206f72206e6f74206475650000000000000000006064820152fd5b608484604051907f220266b6000000000000000000000000000000000000000000000000000000008252600482015260406024820152601460448201527f41413234207369676e6174757265206572726f720000000000000000000000006064820152fd5b3461019c5760207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261019c5773ffffffffffffffffffffffffffffffffffffffff61118c61229f565b166000526000602052602
0604060002054604051908152f35b3461019c5760207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261019c5773ffffffffffffffffffffffffffffffffffffffff6111f161229f565b6000608060405161120181612155565b828152826020820152826040820152826060820152015216600052600060205260a06040600020608060405161123681612155565b6001835493848352015490602081019060ff8316151582526dffffffffffffffffffffffffffff60408201818560081c16815263ffffffff936060840193858760781c16855265ffffffffffff978891019660981c1686526040519788525115156020880152511660408601525116606084015251166080820152f35b3461019c5760407ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261019c5760206112ec61229f565b73ffffffffffffffffffffffffffffffffffffffff6113096122f0565b911660005260018252604060002077ffffffffffffffffffffffffffffffffffffffffffffffff821660005282526040600020547fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000006040519260401b16178152f35b3461019c577ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc60208136011261019c576004359067ffffffffffffffff821161019c5761012090823603011261019c576113c9602091600401612480565b604051908152f35b3461019c5760407ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261019c5761140861229f565b60243590336000526000602052604060002090815491828411611508576000808573ffffffffffffffffffffffffffffffffffffffff8295839561144c848a612443565b90556040805173ffffffffffffffffffffffffffffffffffffffff831681526020810185905233917fd1c19fbcd4551a5edfb66d43d2e337c04837afda3482b42bdf569a8fccdae5fb91a2165af16114a2612450565b50156114aa57005b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f6661696c656420746f20776974686472617700000000000000000000000000006044820152fd5b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f576974686472617720616d6f756e7420746f6f206c61726765000000000000006044820152fd5b3461019c5760407ffffffffffffffffffffffffffffffffff
ffffffffffffffffffffffffffffffc36011261019c5761159d61229f565b73ffffffffffffffffffffffffffffffffffffffff6115ba6122f0565b9116600052600160205277ffffffffffffffffffffffffffffffffffffffffffffffff604060002091166000526020526020604060002054604051908152f35b3461019c5760207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261019c5760043577ffffffffffffffffffffffffffffffffffffffffffffffff811680910361019c5733600052600160205260406000209060005260205260406000206116728154612416565b9055005b6020807ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261019c5760043563ffffffff9182821680920361019c5733600052600081526040600020928215611950576001840154908160781c1683106118f2576116f86dffffffffffffffffffffffffffff9182349160081c16612409565b93841561189457818511611836579065ffffffffffff61180592546040519061172082612155565b8152848101926001845260408201908816815260608201878152600160808401936000855233600052600089526040600020905181550194511515917fffffffffffffffffffffffffff0000000000000000000000000000000000000060ff72ffffffff0000000000000000000000000000006effffffffffffffffffffffffffff008954945160081b16945160781b1694169116171717835551167fffffffffffffff000000000000ffffffffffffffffffffffffffffffffffffff78ffffffffffff0000000000000000000000000000000000000083549260981b169116179055565b6040519283528201527fa5ae833d0bb1dcd632d98a8b70973e8516812898e19bf27b70071ebc8dc52c0160403392a2005b606483604051907f08c379a00000000000000000000000000000000000000000000000000000000082526004820152600e60248201527f7374616b65206f766572666c6f770000000000000000000000000000000000006044820152fd5b606483604051907f08c379a00000000000000000000000000000000000000000000000000000000082526004820152601260248201527f6e6f207374616b652073706563696669656400000000000000000000000000006044820152fd5b606482604051907f08c379a00000000000000000000000000000000000000000000000000000000082526004820152601c60248201527f63616e6e6f7420646563726561736520756e7374616b652074696d65000000006044820152fd5b606482604051907f08c379a0000000000000000000000
0000000000000000000000000000000000082526004820152601a60248201527f6d757374207370656369667920756e7374616b652064656c61790000000000006044820152fd5b3461019c5760207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261019c576004357fffffffff00000000000000000000000000000000000000000000000000000000811680910361019c57807f60fc6b6e0000000000000000000000000000000000000000000000000000000060209214908115611ad6575b8115611aac575b8115611a82575b8115611a58575b506040519015158152f35b7f01ffc9a70000000000000000000000000000000000000000000000000000000091501482611a4d565b7f3e84f0210000000000000000000000000000000000000000000000000000000081149150611a46565b7fcf28ef970000000000000000000000000000000000000000000000000000000081149150611a3f565b7f915074d80000000000000000000000000000000000000000000000000000000081149150611a38565b3461019c576102007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261019c5767ffffffffffffffff60043581811161019c573660238201121561019c57611b62903690602481600401359101612268565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdc36016101c0811261019c5761014060405191611b9e83612155565b1261019c5760405192611bb0846121a0565b60243573ffffffffffffffffffffffffffffffffffffffff8116810361019c578452602093604435858201526064356040820152608435606082015260a435608082015260c43560a082015260e43560c08201526101043573ffffffffffffffffffffffffffffffffffffffff8116810361019c5760e08201526101243561010082015261014435610120820152825261016435848301526101843560408301526101a43560608301526101c43560808301526101e43590811161019c57611c7c9036906004016122c2565b905a3033036120f7578351606081015195603f5a0260061c61271060a0840151890101116120ce5760009681519182611ff0575b5050505090611cca915a9003608085015101923691612268565b925a90600094845193611cdc85613ccc565b9173ffffffffffffffffffffffffffffffffffffffff60e0870151168015600014611ea957505073ffffffffffffffffffffffffffffffffffffffff855116935b5a9003019360a06060820151910151016080860151850390818111611e95575b50508302604085015192818410600014611
dce5750506003811015611da157600203611d79576113c99293508093611d7481613d65565b613cf6565b5050507fdeadaa51000000000000000000000000000000000000000000000000000000008152fd5b6024857f4e487b710000000000000000000000000000000000000000000000000000000081526021600452fd5b81611dde92979396940390613c98565b506003841015611e6857507f49628fd1471006c1482da88028e9ce4dbb080b815c9b0344d39e5a8e6ec1419f60808683015192519473ffffffffffffffffffffffffffffffffffffffff865116948873ffffffffffffffffffffffffffffffffffffffff60e0890151169701519160405192835215898301528760408301526060820152a46113c9565b807f4e487b7100000000000000000000000000000000000000000000000000000000602492526021600452fd5b6064919003600a0204909301928780611d3d565b8095918051611eba575b5050611d1d565b6003861015611fc1576002860315611eb35760a088015190823b1561019c57600091611f2491836040519586809581947f7c627b210000000000000000000000000000000000000000000000000000000083528d60048401526080602484015260848301906123c6565b8b8b0260448301528b60648301520393f19081611fad575b50611fa65787893d610800808211611f9e575b506040519282828501016040528184528284013e610e7e6040519283927fad7954bc000000000000000000000000000000000000000000000000000000008452600484015260248301906123c6565b905083611f4f565b8980611eb3565b611fb89199506121bd565b6000978a611f3c565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b91600092918380938c73ffffffffffffffffffffffffffffffffffffffff885116910192f115612023575b808080611cb0565b611cca929195503d6108008082116120c6575b5060405190888183010160405280825260008983013e805161205f575b5050600194909161201b565b7f1c4fada7374c0a9ee8841fc38afe82932dc0f8e69012e927f061a8bae611a20188870151918973ffffffffffffffffffffffffffffffffffffffff8551169401516120bc604051928392835260408d84015260408301906123c6565b0390a38680612053565b905088612036565b877fdeaddead000000000000000000000000000000000000000000000000000000006000526000fd5b606486604051907f08c379a00000000000000000000000000000000000000000000000000000000082526004820152601760248201527f4141393220696e746
5726e616c2063616c6c206f6e6c790000000000000000006044820152fd5b60a0810190811067ffffffffffffffff82111761217157604052565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b610140810190811067ffffffffffffffff82111761217157604052565b67ffffffffffffffff811161217157604052565b6060810190811067ffffffffffffffff82111761217157604052565b90601f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0910116810190811067ffffffffffffffff82111761217157604052565b67ffffffffffffffff811161217157601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01660200190565b9291926122748261222e565b9161228260405193846121ed565b82948184528183011161019c578281602093846000960137010152565b6004359073ffffffffffffffffffffffffffffffffffffffff8216820361019c57565b9181601f8401121561019c5782359167ffffffffffffffff831161019c576020838186019501011161019c57565b6024359077ffffffffffffffffffffffffffffffffffffffffffffffff8216820361019c57565b9060407ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc83011261019c5760043567ffffffffffffffff9283821161019c578060238301121561019c57816004013593841161019c5760248460051b8301011161019c57602401919060243573ffffffffffffffffffffffffffffffffffffffff8116810361019c5790565b60005b8381106123b65750506000910152565b81810151838201526020016123a6565b907fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f602093612402815180928187528780880191016123a3565b0116010190565b91908201809211610b4f57565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8114610b4f5760010190565b91908203918211610b4f57565b3d1561247b573d906124618261222e565b9161246f60405193846121ed565b82523d6000602084013e565b606090565b604061248e8183018361284b565b90818351918237206124a3606084018461284b565b90818451918237209260c06124bb60e083018361284b565b908186519182372091845195602087019473ffffffffffffffffffffffffffffffffffffffff833516865260208301358789015260608801526080870152608081013560a087015260a081013582870152013560e085015261010090818501528352610
12083019167ffffffffffffffff918484108385111761217157838252845190206101408501908152306101608601524661018086015260608452936101a00191821183831017612171575251902090565b67ffffffffffffffff81116121715760051b60200190565b9061259282612570565b6040906125a260405191826121ed565b8381527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe06125d08295612570565b019160005b8381106125e25750505050565b60209082516125f081612155565b83516125fb816121a0565b600081526000849181838201528187820152816060818184015260809282848201528260a08201528260c08201528260e082015282610100820152826101208201528652818587015281898701528501528301528286010152016125d5565b805182101561266e5760209160051b010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b919081101561266e5760051b810135907ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffee18136030182121561019c570190565b9081602091031261019c575173ffffffffffffffffffffffffffffffffffffffff8116810361019c5790565b601f82602094937fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0938186528686013760008582860101520116010190565b7f2da466a7b24304f47e87fa2e1e5a81b9831ce54fec19055ce277ca2f39ba42c4602073ffffffffffffffffffffffffffffffffffffffff61278a3485613c98565b936040519485521692a2565b919081101561266e5760051b810135907fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa18136030182121561019c570190565b9035907fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe18136030182121561019c570180359067ffffffffffffffff821161019c57602001918160051b3603831361019c57565b3573ffffffffffffffffffffffffffffffffffffffff8116810361019c5790565b9035907fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe18136030182121561019c570180359067ffffffffffffffff821161019c5760200191813603831361019c57565b90357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe18236030181121561019c57016020813591019167ffffffffffffffff821161019c57813603831361019c57565b61012091813573fffffffffffffffffffffffffffffffffffffff
f811680910361019c576129626129476129ba9561299b93855260208601356020860152612937604087018761289c565b9091806040880152860191612709565b612954606086018661289c565b908583036060870152612709565b6080840135608084015260a084013560a084015260c084013560c084015261298d60e085018561289c565b9084830360e0860152612709565b916129ac610100918281019061289c565b929091818503910152612709565b90565b60028054146129cc5760028055565b60046040517f3ee5aeb5000000000000000000000000000000000000000000000000000000008152fd5b926000905a93805194843573ffffffffffffffffffffffffffffffffffffffff811680910361019c5786526020850135602087015260808501356fffffffffffffffffffffffffffffffff90818116606089015260801c604088015260a086013560c088015260c086013590811661010088015260801c610120870152612a8060e086018661284b565b801561357b576034811061351d578060141161019c578060241161019c5760341161019c57602481013560801c60a0880152601481013560801c60808801523560601c60e08701525b612ad285612480565b60208301526040860151946effffffffffffffffffffffffffffff8660c08901511760608901511760808901511760a0890151176101008901511761012089015117116134bf57604087015160608801510160808801510160a08801510160c0880151016101008801510296835173ffffffffffffffffffffffffffffffffffffffff81511690612b66604085018561284b565b806131e4575b505060e0015173ffffffffffffffffffffffffffffffffffffffff1690600082156131ac575b6020612bd7918b828a01516000868a604051978896879586937f19822f7c00000000000000000000000000000000000000000000000000000000855260048501613db5565b0393f160009181613178575b50612c8b573d8c610800808311612c83575b50604051916020818401016040528083526000602084013e610e7e6040519283927f65c8fd4d000000000000000000000000000000000000000000000000000000008452600484015260606024840152600d60648401527f4141323320726576657274656400000000000000000000000000000000000000608484015260a0604484015260a48301906123c6565b915082612bf5565b9a92939495969798999a91156130f2575b509773ffffffffffffffffffffffffffffffffffffffff835116602084015190600052600160205260406000208160401c60005260205267ffffffffffffffff604060002091825492612cee84612416565b9
055160361308d575a8503116130285773ffffffffffffffffffffffffffffffffffffffff60e0606093015116612d42575b509060a09184959697986040608096015260608601520135905a900301910152565b969550505a9683519773ffffffffffffffffffffffffffffffffffffffff60e08a01511680600052600060205260406000208054848110612fc3576080612dcd9a9b9c600093878094039055015192602089015183604051809d819582947f52b7512c0000000000000000000000000000000000000000000000000000000084528c60048501613db5565b039286f1978860009160009a612f36575b50612e86573d8b610800808311612e7e575b50604051916020818401016040528083526000602084013e610e7e6040519283927f65c8fd4d000000000000000000000000000000000000000000000000000000008452600484015260606024840152600d60648401527f4141333320726576657274656400000000000000000000000000000000000000608484015260a0604484015260a48301906123c6565b915082612df0565b9991929394959697989998925a900311612eab57509096959094939291906080612d20565b60a490604051907f220266b6000000000000000000000000000000000000000000000000000000008252600482015260406024820152602760448201527f41413336206f766572207061796d6173746572566572696669636174696f6e4760648201527f61734c696d6974000000000000000000000000000000000000000000000000006084820152fd5b915098503d90816000823e612f4b82826121ed565b604081838101031261019c5780519067ffffffffffffffff821161019c57828101601f83830101121561019c578181015191612f868361222e565b93612f9460405195866121ed565b838552820160208483850101011161019c57602092612fba9184808701918501016123a3565b01519838612dde565b60848b604051907f220266b6000000000000000000000000000000000000000000000000000000008252600482015260406024820152601e60448201527f41413331207061796d6173746572206465706f73697420746f6f206c6f7700006064820152fd5b608490604051907f220266b6000000000000000000000000000000000000000000000000000000008252600482015260406024820152601e60448201527f41413236206f76657220766572696669636174696f6e4761734c696d697400006064820152fd5b608482604051907f220266b6000000000000000000000000000000000000000000000000000000008252600482015260406024820152601a60448201527f4141323520696e76616c696
4206163636f756e74206e6f6e63650000000000006064820152fd5b600052600060205260406000208054808c11613113578b9003905538612c9c565b608484604051907f220266b6000000000000000000000000000000000000000000000000000000008252600482015260406024820152601760448201527f41413231206469646e2774207061792070726566756e640000000000000000006064820152fd5b9091506020813d6020116131a4575b81613194602093836121ed565b8101031261019c57519038612be3565b3d9150613187565b508060005260006020526040600020548a81116000146131d75750612bd7602060005b915050612b92565b6020612bd7918c036131cf565b833b61345a57604088510151602060405180927f570e1a360000000000000000000000000000000000000000000000000000000082528260048301528160008161323260248201898b612709565b039273ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000efc2c1444ebcc4db75e7613d20c6a62ff67a167c1690f1908115610db75760009161343b575b5073ffffffffffffffffffffffffffffffffffffffff811680156133d6578503613371573b1561330c5760141161019c5773ffffffffffffffffffffffffffffffffffffffff9183887fd51a9c61267aa6196961883ecf5ff2da6619c37dac0fa92122513fb32c032d2d604060e0958787602086015195510151168251913560601c82526020820152a391612b6c565b60848d604051907f220266b6000000000000000000000000000000000000000000000000000000008252600482015260406024820152602060448201527f4141313520696e6974436f6465206d757374206372656174652073656e6465726064820152fd5b60848e604051907f220266b6000000000000000000000000000000000000000000000000000000008252600482015260406024820152602060448201527f4141313420696e6974436f6465206d7573742072657475726e2073656e6465726064820152fd5b60848f604051907f220266b6000000000000000000000000000000000000000000000000000000008252600482015260406024820152601b60448201527f4141313320696e6974436f6465206661696c6564206f72204f4f4700000000006064820152fd5b613454915060203d602011610db057610da181836121ed565b3861327c565b60848d604051907f220266b6000000000000000000000000000000000000000000000000000000008252600482015260406024820152601f60448201527f414131302073656e64657220616c726561647920636f6e7374727563746564006064820152fd5
b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f41413934206761732076616c756573206f766572666c6f7700000000000000006044820152fd5b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f4141393320696e76616c6964207061796d6173746572416e64446174610000006044820152fd5b5050600060e087015260006080870152600060a0870152612ac9565b9092915a906060810151916040928351967fffffffff00000000000000000000000000000000000000000000000000000000886135d7606084018461284b565b600060038211613b9f575b7f8dd7712f0000000000000000000000000000000000000000000000000000000094168403613a445750505061379d6000926136b292602088015161363a8a5193849360208501528b602485015260648401906128ec565b90604483015203906136727fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0928381018352826121ed565b61379189519485927e42dc5300000000000000000000000000000000000000000000000000000000602085015261020060248501526102248401906123c6565b613760604484018b60806101a091805173ffffffffffffffffffffffffffffffffffffffff808251168652602082015160208701526040820151604087015260608201516060870152838201518487015260a082015160a087015260c082015160c087015260e08201511660e0860152610100808201519086015261012080910151908501526020810151610140850152604081015161016085015260608101516101808501520151910152565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdc83820301610204840152876123c6565b039081018352826121ed565b6020918183809351910182305af1600051988652156137bf575b505050505050565b909192939495965060003d8214613a3a575b7fdeaddead00000000000000000000000000000000000000000000000000000000810361385b57608487878051917f220266b600000000000000000000000000000000000000000000000000000000835260048301526024820152600f60448201527f41413935206f7574206f662067617300000000000000000000000000000000006064820152fd5b7fdeadaa510000000000000000000000000000000000000000000000000000000091929395949650146000146138c55750506138a961389e6138b8935a90612443565b60808501519061240
9565b9083015183611d748295613d65565b905b3880808080806137b7565b909261395290828601518651907ff62676f440ff169a3a9afdbf812e89e7f95975ee8e5c31214ffdef631c5f479273ffffffffffffffffffffffffffffffffffffffff9580878551169401516139483d610800808211613a32575b508a519084818301018c5280825260008583013e8a805194859485528401528a8301906123c6565b0390a35a90612443565b916139636080860193845190612409565b926000905a94829488519761397789613ccc565b948260e08b0151168015600014613a1857505050875116955b5a9003019560a06060820151910151019051860390818111613a04575b5050840290850151928184106000146139de57505080611e68575090816139d89293611d7481613d65565b906138ba565b6139ee9082849397950390613c98565b50611e68575090826139ff92613cf6565b6139d8565b6064919003600a02049094019338806139ad565b90919892509751613a2a575b50613990565b955038613a24565b905038613920565b8181803e516137d1565b613b97945082935090613a8c917e42dc53000000000000000000000000000000000000000000000000000000006020613b6b9501526102006024860152610224850191612709565b613b3a604484018860806101a091805173ffffffffffffffffffffffffffffffffffffffff808251168652602082015160208701526040820151604087015260608201516060870152838201518487015260a082015160a087015260c082015160c087015260e08201511660e0860152610100808201519086015261012080910151908501526020810151610140850152604081015161016085015260608101516101808501520151910152565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdc83820301610204840152846123c6565b037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081018952886121ed565b60008761379d565b5081356135e2565b73ffffffffffffffffffffffffffffffffffffffff168015613c3a57600080809381935af1613bd4612450565b5015613bdc57565b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f41413931206661696c65642073656e6420746f2062656e6566696369617279006044820152fd5b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f4141393020696e76616c69642062656e656669636961727900000000000
000006044820152fd5b73ffffffffffffffffffffffffffffffffffffffff166000526000602052613cc66040600020918254612409565b80915590565b610120610100820151910151808214613cf257480180821015613ced575090565b905090565b5090565b9190917f49628fd1471006c1482da88028e9ce4dbb080b815c9b0344d39e5a8e6ec1419f6080602083015192519473ffffffffffffffffffffffffffffffffffffffff946020868851169660e089015116970151916040519283526000602084015260408301526060820152a4565b60208101519051907f67b4fa9642f42120bf031f3051d1824b0fe25627945b27b8a6a65d5761d5482e60208073ffffffffffffffffffffffffffffffffffffffff855116940151604051908152a3565b613dcd604092959493956060835260608301906128ec565b9460208201520152565b8015613e6457600060408051613dec816121d1565b828152826020820152015273ffffffffffffffffffffffffffffffffffffffff811690604065ffffffffffff91828160a01c16908115613e5c575b60d01c92825191613e37836121d1565b8583528460208401521691829101524211908115613e5457509091565b905042109091565b839150613e27565b5060009060009056fea2646970667358221220b094fd69f04977ae9458e5ba422d01cd2d20dbcfca0992ff37f19aa07deec25464736f6c63430008170033", + "nonce": "0x1" + }, + "000f3df6d732807ef1319fb7b8bb8522d0beac02": { + "balance": "0x0", + "code": "0x3373fffffffffffffffffffffffffffffffffffffffe14604d57602036146024575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f5260205ff35b5f5ffd5b62001fff42064281555f359062001fff015500", + "nonce": "0x1" + }, + "02484cb50aac86eae85610d6f4bf026f30f6627d": { + "balance": "0x21e19e0c9bab2400000" + }, + "08135da0a343e492fa2d4282f2ae34c6c5cc1bbe": { + "balance": "0x21e19e0c9bab2400000" + }, + "09db0a93b389bef724429898f539aeb7ac2dd55f": { + "balance": "0x21e19e0c9bab2400000" + }, + "0b799c86a49deeb90402691f1041aa3af2d3c875": { + "balance": "0x0", + "nonce": "0x1" + }, + "13b0d85ccb8bf860b6b79af3029fca081ae9bef2": { + "balance": "0x0", + "code": 
"0x6080604052600436106100435760003560e01c8063076c37b21461004f578063481286e61461007157806356299481146100ba57806366cfa057146100da57600080fd5b3661004a57005b600080fd5b34801561005b57600080fd5b5061006f61006a366004610327565b6100fa565b005b34801561007d57600080fd5b5061009161008c366004610327565b61014a565b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200160405180910390f35b3480156100c657600080fd5b506100916100d5366004610349565b61015d565b3480156100e657600080fd5b5061006f6100f53660046103ca565b610172565b61014582826040518060200161010f9061031a565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe082820381018352601f90910116604052610183565b505050565b600061015683836102e7565b9392505050565b600061016a8484846102f0565b949350505050565b61017d838383610183565b50505050565b6000834710156101f4576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f437265617465323a20696e73756666696369656e742062616c616e636500000060448201526064015b60405180910390fd5b815160000361025f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f437265617465323a2062797465636f6465206c656e677468206973207a65726f60448201526064016101eb565b8282516020840186f5905073ffffffffffffffffffffffffffffffffffffffff8116610156576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f437265617465323a204661696c6564206f6e206465706c6f790000000000000060448201526064016101eb565b60006101568383305b6000604051836040820152846020820152828152600b8101905060ff815360559020949350505050565b61014e806104ad83390190565b6000806040838503121561033a57600080fd5b50508035926020909101359150565b60008060006060848603121561035e57600080fd5b8335925060208401359150604084013573ffffffffffffffffffffffffffffffffffffffff8116811461039057600080fd5b809150509250925092565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6000806000606084860312156103df57600080fd5b8335925060208
401359150604084013567ffffffffffffffff8082111561040557600080fd5b818601915086601f83011261041957600080fd5b81358181111561042b5761042b61039b565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f011681019083821181831017156104715761047161039b565b8160405282815289602084870101111561048a57600080fd5b826020860160208301376000602084830101528095505050505050925092509256fe608060405234801561001057600080fd5b5061012e806100206000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063249cb3fa14602d575b600080fd5b603c603836600460b1565b604e565b60405190815260200160405180910390f35b60008281526020818152604080832073ffffffffffffffffffffffffffffffffffffffff8516845290915281205460ff16608857600060aa565b7fa2ef4600d742022d532d4747cb3547474667d6f13804902513b2ec01c848f4b45b9392505050565b6000806040838503121560c357600080fd5b82359150602083013573ffffffffffffffffffffffffffffffffffffffff8116811460ed57600080fd5b80915050925092905056fea26469706673582212205ffd4e6cede7d06a5daf93d48d0541fc68189eeb16608c1999a82063b666eb1164736f6c63430008130033a2646970667358221220fdc4a0fe96e3b21c108ca155438d37c9143fb01278a3c1d274948bad89c564ba64736f6c63430008130033", + "nonce": "0x1" + }, + "cd3b766ccdd6ae721141f452c550ca635964ce71": { + "balance": "0x21e19e0c9bab2400000" + }, + "dd2fd4581271e230360230f9337d5c0430bf44c0": { + "balance": "0x21e19e0c9bab2400000" + }, + "df37f81daad2b0327a0a50003740e1c935c70913": { + "balance": "0x21e19e0c9bab2400000" + }, + "df3e18d64bc6a983f673ab319ccae4f1a57c7097": { + "balance": "0x21e19e0c9bab2400000" + }, + "efc2c1444ebcc4db75e7613d20c6a62ff67a167c": { + "balance": "0x0", + "code": 
"0x6080600436101561000f57600080fd5b6000803560e01c63570e1a361461002557600080fd5b3461018a5760207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261018a576004359167ffffffffffffffff9081841161018657366023850112156101865783600401358281116101825736602482870101116101825780601411610182577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffec810192808411610155577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0603f81600b8501160116830190838210908211176101555792846024819482600c60209a968b9960405286845289840196603889018837830101525193013560601c5af1908051911561014d575b5073ffffffffffffffffffffffffffffffffffffffff60405191168152f35b90503861012e565b6024857f4e487b710000000000000000000000000000000000000000000000000000000081526041600452fd5b8380fd5b8280fd5b80fdfea26469706673582212207adef8895ad3393b02fab10a111d85ea80ff35366aa43995f4ea20e67f29200664736f6c63430008170033", + "nonce": "0x1" + }, + "f39fd6e51aad88f6f4ce6ab8827279cfffb92266": { + "balance": "0x21e19e0c9bab2400000" + }, + "fabb0ac9d68b0b445fb7357272ff202c5651694a": { + "balance": "0x21e19e0c9bab2400000" + }, + "fb1bffc9d739b8d520daf37df666da4c687191ea": { + "balance": "0x0", + "code": 
"0x6080604052600436106101dc5760003560e01c8063affed0e011610102578063e19a9dd911610095578063f08a032311610064578063f08a032314611647578063f698da2514611698578063f8dc5dd9146116c3578063ffa1ad741461173e57610231565b8063e19a9dd91461139b578063e318b52b146113ec578063e75235b81461147d578063e86637db146114a857610231565b8063cc2f8452116100d1578063cc2f8452146110e8578063d4d9bdcd146111b5578063d8d11f78146111f0578063e009cfde1461132a57610231565b8063affed0e014610d94578063b4faba0914610dbf578063b63e800d14610ea7578063c4ca3a9c1461101757610231565b80635624b25b1161017a5780636a761202116101495780636a761202146109945780637d83297414610b50578063934f3a1114610bbf578063a0e67e2b14610d2857610231565b80635624b25b146107fb5780635ae6bd37146108b9578063610b592514610908578063694e80c31461095957610231565b80632f54bf6e116101b65780632f54bf6e146104d35780633408e4701461053a578063468721a7146105655780635229073f1461067a57610231565b80630d582f131461029e57806312fb68e0146102f95780632d9ad53d1461046c57610231565b36610231573373ffffffffffffffffffffffffffffffffffffffff167f3d0ce9bfc3ed7d6862dbb28b2dea94561fe714a1b4d019aa8af39730d1ad7c3d346040518082815260200191505060405180910390a2005b34801561023d57600080fd5b5060007f6c9a6c4a39284e37ed1cf53d337577d14212a4870fb976a4366c693b939918d560001b905080548061027257600080f35b36600080373360601b365260008060143601600080855af13d6000803e80610299573d6000fd5b3d6000f35b3480156102aa57600080fd5b506102f7600480360360408110156102c157600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803590602001909291905050506117ce565b005b34801561030557600080fd5b5061046a6004803603608081101561031c57600080fd5b81019080803590602001909291908035906020019064010000000081111561034357600080fd5b82018360208201111561035557600080fd5b8035906020019184600183028401116401000000008311171561037757600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f820116905080830192505050505050509192919290803590602001906401000000008111156103da57600080fd5b8201836020820111156103e
c57600080fd5b8035906020019184600183028401116401000000008311171561040e57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f82011690508083019250505050505050919291929080359060200190929190505050611bbe565b005b34801561047857600080fd5b506104bb6004803603602081101561048f57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050612440565b60405180821515815260200191505060405180910390f35b3480156104df57600080fd5b50610522600480360360208110156104f657600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050612512565b60405180821515815260200191505060405180910390f35b34801561054657600080fd5b5061054f6125e4565b6040518082815260200191505060405180910390f35b34801561057157600080fd5b506106626004803603608081101561058857600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190803590602001906401000000008111156105cf57600080fd5b8201836020820111156105e157600080fd5b8035906020019184600183028401116401000000008311171561060357600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f820116905080830192505050505050509192919290803560ff1690602001909291905050506125f1565b60405180821515815260200191505060405180910390f35b34801561068657600080fd5b506107776004803603608081101561069d57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190803590602001906401000000008111156106e457600080fd5b8201836020820111156106f657600080fd5b8035906020019184600183028401116401000000008311171561071857600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f820116905080830192505050505050509192919290803560ff1690602001909291905050506126fc565b60405180831515815260200180602001828103825283818151815260200191508051906020019080838360005b838110156107bf5780820151818401526020810190506107a4565b50505050905090810190601f16801
56107ec5780820380516001836020036101000a031916815260200191505b50935050505060405180910390f35b34801561080757600080fd5b5061083e6004803603604081101561081e57600080fd5b810190808035906020019092919080359060200190929190505050612732565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561087e578082015181840152602081019050610863565b50505050905090810190601f1680156108ab5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b3480156108c557600080fd5b506108f2600480360360208110156108dc57600080fd5b81019080803590602001909291905050506127b9565b6040518082815260200191505060405180910390f35b34801561091457600080fd5b506109576004803603602081101561092b57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506127d1565b005b34801561096557600080fd5b506109926004803603602081101561097c57600080fd5b8101908080359060200190929190505050612b63565b005b610b3860048036036101408110156109ab57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190803590602001906401000000008111156109f257600080fd5b820183602082011115610a0457600080fd5b80359060200191846001830284011164010000000083111715610a2657600080fd5b9091929391929390803560ff169060200190929190803590602001909291908035906020019092919080359060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190640100000000811115610ab257600080fd5b820183602082011115610ac457600080fd5b80359060200191846001830284011164010000000083111715610ae657600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f820116905080830192505050505050509192919290505050612c9d565b60405180821515815260200191505060405180910390f35b348015610b5c57600080fd5b50610ba960048036036040811015610b7357600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190505050612edc565b6040518082815260200191505
060405180910390f35b348015610bcb57600080fd5b50610d2660048036036060811015610be257600080fd5b810190808035906020019092919080359060200190640100000000811115610c0957600080fd5b820183602082011115610c1b57600080fd5b80359060200191846001830284011164010000000083111715610c3d57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f82011690508083019250505050505050919291929080359060200190640100000000811115610ca057600080fd5b820183602082011115610cb257600080fd5b80359060200191846001830284011164010000000083111715610cd457600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f820116905080830192505050505050509192919290505050612f01565b005b348015610d3457600080fd5b50610d3d612f90565b6040518080602001828103825283818151815260200191508051906020019060200280838360005b83811015610d80578082015181840152602081019050610d65565b505050509050019250505060405180910390f35b348015610da057600080fd5b50610da9613139565b6040518082815260200191505060405180910390f35b348015610dcb57600080fd5b50610ea560048036036040811015610de257600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190640100000000811115610e1f57600080fd5b820183602082011115610e3157600080fd5b80359060200191846001830284011164010000000083111715610e5357600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f82011690508083019250505050505050919291929050505061313f565b005b348015610eb357600080fd5b506110156004803603610100811015610ecb57600080fd5b8101908080359060200190640100000000811115610ee857600080fd5b820183602082011115610efa57600080fd5b80359060200191846020830284011164010000000083111715610f1c57600080fd5b909192939192939080359060200190929190803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190640100000000811115610f6757600080fd5b820183602082011115610f7957600080fd5b80359060200191846001830284011164010000000083111715610f9b57600080fd5b90919
29391929390803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050613161565b005b34801561102357600080fd5b506110d26004803603608081101561103a57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803590602001909291908035906020019064010000000081111561108157600080fd5b82018360208201111561109357600080fd5b803590602001918460018302840111640100000000831117156110b557600080fd5b9091929391929390803560ff16906020019092919050505061331f565b6040518082815260200191505060405180910390f35b3480156110f457600080fd5b506111416004803603604081101561110b57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190505050613447565b60405180806020018373ffffffffffffffffffffffffffffffffffffffff168152602001828103825284818151815260200191508051906020019060200280838360005b838110156111a0578082015181840152602081019050611185565b50505050905001935050505060405180910390f35b3480156111c157600080fd5b506111ee600480360360208110156111d857600080fd5b8101908080359060200190929190505050613639565b005b3480156111fc57600080fd5b50611314600480360361014081101561121457600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803590602001909291908035906020019064010000000081111561125b57600080fd5b82018360208201111561126d57600080fd5b8035906020019184600183028401116401000000008311171561128f57600080fd5b9091929391929390803560ff169060200190929190803590602001909291908035906020019092919080359060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803590602001909291905050506137d8565b6040518082815260200191505060405180910390f35b34801561133657600080fd5b506113996004803603604081101561134d57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803573fffffffffffffffffffffffffffffffffff
fffff169060200190929190505050613805565b005b3480156113a757600080fd5b506113ea600480360360208110156113be57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050613b96565b005b3480156113f857600080fd5b5061147b6004803603606081101561140f57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050613c1a565b005b34801561148957600080fd5b5061149261428c565b6040518082815260200191505060405180910390f35b3480156114b457600080fd5b506115cc60048036036101408110156114cc57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803590602001909291908035906020019064010000000081111561151357600080fd5b82018360208201111561152557600080fd5b8035906020019184600183028401116401000000008311171561154757600080fd5b9091929391929390803560ff169060200190929190803590602001909291908035906020019092919080359060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190505050614296565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561160c5780820151818401526020810190506115f1565b50505050905090810190601f1680156116395780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561165357600080fd5b506116966004803603602081101561166a57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919050505061443e565b005b3480156116a457600080fd5b506116ad61449f565b6040518082815260200191505060405180910390f35b3480156116cf57600080fd5b5061173c600480360360608110156116e657600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803573ffffffffffffffffffffffffffffffffffffffff1690602001909291908035906020019092919050505061451d565b005b34801561174a57600080fd5b50611753614950565b60405180806020018281038252838181518152602001915080519
06020019080838360005b83811015611793578082015181840152602081019050611778565b50505050905090810190601f1680156117c05780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b6117d6614989565b600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff16141580156118405750600173ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614155b801561187857503073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614155b6118ea576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475332303300000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b600073ffffffffffffffffffffffffffffffffffffffff16600260008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16146119eb576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475332303400000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b60026000600173ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16600260008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055508160026000600173ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550600360008154809291906001019
1905055507f9465fa0c962cc76958e6373a993326400c1c94f8be2fe3a952adfa7f60b2ea2682604051808273ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a18060045414611bba57611bb981612b63565b5b5050565b611bd2604182614a2c90919063ffffffff16565b82511015611c48576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475330323000000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b6000808060008060005b8681101561243457611c648882614a66565b80945081955082965050505060008460ff16141561206d578260001c9450611c96604188614a2c90919063ffffffff16565b8260001c1015611d0e576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475330323100000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b8751611d2760208460001c614a9590919063ffffffff16565b1115611d9b576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475330323200000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b60006020838a01015190508851611dd182611dc360208760001c614a9590919063ffffffff16565b614a9590919063ffffffff16565b1115611e45576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475330323300000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b60606020848b010190506320c13b0b60e01b7bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19168773ffffffffffffffffffffffffffffffffffffffff166320c13b0b8d846040518363ffffffff1660e01b8152600401808060200180602001838103835285818151815260200191508051906020019080838360005b83811015611ee7578082015181840152602081019050611ecc565b50505050905090810190601f168015611f145780820380516001836020036101000a031916815260200191505b50838103825284818151815260200191508051906020019080838360005b83811015611f4d578082015
181840152602081019050611f32565b50505050905090810190601f168015611f7a5780820380516001836020036101000a031916815260200191505b5094505050505060206040518083038186803b158015611f9957600080fd5b505afa158015611fad573d6000803e3d6000fd5b505050506040513d6020811015611fc357600080fd5b81019080805190602001909291905050507bffffffffffffffffffffffffffffffffffffffffffffffffffffffff191614612066576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475330323400000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b50506122b2565b60018460ff161415612181578260001c94508473ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16148061210a57506000600860008773ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008c81526020019081526020016000205414155b61217c576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475330323500000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b6122b1565b601e8460ff1611156122495760018a60405160200180807f19457468657265756d205369676e6564204d6573736167653a0a333200000000815250601c018281526020019150506040516020818303038152906040528051906020012060048603858560405160008152602001604052604051808581526020018460ff1681526020018381526020018281526020019450505050506020604051602081039080840390855afa158015612238573d6000803e3d6000fd5b5050506020604051035194506122b0565b60018a85858560405160008152602001604052604051808581526020018460ff1681526020018381526020018281526020019450505050506020604051602081039080840390855afa1580156122a3573d6000803e3d6000fd5b5050506020604051035194505b5b5b8573ffffffffffffffffffffffffffffffffffffffff168573ffffffffffffffffffffffffffffffffffffffff161180156123795750600073ffffffffffffffffffffffffffffffffffffffff16600260008773ffffffffffffffffffffffffffffffffffffffff1673fffffffffffffffffff
fffffffffffffffffffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1614155b80156123b25750600173ffffffffffffffffffffffffffffffffffffffff168573ffffffffffffffffffffffffffffffffffffffff1614155b612424576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475330323600000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b8495508080600101915050611c52565b50505050505050505050565b60008173ffffffffffffffffffffffffffffffffffffffff16600173ffffffffffffffffffffffffffffffffffffffff161415801561250b5750600073ffffffffffffffffffffffffffffffffffffffff16600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1614155b9050919050565b6000600173ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff16141580156125dd5750600073ffffffffffffffffffffffffffffffffffffffff16600260008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1614155b9050919050565b6000804690508091505090565b60007fb648d3644f584ed1c2232d53c46d87e693586486ad0d1175f8656013110b714e3386868686604051808673ffffffffffffffffffffffffffffffffffffffff1681526020018573ffffffffffffffffffffffffffffffffffffffff1681526020018481526020018060200183600181111561266b57fe5b8152602001828103825284818151815260200191508051906020019080838360005b838110156126a857808201518184015260208101905061268d565b50505050905090810190601f1680156126d55780820380516001836020036101000a031916815260200191505b50965050505050505060405180910390a16126f285858585614ab4565b9050949350505050565b6000606061270c868686866125f1565b915
060405160203d0181016040523d81523d6000602083013e8091505094509492505050565b606060006020830267ffffffffffffffff8111801561275057600080fd5b506040519080825280601f01601f1916602001820160405280156127835781602001600182028036833780820191505090505b50905060005b838110156127ae57808501548060208302602085010152508080600101915050612789565b508091505092915050565b60076020528060005260406000206000915090505481565b6127d9614989565b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16141580156128435750600173ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1614155b6128b5576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475331303100000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b600073ffffffffffffffffffffffffffffffffffffffff16600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16146129b6576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475331303200000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b60016000600173ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055508060016000600173ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a81548173ffffffffffffffffffffffffffffffffffffffff02191
6908373ffffffffffffffffffffffffffffffffffffffff1602179055507fecdf3a3effea5783a3c4c2140e677577666428d44ed9d474a0b3a4c9943f844081604051808273ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a150565b612b6b614989565b600354811115612be3576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475332303100000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b6001811015612c5a576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475332303200000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b806004819055507f610f7ff2b304ae8903c3de74c60c6ab1f7d6226b3f52c5161905bb5ad4039c936004546040518082815260200191505060405180910390a150565b6000606060055433600454604051602001808481526020018373ffffffffffffffffffffffffffffffffffffffff168152602001828152602001935050505060405160208183030381529060405290507f66753cd2356569ee081232e3be8909b950e0a76c1f8460c3a5e3c2be32b11bed8d8d8d8d8d8d8d8d8d8d8d8c604051808d73ffffffffffffffffffffffffffffffffffffffff1681526020018c8152602001806020018a6001811115612d5057fe5b81526020018981526020018881526020018781526020018673ffffffffffffffffffffffffffffffffffffffff1681526020018573ffffffffffffffffffffffffffffffffffffffff168152602001806020018060200184810384528e8e82818152602001925080828437600081840152601f19601f820116905080830192505050848103835286818151815260200191508051906020019080838360005b83811015612e0a578082015181840152602081019050612def565b50505050905090810190601f168015612e375780820380516001836020036101000a031916815260200191505b50848103825285818151815260200191508051906020019080838360005b83811015612e70578082015181840152602081019050612e55565b50505050905090810190601f168015612e9d5780820380516001836020036101000a031916815260200191505b509f5050505050505050505050505050505060405180910390a1612eca8d8d8d8d8d8d8d8d8d8d8d614c9a565b9150509b9a5050505050505050505050565b600
8602052816000526040600020602052806000526040600020600091509150505481565b6000600454905060008111612f7e576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475330303100000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b612f8a84848484611bbe565b50505050565b6060600060035467ffffffffffffffff81118015612fad57600080fd5b50604051908082528060200260200182016040528015612fdc5781602001602082028036833780820191505090505b50905060008060026000600173ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1690505b600173ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1614613130578083838151811061308757fe5b602002602001019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff1681525050600260008273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1690508180600101925050613046565b82935050505090565b60055481565b600080825160208401855af4806000523d6020523d600060403e60403d016000fd5b6131ac8a8a80806020026020016040519081016040528093929190818152602001838360200280828437600081840152601f19601f82011690508083019250505050505050896151d7565b600073ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff16146131ea576131e9846156d7565b5b6132388787878080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f82011690508083019250505050505050615706565b60008211156132525761325082600060018685615941565b505b3373ffffffffffffffffffffffffffffffffffffffff167f141df868a6331af528e38c83b7aa03edc19be66e37ae67f9285bf4f8e3c6a1a88b8b8b8b8960405180806020018581526020018473ffffffffffffffffffffffffffffffffffffffff1681526020018373fffffffffffffffffffff
fffffffffffffffffff1681526020018281038252878782818152602001925060200280828437600081840152601f19601f820116905080830192505050965050505050505060405180910390a250505050505050505050565b6000805a9050613376878787878080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f82011690508083019250505050505050865a615b47565b61337f57600080fd5b60005a8203905080604051602001808281526020019150506040516020818303038152906040526040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825283818151815260200191508051906020019080838360005b8381101561340c5780820151818401526020810190506133f1565b50505050905090810190601f1680156134395780820380516001836020036101000a031916815260200191505b509250505060405180910390fd5b606060008267ffffffffffffffff8111801561346257600080fd5b506040519080825280602002602001820160405280156134915781602001602082028036833780820191505090505b509150600080600160008773ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1690505b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16141580156135645750600173ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1614155b801561356f57508482105b1561362a578084838151811061358157fe5b602002602001019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff1681525050600160008273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905081806001019250506134fa565b80925081845250509250929050565b600073ffffffffffffffffffffffffffffffffffffffff16600260003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900473fffffffffffffffffffffffffffffffffff
fffff1673ffffffffffffffffffffffffffffffffffffffff16141561373b576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475330333000000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b6001600860003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000838152602001908152602001600020819055503373ffffffffffffffffffffffffffffffffffffffff16817ff2a0eb156472d1440255b0d7c1e19cc07115d1051fe605b0dce69acfec884d9c60405160405180910390a350565b60006137ed8c8c8c8c8c8c8c8c8c8c8c614296565b8051906020012090509b9a5050505050505050505050565b61380d614989565b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16141580156138775750600173ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1614155b6138e9576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475331303100000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b8073ffffffffffffffffffffffffffffffffffffffff16600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16146139e9576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475331303300000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b600160008273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a81548173fffffff
fffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506000600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055507faab4fa2b463f581b2b32cb3b7e3b704b9ce37cc209b5fb4d77e593ace405427681604051808273ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a15050565b613b9e614989565b60007f4a204f620c8c5ccdca3fd54d003badd85ba500436a431f0cbda4f558c93c34c860001b90508181557f1151116914515bc0891ff9047a6cb32cf902546f83066499bcf8ba33d2353fa282604051808273ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a15050565b613c22614989565b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1614158015613c8c5750600173ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1614155b8015613cc457503073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1614155b613d36576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475332303300000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b600073ffffffffffffffffffffffffffffffffffffffff16600260008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1614613e37576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475332303400000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614158015613ea15750600173fffffffffffffffffffffff
fffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614155b613f13576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475332303300000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b8173ffffffffffffffffffffffffffffffffffffffff16600260008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1614614013576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475332303500000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b600260008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16600260008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080600260008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506000600260008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055507ff8d49fc529812e9a7c5c50e69c20f0dccc0db8fa95c98bc58cc9a4f1c1299eaf82604051808273ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a17f9465fa0c962cc76958e6373a993326400c1c94f8be2fe3a952adfa7f60b2ea2681604051808273fffffff
fffffffffffffffffffffffffffffffff16815260200191505060405180910390a1505050565b6000600454905090565b606060007fbb8310d486368db6bd6f849402fdd73ad53d316b5a4b2644ad6efe0f941286d860001b8d8d8d8d60405180838380828437808301925050509250505060405180910390208c8c8c8c8c8c8c604051602001808c81526020018b73ffffffffffffffffffffffffffffffffffffffff1681526020018a815260200189815260200188600181111561432757fe5b81526020018781526020018681526020018581526020018473ffffffffffffffffffffffffffffffffffffffff1681526020018373ffffffffffffffffffffffffffffffffffffffff1681526020018281526020019b505050505050505050505050604051602081830303815290604052805190602001209050601960f81b600160f81b6143b361449f565b8360405160200180857effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff19168152600101847effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191681526001018381526020018281526020019450505050506040516020818303038152906040529150509b9a5050505050505050505050565b614446614989565b61444f816156d7565b7f5ac6c46c93c8d0e53714ba3b53db3e7c046da994313d7ed0d192028bc7c228b081604051808273ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a150565b60007f47e79534a245952e8b16893a336b85a3d9ea9fa8c573f3d803afb92a7946921860001b6144cd6125e4565b30604051602001808481526020018381526020018273ffffffffffffffffffffffffffffffffffffffff168152602001935050505060405160208183030381529060405280519060200120905090565b614525614989565b8060016003540310156145a0576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475332303100000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff161415801561460a5750600173ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614155b61467c576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f4753323
03300000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b8173ffffffffffffffffffffffffffffffffffffffff16600260008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff161461477c576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475332303500000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b600260008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16600260008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506000600260008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550600360008154809291906001900391905055507ff8d49fc529812e9a7c5c50e69c20f0dccc0db8fa95c98bc58cc9a4f1c1299eaf82604051808273ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a1806004541461494b5761494a81612b63565b5b505050565b6040518060400160405280600581526020017f312e332e3000000000000000000000000000000000000000000000000000000081525081565b3073ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614614a2a576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475330333100000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b565b600080831415614a3f5
760009050614a60565b6000828402905082848281614a5057fe5b0414614a5b57600080fd5b809150505b92915050565b60008060008360410260208101860151925060408101860151915060ff60418201870151169350509250925092565b600080828401905083811015614aaa57600080fd5b8091505092915050565b6000600173ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614158015614b7f5750600073ffffffffffffffffffffffffffffffffffffffff16600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1614155b614bf1576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475331303400000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b614bfe858585855a615b47565b90508015614c4e573373ffffffffffffffffffffffffffffffffffffffff167f6895c13664aa4f67288b25d7a21d7aaa34916e355fb9b6fae0a139a9085becb860405160405180910390a2614c92565b3373ffffffffffffffffffffffffffffffffffffffff167facd2c8702804128fdb0db2bb49f6d127dd0181c13fd45dbfe16de0930e2bd37560405160405180910390a25b949350505050565b6000806000614cb48e8e8e8e8e8e8e8e8e8e600554614296565b905060056000815480929190600101919050555080805190602001209150614cdd828286612f01565b506000614ce8615b93565b9050600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1614614ece578073ffffffffffffffffffffffffffffffffffffffff166375f0bb528f8f8f8f8f8f8f8f8f8f8f336040518d63ffffffff1660e01b8152600401808d73ffffffffffffffffffffffffffffffffffffffff1681526020018c8152602001806020018a6001811115614d8b57fe5b81526020018981526020018881526020018781526020018673ffffffffffffffffffffffffffffffffffffffff1681526020018573ffffffffffffffffffffffffffffffffffffffff168152602001806020018473ffffffffffffffffffffffffffffffffffffffff16815260200183810383528d8d82818152602001925080828437600081840152601f19601f820116905
080830192505050838103825285818151815260200191508051906020019080838360005b83811015614e5d578082015181840152602081019050614e42565b50505050905090810190601f168015614e8a5780820380516001836020036101000a031916815260200191505b509e505050505050505050505050505050600060405180830381600087803b158015614eb557600080fd5b505af1158015614ec9573d6000803e3d6000fd5b505050505b6101f4614ef56109c48b01603f60408d0281614ee657fe5b04615bc490919063ffffffff16565b015a1015614f6b576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475330313000000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b60005a9050614fd48f8f8f8f8080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f820116905080830192505050505050508e60008d14614fc9578e614fcf565b6109c45a035b615b47565b9350614fe95a82615bde90919063ffffffff16565b90508380614ff8575060008a14155b80615004575060008814155b615076576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475330313300000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b6000808911156150905761508d828b8b8b8b615941565b90505b84156150da577f442e715f626346e8c54381002da614f62bee8d27386535b2521ec8540898556e8482604051808381526020018281526020019250505060405180910390a161511a565b7f23428b18acfb3ea64b08dc0c1d296ea9c09702c09083ca5272e64d115b687d238482604051808381526020018281526020019250505060405180910390a15b5050600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16146151c6578073ffffffffffffffffffffffffffffffffffffffff16639327136883856040518363ffffffff1660e01b815260040180838152602001821515815260200192505050600060405180830381600087803b1580156151ad57600080fd5b505af11580156151c1573d6000803e3d6000fd5b505050505b50509b9a5050505050505050505050565b60006004541461524f576040517f08c379a0000000000000000000000000000000000000000000000000000000008
1526004018080602001828103825260058152602001807f475332303000000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b81518111156152c6576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475332303100000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b600181101561533d576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475332303200000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b60006001905060005b835181101561564357600084828151811061535d57fe5b60200260200101519050600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16141580156153d15750600173ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1614155b801561540957503073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1614155b801561544157508073ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff1614155b6154b3576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475332303300000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b600073ffffffffffffffffffffffffffffffffffffffff16600260008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16146155b4576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475332303400000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b80600260008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815
260200160002060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550809250508080600101915050615346565b506001600260008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550825160038190555081600481905550505050565b60007f6c9a6c4a39284e37ed1cf53d337577d14212a4870fb976a4366c693b939918d560001b90508181555050565b600073ffffffffffffffffffffffffffffffffffffffff1660016000600173ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1614615808576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475331303000000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b6001806000600173ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff161461593d576158ca8260008360015a615b47565b61593c576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475330303000000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b5b5050565b600080600073ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff161461597e5782615980565b325b9050600073ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff161415615a98576159ea3a86106159c7573a6159c9565b855b6159dc888a614a9590919063f
fffffff16565b614a2c90919063ffffffff16565b91508073ffffffffffffffffffffffffffffffffffffffff166108fc839081150290604051600060405180830381858888f19350505050615a93576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475330313100000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b615b3d565b615abd85615aaf888a614a9590919063ffffffff16565b614a2c90919063ffffffff16565b9150615aca848284615bfe565b615b3c576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260058152602001807f475330313200000000000000000000000000000000000000000000000000000081525060200191505060405180910390fd5b5b5095945050505050565b6000600180811115615b5557fe5b836001811115615b6157fe5b1415615b7a576000808551602087018986f49050615b8a565b600080855160208701888a87f190505b95945050505050565b6000807f4a204f620c8c5ccdca3fd54d003badd85ba500436a431f0cbda4f558c93c34c860001b9050805491505090565b600081831015615bd45781615bd6565b825b905092915050565b600082821115615bed57600080fd5b600082840390508091505092915050565b60008063a9059cbb8484604051602401808373ffffffffffffffffffffffffffffffffffffffff168152602001828152602001925050506040516020818303038152906040529060e01b6020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff83818316178352505050509050602060008251602084016000896127105a03f13d60008114615ca55760208114615cad5760009350615cb8565b819350615cb8565b600051158215171593505b505050939250505056fea2646970667358221220047fac33099ca576d1c4f1ac6a8abdb0396e42ad6a397d2cb2f4dc1624cc0c5b64736f6c63430007060033", + "nonce": "0x1" + } + }, + "baseFeePerGas": "0x1", + "blobGasUsed": "0x0", + "coinbase": "0x4200000000000000000000000000000000000011", + "config": { + "arrowGlacierBlock": 0, + "bedrockBlock": 0, + "berlinBlock": 0, + "byzantiumBlock": 0, + "cancunTime": 0, + "canyonTime": 0, + "chainId": 901, + "constantinopleBlock": 0, + "depositContractAddress": 
"0x4242424242424242424242424242424242424242", + "ecotoneTime": 0, + "eip150Block": 0, + "eip155Block": 0, + "eip158Block": 0, + "fjordTime": 0, + "graniteTime": 0, + "grayGlacierBlock": 0, + "homesteadBlock": 0, + "istanbulBlock": 0, + "londonBlock": 0, + "mergeNetsplitBlock": 0, + "muirGlacierBlock": 0, + "isthmusTime": 100000000000000, + "pragueTime": 100000000000000, + "blobSchedule": { + "cancun": { + "target": 3, + "max": 6, + "baseFeeUpdateFraction": 3338477 + }, + "prague": { + "target": 6, + "max": 9, + "baseFeeUpdateFraction": 5007716 + }, + "osaka": { + "target": 9, + "max": 12, + "baseFeeUpdateFraction": 5007716 + } + }, + "optimism": { + "eip1559Denominator": 50, + "eip1559DenominatorCanyon": 250, + "eip1559Elasticity": 6 + }, + "petersburgBlock": 0, + "regolithTime": 0, + "shanghaiTime": 0, + "terminalTotalDifficulty": 0, + "terminalTotalDifficultyPassed": true + }, + "difficulty": "0x0", + "excessBlobGas": "0x0", + "extraData": "0x", + "gasLimit": "0x1c9c380", + "gasUsed": "0x0", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "nonce": "0x0", + "number": "0x0", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "timestamp": "" +} \ No newline at end of file diff --git a/rust/rollup-boost/crates/rollup-boost/src/tests/common/test_data/jwt_secret.hex b/rust/rollup-boost/crates/rollup-boost/src/tests/common/test_data/jwt_secret.hex new file mode 100644 index 0000000000000..6e72091cdd521 --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/tests/common/test_data/jwt_secret.hex @@ -0,0 +1 @@ +688f5d737bad920bdfb2fc2f488d6b6209eebda1dae949a8de91398d932c517a diff --git a/rust/rollup-boost/crates/rollup-boost/src/tests/common/test_data/p2p_secret.hex b/rust/rollup-boost/crates/rollup-boost/src/tests/common/test_data/p2p_secret.hex new file mode 100644 index 0000000000000..499ea6cdec436 --- /dev/null +++ 
b/rust/rollup-boost/crates/rollup-boost/src/tests/common/test_data/p2p_secret.hex @@ -0,0 +1 @@ +a11ac89899cd86e36b6fb881ec1255b8a92a688790b7d950f8b7d8dd626671fb diff --git a/rust/rollup-boost/crates/rollup-boost/src/tests/execution_mode.rs b/rust/rollup-boost/crates/rollup-boost/src/tests/execution_mode.rs new file mode 100644 index 0000000000000..a060834a2f44b --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/tests/execution_mode.rs @@ -0,0 +1,97 @@ +use super::common::{RollupBoostTestHarnessBuilder, proxy::BuilderProxyHandler}; +use crate::ExecutionMode; +use futures::FutureExt as _; +use serde_json::Value; +use std::{ + pin::Pin, + sync::{Arc, Mutex}, + time::Duration, +}; + +struct CounterHandler { + counter: Arc>, +} + +impl BuilderProxyHandler for CounterHandler { + fn handle( + &self, + method: String, + _params: Value, + _result: Value, + ) -> Pin> + Send>> { + // Only count Engine API calls, not health check calls + if method != "eth_getBlockByNumber" { + *self.counter.lock().unwrap() += 1; + tracing::info!("Proxy handler intercepted Engine API call: {}", method); + } else { + tracing::debug!("Proxy handler intercepted health check call: {}", method); + } + async move { None }.boxed() + } +} + +#[tokio::test] +async fn execution_mode() -> eyre::Result<()> { + // Create a counter that increases whenever we receive a new RPC call in the builder + let counter = Arc::new(Mutex::new(0)); + let handler = Arc::new(CounterHandler { counter: counter.clone() }); + + let harness = + RollupBoostTestHarnessBuilder::new("execution_mode").proxy_handler(handler).build().await?; + let mut block_generator = harness.block_generator().await?; + + // start creating 5 empty blocks which are processed by the builder + for _ in 0..5 { + let (_block, block_creator) = block_generator.generate_block(false).await?; + assert!(block_creator.is_builder(), "Block creator should be the builder"); + } + + let client = harness.debug_client().await; + + // enable dry run mode + 
{ + let response = client.set_execution_mode(ExecutionMode::DryRun).await.unwrap(); + assert_eq!(response.execution_mode, ExecutionMode::DryRun); + + // the new valid block should be created the the l2 builder + let (_block, block_creator) = block_generator.generate_block(false).await?; + assert!(block_creator.is_l2(), "Block creator should be l2"); + } + + // toggle again dry run mode + { + let response = client.set_execution_mode(ExecutionMode::Enabled).await.unwrap(); + assert_eq!(response.execution_mode, ExecutionMode::Enabled); + + // the new valid block should be created the the builder + let (_block, block_creator) = block_generator.generate_block(false).await?; + assert!(block_creator.is_builder(), "Block creator should be the builder"); + } + + // sleep for 1 second so that it has time to send the last FCU request to the builder + // and there is not a race condition with the disable call + std::thread::sleep(Duration::from_secs(1)); + + tracing::info!("Setting execution mode to disabled"); + + // Set the execution mode to disabled and reset the counter in the proxy to 0 + // to track the number of calls to the builder during the disabled mode which + // should be 0 + { + let response = client.set_execution_mode(ExecutionMode::Disabled).await.unwrap(); + assert_eq!(response.execution_mode, ExecutionMode::Disabled); + + // reset the counter in the proxy + *counter.lock().unwrap() = 0; + + // create 5 blocks which are processed by the l2 clients + for _ in 0..5 { + let (_block, block_creator) = block_generator.generate_block(false).await?; + assert!(block_creator.is_l2(), "Block creator should be l2"); + } + + assert_eq!(*counter.lock().unwrap(), 0, "Number of calls to the builder should be 0",); + } + + Ok(()) +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/tests/fcu_no_block_time_delay.rs b/rust/rollup-boost/crates/rollup-boost/src/tests/fcu_no_block_time_delay.rs new file mode 100644 index 0000000000000..f22b5cb55fe75 --- /dev/null +++ 
b/rust/rollup-boost/crates/rollup-boost/src/tests/fcu_no_block_time_delay.rs @@ -0,0 +1,53 @@ +use super::common::{RollupBoostTestHarnessBuilder, proxy::BuilderProxyHandler}; + +use futures::FutureExt; +use serde_json::Value; +use std::{pin::Pin, sync::Arc, time::Duration}; + +// TODO: Use the same implementation as in builder_full_delay.rs +struct DelayHandler { + delay: Duration, +} + +impl DelayHandler { + pub fn new(delay: Duration) -> Self { + Self { delay } + } +} + +impl BuilderProxyHandler for DelayHandler { + fn handle( + &self, + _method: String, + _params: Value, + _result: Value, + ) -> Pin> + Send>> { + let delay = self.delay; + async move { + tokio::time::sleep(delay).await; + None + } + .boxed() + } +} + +#[tokio::test] +async fn fcu_no_block_time_delay() -> eyre::Result<()> { + // This test ensures that even with delay in between RB and the external builder (50ms) the + // builder can still build the block if there is an avalanche of FCUs without block time delay + let delay = DelayHandler::new(Duration::from_millis(50)); + + let harness = RollupBoostTestHarnessBuilder::new("fcu_no_block_time_delay") + .proxy_handler(Arc::new(delay)) + .build() + .await?; + let mut block_generator = harness.block_generator().await?; + block_generator.set_block_time(Duration::from_millis(0)); + + for _ in 0..30 { + let (_block, block_creator) = block_generator.generate_block(false).await?; + assert!(block_creator.is_builder(), "Block creator should be the builder"); + } + + Ok(()) +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/tests/l2_state_root_computation.rs b/rust/rollup-boost/crates/rollup-boost/src/tests/l2_state_root_computation.rs new file mode 100644 index 0000000000000..18e7ff875cf1a --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/tests/l2_state_root_computation.rs @@ -0,0 +1,68 @@ +use std::{pin::Pin, sync::Arc}; + +use super::common::{RollupBoostTestHarnessBuilder, proxy::BuilderProxyHandler}; +use alloy_primitives::B256; +use 
futures::FutureExt as _; +use serde_json::Value; + +use op_alloy_rpc_types_engine::OpExecutionPayloadEnvelopeV3; + +struct ZeroStateRootHandler; + +impl BuilderProxyHandler for ZeroStateRootHandler { + fn handle( + &self, + method: String, + _params: Value, + _result: Value, + ) -> Pin> + Send>> { + async move { + if method != "engine_getPayloadV3" { + return None; + } + + let mut payload = + serde_json::from_value::(_result).unwrap(); + + // Set state root to zero to simulate builder payload without computed state root + payload.execution_payload.payload_inner.payload_inner.state_root = B256::ZERO; + + let result = serde_json::to_value(&payload).unwrap(); + Some(result) + } + .boxed() + } +} + +#[tokio::test] +async fn test_l2_state_root_computation() -> eyre::Result<()> { + // Test that when builder returns payload with zero state root and L2 state root computation + // is enabled, rollup-boost uses L2 client to compute the correct state root + let harness = RollupBoostTestHarnessBuilder::new("l2_state_root_computation") + .proxy_handler(Arc::new(ZeroStateRootHandler)) + .with_l2_state_root_computation(true) + .build() + .await?; + + let mut block_generator = harness.block_generator().await?; + + // Generate blocks and verify they are processed successfully by the builder + // with L2 computing the correct state root + for _ in 0..3 { + let (_block, block_creator) = block_generator.generate_block(false).await?; + assert!(block_creator.is_builder(), "Block creator should be the builder"); + } + + // Check logs to verify L2 state root computation was used + let logs = std::fs::read_to_string(harness.rollup_boost.args().log_file.clone().unwrap())?; + assert!( + logs.contains("sent FCU to l2 to calculate new state root"), + "Logs should contain message indicating L2 state root computation was used" + ); + assert!( + logs.contains("received new state root payload from l2"), + "Logs should contain message indicating L2 returned corrected payload" + ); + + Ok(()) +} 
diff --git a/rust/rollup-boost/crates/rollup-boost/src/tests/miner_set_max_da_size.rs b/rust/rollup-boost/crates/rollup-boost/src/tests/miner_set_max_da_size.rs new file mode 100644 index 0000000000000..5576eb10be5d0 --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/tests/miner_set_max_da_size.rs @@ -0,0 +1,79 @@ +use common::{RollupBoostTestHarnessBuilder, proxy::BuilderProxyHandler}; +use futures::FutureExt as _; +use serde_json::Value; +use std::pin::Pin; +use std::sync::{Arc, Mutex}; +use std::time::Duration; +use testcontainers::core::client::docker_client_instance; + +mod common; + +#[derive(Debug, Clone, Default)] +struct MaxDaSizeHandler { + found_max_da_value: Arc>, +} + +impl MaxDaSizeHandler { + fn get_found_max_da_value(&self) -> u64 { + *self.found_max_da_value.lock().unwrap() + } +} + +impl BuilderProxyHandler for MaxDaSizeHandler { + fn handle( + &self, + method: String, + params: Value, + _result: Value, + ) -> Pin> + Send>> { + println!("method: {method:?}"); + if method == "miner_setMaxDASize" { + // decode the params + let params: Vec = serde_json::from_value(params).unwrap(); + assert_eq!(params.len(), 2); + *self.found_max_da_value.lock().unwrap() = params[0]; + } + async move { None }.boxed() + } +} + +#[tokio::test] +async fn miner_set_max_da_size() -> eyre::Result<()> { + let handler = Arc::new(MaxDaSizeHandler::default()); + + let harness = RollupBoostTestHarnessBuilder::new("miner_set_max_da_size") + .with_isthmus_block(0) + .proxy_handler(handler.clone()) + .build() + .await?; + let mut block_generator = harness.block_generator().await?; + block_generator.generate_builder_blocks(1).await?; + + let engine_api = harness.engine_api()?; + + // stop the builder + let client = docker_client_instance().await?; + client.pause_container(harness.builder.id()).await?; + tokio::time::sleep(Duration::from_secs(2)).await; + + let first_val = 1000000; + let _res = engine_api.set_max_da_size(first_val, first_val).await; + 
tokio::time::sleep(std::time::Duration::from_secs(1)).await; + let found = handler.get_found_max_da_value(); + assert_eq!(found, 0); + + let second_val = 2000000; + let _res = engine_api.set_max_da_size(second_val, second_val).await; + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + let found = handler.get_found_max_da_value(); + assert_eq!(found, 0); + + // restart the builder + client.unpause_container(harness.builder.id()).await?; + + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + let found = handler.get_found_max_da_value(); + assert_eq!(found, second_val); + + Ok(()) +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/tests/mod.rs b/rust/rollup-boost/crates/rollup-boost/src/tests/mod.rs new file mode 100644 index 0000000000000..bd0facbd308c3 --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/tests/mod.rs @@ -0,0 +1,13 @@ +mod common; + +mod builder_full_delay; +mod builder_returns_incorrect_block; +mod execution_mode; +mod fcu_no_block_time_delay; +mod l2_state_root_computation; +mod no_tx_pool; +mod remote_builder_down; +mod simple; +mod simple_isthmus; +mod simple_isthmus_transition; +mod unhealthy_builder_traffic; diff --git a/rust/rollup-boost/crates/rollup-boost/src/tests/no_tx_pool.rs b/rust/rollup-boost/crates/rollup-boost/src/tests/no_tx_pool.rs new file mode 100644 index 0000000000000..8a2bfea315e82 --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/tests/no_tx_pool.rs @@ -0,0 +1,23 @@ +use super::common::RollupBoostTestHarnessBuilder; + +#[tokio::test] +async fn no_tx_pool() -> eyre::Result<()> { + let harness = RollupBoostTestHarnessBuilder::new("no_tx_pool").build().await?; + let mut block_generator = harness.block_generator().await?; + + // start creating 5 empty blocks which are processed by the L2 builder + for _ in 0..5 { + let (_block, block_creator) = block_generator.generate_block(true).await?; + assert!(block_creator.is_l2(), "Block creator should be l2"); + } + + // process 5 more 
non empty blocks which are processed by the builder. + // The builder should be on sync because it has received the new payload requests from + // rollup-boost. + for _ in 0..5 { + let (_block, block_creator) = block_generator.generate_block(false).await?; + assert!(block_creator.is_builder(), "Block creator should be the builder"); + } + + Ok(()) +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/tests/remote_builder_down.rs b/rust/rollup-boost/crates/rollup-boost/src/tests/remote_builder_down.rs new file mode 100644 index 0000000000000..c36bc837396a1 --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/tests/remote_builder_down.rs @@ -0,0 +1,43 @@ +use std::time::Duration; + +use testcontainers::core::client::docker_client_instance; + +use super::common::RollupBoostTestHarnessBuilder; + +#[tokio::test] +async fn remote_builder_down() -> eyre::Result<()> { + let harness = RollupBoostTestHarnessBuilder::new("remote_builder_down").build().await?; + let mut block_generator = harness.block_generator().await?; + + for _ in 0..3 { + let (_block, block_creator) = block_generator.generate_block(false).await?; + assert!(block_creator.is_builder(), "Block creator should be the builder"); + } + + // stop the builder + let client = docker_client_instance().await?; + client.pause_container(harness.builder.id()).await?; + tokio::time::sleep(Duration::from_secs(2)).await; + + // create 3 new blocks that are processed by the l2 builder + for _ in 0..3 { + let (_block, block_creator) = block_generator.generate_block(false).await?; + assert!(block_creator.is_l2(), "Block creator should be l2"); + } + + client.unpause_container(harness.builder.id()).await?; + + // Generate a new block so that the builder can use the FCU request + // to sync up the missing blocks with the L2 client + let _ = block_generator.generate_block(false).await?; + tokio::time::sleep(Duration::from_secs(2)).await; + + // create 3 new blocks that are processed by the l2 builder because the 
builder is not synced + // with the previous 3 blocks + for _ in 0..3 { + let (_block, block_creator) = block_generator.generate_block(false).await?; + assert!(block_creator.is_builder(), "Block creator should be builder"); + } + + Ok(()) +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/tests/simple.rs b/rust/rollup-boost/crates/rollup-boost/src/tests/simple.rs new file mode 100644 index 0000000000000..69500dd535c50 --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/tests/simple.rs @@ -0,0 +1,11 @@ +use super::common::RollupBoostTestHarnessBuilder; + +#[tokio::test] +async fn test_integration_simple() -> eyre::Result<()> { + let harness = + RollupBoostTestHarnessBuilder::new("simple").with_isthmus_block(5).build().await?; + let mut block_generator = harness.block_generator().await?; + block_generator.generate_builder_blocks(10).await?; + + Ok(()) +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/tests/simple_isthmus.rs b/rust/rollup-boost/crates/rollup-boost/src/tests/simple_isthmus.rs new file mode 100644 index 0000000000000..1498207b0cc46 --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/tests/simple_isthmus.rs @@ -0,0 +1,11 @@ +use super::common::RollupBoostTestHarnessBuilder; + +#[tokio::test] +async fn test_integration_simple_isthmus() -> eyre::Result<()> { + let harness = + RollupBoostTestHarnessBuilder::new("simple_isthmus").with_isthmus_block(0).build().await?; + let mut block_generator = harness.block_generator().await?; + block_generator.generate_builder_blocks(10).await?; + + Ok(()) +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/tests/simple_isthmus_transition.rs b/rust/rollup-boost/crates/rollup-boost/src/tests/simple_isthmus_transition.rs new file mode 100644 index 0000000000000..c429c04875138 --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/tests/simple_isthmus_transition.rs @@ -0,0 +1,13 @@ +use super::common::RollupBoostTestHarnessBuilder; + +#[tokio::test] +async fn 
test_integration_simple_isthmus_transition() -> eyre::Result<()> { + let harness = RollupBoostTestHarnessBuilder::new("simple_isthmus_transition") + .with_isthmus_block(5) + .build() + .await?; + let mut block_generator = harness.block_generator().await?; + block_generator.generate_builder_blocks(10).await?; + + Ok(()) +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/tests/unhealthy_builder_traffic.rs b/rust/rollup-boost/crates/rollup-boost/src/tests/unhealthy_builder_traffic.rs new file mode 100644 index 0000000000000..6fa2579bec9e4 --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/tests/unhealthy_builder_traffic.rs @@ -0,0 +1,96 @@ +use super::common::{RollupBoostTestHarnessBuilder, proxy::BuilderProxyHandler}; +use crate::ExecutionMode; +use futures::FutureExt as _; +use serde_json::Value; +use std::{ + pin::Pin, + sync::{Arc, Mutex}, + time::Duration, +}; + +struct CounterHandler { + counter: Arc>, +} + +impl BuilderProxyHandler for CounterHandler { + fn handle( + &self, + method: String, + _params: Value, + _result: Value, + ) -> Pin> + Send>> { + // Only count Engine API calls, not health check calls + if method != "eth_getBlockByNumber" { + *self.counter.lock().unwrap() += 1; + tracing::info!("Proxy handler intercepted Engine API call: {}", method); + } else { + tracing::debug!("Proxy handler intercepted health check call: {}", method); + } + async move { None }.boxed() + } +} + +#[tokio::test] +async fn no_traffic_to_unhealthy_builder_when_flag_disabled() -> eyre::Result<()> { + // Create a counter that tracks Engine API calls to the builder + let counter = Arc::new(Mutex::new(0)); + let handler = Arc::new(CounterHandler { counter: counter.clone() }); + + // Create test harness with: + // - ignore_unhealthy_builders=true (key test parameter) + // - short max_unsafe_interval=1 to make builder unhealthy quickly + let harness = RollupBoostTestHarnessBuilder::new("no_traffic_unhealthy") + .proxy_handler(handler) + 
.with_ignore_unhealthy_builders(true) + .with_max_unsafe_interval(1) + .build() + .await?; + + // Override max_unsafe_interval to 1 second for this test + // We'll need to modify the config after build - let's access the rollup boost args + + let mut block_generator = harness.block_generator().await?; + let client = harness.debug_client().await; + + // Step 1: Disable execution mode so L2 moves ahead and builder falls behind + let response = client.set_execution_mode(ExecutionMode::Disabled).await.unwrap(); + assert_eq!(response.execution_mode, ExecutionMode::Disabled); + + // Step 2: Let L2 move ahead by generating some blocks + for _ in 0..3 { + let (_block, block_creator) = block_generator.generate_block(false).await?; + assert!(block_creator.is_l2(), "Blocks should be created by L2 when execution disabled"); + } + + // Step 3: Re-enable execution mode + let response = client.set_execution_mode(ExecutionMode::Enabled).await.unwrap(); + assert_eq!(response.execution_mode, ExecutionMode::Enabled); + + // Step 4:Wait for health check to run again and mark builder as unhealthy + // since execution mode is now enabled and builder timestamp is stale + tokio::time::sleep(Duration::from_secs(2)).await; + + // Reset counter after builder should be marked unhealthy + *counter.lock().unwrap() = 0; + + // Step 5: Generate blocks - builder should now be unhealthy and with flag=false, + // no engine API calls should go to the builder + for _i in 0..5 { + let (_block, block_creator) = block_generator.generate_block(false).await?; + // Blocks should be created by L2 since builder is unhealthy and flag is false + assert!( + block_creator.is_l2(), + "Block creator should be L2 when builder is unhealthy and flag is false" + ); + } + + let final_count = *counter.lock().unwrap(); + + // With flag=false and unhealthy builder, should see zero engine API calls + assert_eq!( + final_count, 0, + "Should see no Engine API calls to unhealthy builder when flag is false" + ); + + Ok(()) +} 
diff --git a/rust/rollup-boost/crates/rollup-boost/src/tracing.rs b/rust/rollup-boost/crates/rollup-boost/src/tracing.rs new file mode 100644 index 0000000000000..fa92f2dffe8f2 --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/tracing.rs @@ -0,0 +1,201 @@ +use eyre::Context as _; +use metrics::histogram; +use opentelemetry::{ + KeyValue, global, + trace::{Status, TracerProvider as _}, +}; +use opentelemetry_otlp::WithExportConfig; +use opentelemetry_sdk::{Resource, propagation::TraceContextPropagator, trace::SpanProcessor}; +use tracing::level_filters::LevelFilter; +use tracing_opentelemetry::OpenTelemetryLayer; +use tracing_subscriber::{ + Layer, filter::Targets, fmt::writer::BoxMakeWriter, layer::SubscriberExt, +}; + +use crate::cli::{LogFormat, RollupBoostServiceArgs}; + +/// Span attribute keys that should be recorded as metric labels. +/// +/// Use caution when adding new attributes here and keep +/// label cardinality in mind. Not all span attributes make +/// appropriate labels. +pub const SPAN_ATTRIBUTE_LABELS: [&str; 4] = + ["code", "payload_source", "method", "builder_has_payload"]; + +/// Custom span processor that records span durations as histograms +#[derive(Debug)] +struct MetricsSpanProcessor; + +impl SpanProcessor for MetricsSpanProcessor { + fn on_start(&self, _span: &mut opentelemetry_sdk::trace::Span, _cx: &opentelemetry::Context) {} + + fn on_end(&self, span: opentelemetry_sdk::trace::SpanData) { + let duration = span.end_time.duration_since(span.start_time).unwrap_or_default(); + + // Remove status description to avoid cardinality explosion + let status = match span.status { + Status::Ok => "ok", + Status::Error { .. 
} => "error", + Status::Unset => "unset", + }; + + // Add custom labels + let labels = span + .attributes + .iter() + .filter(|attr| SPAN_ATTRIBUTE_LABELS.contains(&attr.key.as_str())) + .map(|attr| (attr.key.as_str().to_string(), attr.value.as_str().to_string())) + .chain([ + ("span_kind".to_string(), format!("{:?}", span.span_kind)), + ("status".to_string(), status.into()), + ]) + .collect::>(); + + // 0 = no difference in gas build via builder vs l2 + // > 0 = gas used by builder block is greater than l2 block + // < 0 = gas used by l2 block is greater than builder block + let gas_delta = span + .attributes + .iter() + .find(|attr| attr.key.as_str() == "gas_delta") + .map(|attr| attr.value.as_str().to_string()); + + if let Some(gas_delta) = gas_delta { + histogram!("block_building_gas_delta", &labels) + .record(gas_delta.parse::().unwrap_or_default() as f64); + } + + // 0 = no difference in tx count build via builder vs l2 + // > 0 = num txs in builder block is greater than l2 block + // < 0 = num txs in l2 block is greater than builder block + let tx_count_delta = span + .attributes + .iter() + .find(|attr| attr.key.as_str() == "tx_count_delta") + .map(|attr| attr.value.as_str().to_string()); + + if let Some(tx_count_delta) = tx_count_delta { + histogram!("block_building_tx_count_delta", &labels) + .record(tx_count_delta.parse::().unwrap_or_default() as f64); + } + + histogram!(format!("{}_duration", span.name), &labels).record(duration); + } + + fn force_flush(&self) -> opentelemetry_sdk::error::OTelSdkResult { + Ok(()) + } + + fn shutdown(&self) -> opentelemetry_sdk::error::OTelSdkResult { + Ok(()) + } +} + +pub fn init_tracing(args: &RollupBoostServiceArgs) -> eyre::Result<()> { + // Be cautious with snake_case and kebab-case here + let filter_name = "rollup_boost".to_string(); + + let global_filter = Targets::new() + .with_default(LevelFilter::INFO) + .with_target(&filter_name, LevelFilter::TRACE); + + let registry = 
tracing_subscriber::registry().with(global_filter); + + let log_filter = + Targets::new().with_default(LevelFilter::INFO).with_target(&filter_name, args.log_level); + + let writer = if let Some(path) = &args.log_file { + let file = std::fs::OpenOptions::new() + .create(true) + .append(true) + .open(path) + .context("Failed to open log file")?; + BoxMakeWriter::new(file) + } else { + BoxMakeWriter::new(std::io::stdout) + }; + + // Weird control flow here is required because of type system + if args.tracing { + global::set_text_map_propagator(TraceContextPropagator::new()); + let otlp_exporter = opentelemetry_otlp::SpanExporter::builder() + .with_tonic() + .with_endpoint(&args.otlp_endpoint) + .build() + .context("Failed to create OTLP exporter")?; + + // precedence: OTEL_SERVICE_NAME -> CARGO_PKG_NAME -> "rollup-boost" + let service_name = std::env::var("OTEL_SERVICE_NAME") + .ok() + .or_else(|| std::env::var("CARGO_PKG_NAME").ok()) + .unwrap_or("rollup-boost".to_string()); + + let resource = + Resource::builder().with_attribute(KeyValue::new("service.name", service_name)).build(); + + let mut provider_builder = opentelemetry_sdk::trace::SdkTracerProvider::builder() + .with_batch_exporter(otlp_exporter) + .with_resource(resource); + if args.metrics { + provider_builder = provider_builder.with_span_processor(MetricsSpanProcessor); + } + let provider = provider_builder.build(); + let tracer = provider.tracer("rollup-boost"); + + let trace_filter = Targets::new() + .with_default(LevelFilter::OFF) + .with_target(&filter_name, LevelFilter::TRACE); + + let registry = registry.with(OpenTelemetryLayer::new(tracer).with_filter(trace_filter)); + + match args.log_format { + LogFormat::Json => { + tracing::subscriber::set_global_default( + registry.with( + tracing_subscriber::fmt::layer() + .json() + .with_ansi(false) + .with_writer(writer) + .with_filter(log_filter.clone()), + ), + )?; + } + LogFormat::Text => { + tracing::subscriber::set_global_default( + registry.with( + 
tracing_subscriber::fmt::layer() + .with_ansi(false) + .with_writer(writer) + .with_filter(log_filter.clone()), + ), + )?; + } + } + } else { + match args.log_format { + LogFormat::Json => { + tracing::subscriber::set_global_default( + registry.with( + tracing_subscriber::fmt::layer() + .json() + .with_ansi(false) + .with_writer(writer) + .with_filter(log_filter.clone()), + ), + )?; + } + LogFormat::Text => { + tracing::subscriber::set_global_default( + registry.with( + tracing_subscriber::fmt::layer() + .with_ansi(false) + .with_writer(writer) + .with_filter(log_filter.clone()), + ), + )?; + } + } + } + + Ok(()) +} diff --git a/rust/rollup-boost/crates/rollup-boost/src/version.rs b/rust/rollup-boost/crates/rollup-boost/src/version.rs new file mode 100644 index 0000000000000..aae0ebf8e98cd --- /dev/null +++ b/rust/rollup-boost/crates/rollup-boost/src/version.rs @@ -0,0 +1,45 @@ +use metrics::gauge; + +/// The latest version from Cargo.toml. +pub const CARGO_PKG_VERSION: &str = env!("CARGO_PKG_VERSION"); + +/// The 8 character short SHA of the latest commit. +pub const VERGEN_GIT_SHA: &str = env!("VERGEN_GIT_SHA_SHORT"); + +/// The build timestamp. +pub const VERGEN_BUILD_TIMESTAMP: &str = env!("VERGEN_BUILD_TIMESTAMP"); + +pub const VERSION: VersionInfo = VersionInfo { + version: CARGO_PKG_VERSION, + build_timestamp: VERGEN_BUILD_TIMESTAMP, + git_sha: VERGEN_GIT_SHA, +}; + +/// Contains version information for the application. +#[derive(Debug, Clone)] +pub struct VersionInfo { + /// The version of the application. + pub version: &'static str, + /// The build timestamp of the application. + pub build_timestamp: &'static str, + /// The Git SHA of the build. + pub git_sha: &'static str, +} + +impl VersionInfo { + /// This exposes rollup-boost's version information over prometheus. 
+ pub fn register_version_metrics(&self) { + let labels: [(&str, &str); 3] = [ + ("version", self.version), + ("build_timestamp", self.build_timestamp), + ("git_sha", self.git_sha), + ]; + + let gauge = gauge!("builder_info", &labels); + gauge.set(1); + } +} + +pub const fn get_version() -> &'static str { + env!("CARGO_PKG_VERSION") +} diff --git a/rust/rollup-boost/crates/websocket-proxy/.dockerignore b/rust/rollup-boost/crates/websocket-proxy/.dockerignore new file mode 100644 index 0000000000000..7bf30c2fd850b --- /dev/null +++ b/rust/rollup-boost/crates/websocket-proxy/.dockerignore @@ -0,0 +1,5 @@ +target/ +.git/ +.github/ +.gitignore +README.md \ No newline at end of file diff --git a/rust/rollup-boost/crates/websocket-proxy/.env.example b/rust/rollup-boost/crates/websocket-proxy/.env.example new file mode 100644 index 0000000000000..b6f660ecc3cb1 --- /dev/null +++ b/rust/rollup-boost/crates/websocket-proxy/.env.example @@ -0,0 +1,3 @@ +UPSTREAM_WS=ws://upstreamurl.com/rs +MAXIMUM_CONCURRENT_CONNECTIONS=2 +LOG_LEVEL=debug diff --git a/rust/rollup-boost/crates/websocket-proxy/Cargo.lock b/rust/rollup-boost/crates/websocket-proxy/Cargo.lock new file mode 100644 index 0000000000000..78e1ba82f14b7 --- /dev/null +++ b/rust/rollup-boost/crates/websocket-proxy/Cargo.lock @@ -0,0 +1,2829 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 4 + +[[package]] +name = "addr2line" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" + +[[package]] +name = "ahash" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" +dependencies = [ + "cfg-if", + "once_cell", + "version_check", + "zerocopy 0.7.35", +] + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + +[[package]] +name = "anstream" +version = "0.6.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is_terminal_polyfill", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" + +[[package]] +name = "anstyle-parse" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" +dependencies = [ + "windows-sys 0.59.0", +] + 
+[[package]] +name = "anstyle-wincon" +version = "3.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3534e77181a9cc07539ad51f2141fe32f6c3ffd4df76db8ad92346b003ae4e" +dependencies = [ + "anstyle", + "once_cell", + "windows-sys 0.59.0", +] + +[[package]] +name = "arc-swap" +version = "1.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" + +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + +[[package]] +name = "autocfg" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" + +[[package]] +name = "aws-lc-rs" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19b756939cb2f8dc900aa6dcd505e6e2428e9cae7ff7b028c49e3946efa70878" +dependencies = [ + "aws-lc-sys", + "zeroize", +] + +[[package]] +name = "aws-lc-sys" +version = "0.28.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa9b6986f250236c27e5a204062434a773a13243d2ffc2955f37bdba4c5c6a1" +dependencies = [ + "bindgen", + "cc", + "cmake", + "dunce", + "fs_extra", +] + +[[package]] +name = "axum" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de45108900e1f9b9242f7f2e254aa3e2c029c921c258fe9e6b4217eeebd54288" +dependencies = [ + "axum-core", + "base64", + "bytes", + "form_urlencoded", + "futures-util", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-util", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", + "sha1", + "sync_wrapper", + "tokio", + 
"tokio-tungstenite", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "axum-core" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68464cd0412f486726fb3373129ef5d2993f90c34bc2bc1c1e9943b2f4fc7ca6" +dependencies = [ + "bytes", + "futures-core", + "http", + "http-body", + "http-body-util", + "mime", + "pin-project-lite", + "rustversion", + "sync_wrapper", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "backoff" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1" +dependencies = [ + "getrandom 0.2.16", + "instant", + "rand 0.8.5", +] + +[[package]] +name = "backtrace" +version = "0.3.74" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" +dependencies = [ + "addr2line", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", + "windows-targets 0.52.6", +] + +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] +name = "bindgen" +version = "0.69.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" +dependencies = [ + "bitflags", + "cexpr", + "clang-sys", + "itertools", + "lazy_static", + "lazycell", + "log", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "syn", + "which", +] + +[[package]] +name = "bitflags" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "bumpalo" +version = "3.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" + +[[package]] +name = "bytes" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" + +[[package]] +name = "cc" +version = "1.2.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04da6a0d40b948dfc4fa8f5bbf402b0fc1a64a28dbf7d12ffd683550f2c1b63a" +dependencies = [ + "jobserver", + "libc", + "shlex", +] + +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "clang-sys" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" +dependencies = [ + "glob", + "libc", + "libloading", +] + +[[package]] +name = "clap" +version = "4.5.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eccb054f56cbd38340b380d4a8e69ef1f02f1af43db2f0cc817a4774d80ae071" +dependencies = [ + "clap_builder", + "clap_derive", +] + +[[package]] +name = "clap_builder" +version = "4.5.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "efd9466fac8543255d3b1fcad4762c5e116ffe808c8a3043d4263cd4fd4862a2" +dependencies = [ + "anstream", + "anstyle", + 
"clap_lex", + "strsim", +] + +[[package]] +name = "clap_derive" +version = "4.5.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09176aae279615badda0765c0c0b3f6ed53f4709118af73cf4655d85d1530cd7" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "clap_lex" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" + +[[package]] +name = "cmake" +version = "0.1.54" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7caa3f9de89ddbe2c607f4101924c5abec803763ae9534e4f4d7d8f84aa81f0" +dependencies = [ + "cc", +] + +[[package]] +name = "colorchoice" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" + +[[package]] +name = "combine" +version = "4.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" +dependencies = [ + "bytes", + "memchr", +] + +[[package]] +name = "core-foundation" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b55271e5c8c478ad3f38ad24ef34923091e0548492a266d19b3c0b4d82574c63" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "cpufeatures" +version = "0.2.17" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" +dependencies = [ + "libc", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "data-encoding" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476" + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "crypto-common", +] + +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "dotenvy" +version = "0.15.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" + +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + +[[package]] 
+name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "errno" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "976dd42dc7e85965fe702eb8164f21f450704bdde31faefd6471dba214cb594e" +dependencies = [ + "libc", + "windows-sys 0.59.0", +] + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "flashblocks-websocket-proxy" +version = "0.1.0" +dependencies = [ + "axum", + "backoff", + "clap", + "dotenvy", + "futures", + "hostname", + "http", + "metrics", + "metrics-derive", + "metrics-exporter-prometheus", + "redis", + "redis-test", + "reqwest", + "ring", + "serde_json", + "thiserror", + "tokio", + "tokio-tungstenite", + "tokio-util", + "tracing", + "tracing-subscriber", + "uuid", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foldhash" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + +[[package]] +name = "form_urlencoded" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + +[[package]] +name = "futures" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" + +[[package]] +name = "futures-executor" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" + +[[package]] +name = "futures-macro" +version = "0.3.31" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "futures-sink" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" + +[[package]] +name = "futures-task" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" + +[[package]] +name = "futures-util" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.11.0+wasi-snapshot-preview1", +] + +[[package]] +name = "getrandom" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73fea8450eea4bac3940448fb7ae50d91f034f941199fcd9d909a5a07aa455f0" +dependencies = [ + "cfg-if", + "libc", + "r-efi", + "wasi 0.14.2+wasi-0.2.4", +] + +[[package]] +name = "gimli" +version = "0.31.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" + +[[package]] +name = "glob" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" + +[[package]] +name = "h2" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75249d144030531f8dee69fe9cea04d3edf809a017ae445e2abdff6629e86633" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http", + "indexmap", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "hashbrown" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" +dependencies = [ + "foldhash", +] + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "home" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf" +dependencies = [ + "windows-sys 0.59.0", +] + +[[package]] +name = "hostname" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a56f203cd1c76362b69e3863fd987520ac36cf70a8c92627449b2f64a8cf7d65" +dependencies = [ + "cfg-if", + "libc", + "windows-link", +] + +[[package]] +name = "http" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" 
+dependencies = [ + "bytes", + "http", +] + +[[package]] +name = "http-body-util" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" +dependencies = [ + "bytes", + "futures-core", + "http", + "http-body", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" + +[[package]] +name = "httpdate" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + +[[package]] +name = "hyper" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "h2", + "http", + "http-body", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "smallvec", + "tokio", + "want", +] + +[[package]] +name = "hyper-rustls" +version = "0.27.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d191583f3da1305256f22463b9bb0471acad48a4e534a5218b9963e9c1f59b2" +dependencies = [ + "futures-util", + "http", + "hyper", + "hyper-util", + "rustls", + "rustls-native-certs", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tower-service", +] + +[[package]] +name = "hyper-tls" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" +dependencies = [ + "bytes", + "http-body-util", + "hyper", + "hyper-util", + "native-tls", + "tokio", + "tokio-native-tls", + "tower-service", +] + +[[package]] +name = "hyper-util" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"497bbc33a26fdd4af9ed9c70d63f61cf56a938375fbb32df34db9b1cd6d643f2" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "http", + "http-body", + "hyper", + "libc", + "pin-project-lite", + "socket2", + "tokio", + "tower-service", + "tracing", +] + +[[package]] +name = "icu_collections" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locid" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_locid_transform" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_locid_transform_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_locid_transform_data" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7515e6d781098bf9f7205ab3fc7e9709d34554ae0b21ddbcb5febfa4bc7df11d" + +[[package]] +name = "icu_normalizer" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "utf16_iter", + "utf8_iter", + "write16", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5e8338228bdc8ab83303f16b797e177953730f601a96c25d10cb3ab0daa0cb7" + +[[package]] 
+name = "icu_properties" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locid_transform", + "icu_properties_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85fb8799753b75aee8d2a21d7c14d9f38921b54b3dbda10f5a3c7a7b82dba5e2" + +[[package]] +name = "icu_provider" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_provider_macros", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_provider_macros" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "idna" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "indexmap" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" +dependencies = [ + "equivalent", + "hashbrown", +] + +[[package]] +name = "instant" 
+version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "ipnet" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" + +[[package]] +name = "is_terminal_polyfill" +version = "1.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" + +[[package]] +name = "itertools" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + +[[package]] +name = "jobserver" +version = "0.1.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" +dependencies = [ + "getrandom 0.3.2", + "libc", +] + +[[package]] +name = "js-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "lazycell" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" + +[[package]] +name = "libc" +version = "0.2.172" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" + +[[package]] +name = "libloading" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" +dependencies = [ + "cfg-if", + "windows-targets 0.52.6", +] + +[[package]] +name = "linux-raw-sys" +version = "0.4.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" + +[[package]] +name = "linux-raw-sys" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" + +[[package]] +name = "litemap" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" + +[[package]] +name = "lock_api" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +dependencies = [ + "autocfg", + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" + +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata 0.1.10", +] + +[[package]] +name = "matchit" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" + +[[package]] +name = "memchr" +version = "2.7.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" + +[[package]] +name = "metrics" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25dea7ac8057892855ec285c440160265225438c3c45072613c25a4b26e98ef5" +dependencies = [ + "ahash", + "portable-atomic", +] + +[[package]] +name = "metrics-derive" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3dbdd96ed57d565ec744cba02862d707acf373c5772d152abae6ec5c4e24f6c" +dependencies = [ + "proc-macro2", + "quote", + "regex", + "syn", +] + +[[package]] +name = "metrics-exporter-prometheus" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df88858cd28baaaf2cfc894e37789ed4184be0e1351157aec7bf3c2266c793fd" +dependencies = [ + "base64", + "http-body-util", + "hyper", + "hyper-rustls", + "hyper-util", + "indexmap", + "ipnet", + "metrics", + "metrics-util", + "quanta", + "thiserror", + "tokio", + "tracing", +] + +[[package]] +name = "metrics-util" +version = "0.19.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8496cc523d1f94c1385dd8f0f0c2c480b2b8aeccb5b7e4485ad6365523ae376" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", + "hashbrown", + "metrics", + "quanta", + "rand 0.9.1", + "rand_xoshiro", + "sketches-ddsketch", +] + +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "miniz_oxide" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"3be647b768db090acb35d5ec5db2b0e1f1de11133ca123b9eacf5137868f892a" +dependencies = [ + "adler2", +] + +[[package]] +name = "mio" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" +dependencies = [ + "libc", + "wasi 0.11.0+wasi-snapshot-preview1", + "windows-sys 0.52.0", +] + +[[package]] +name = "native-tls" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87de3442987e9dbec73158d5c715e7ad9072fda936bb03d19d7fa10e00520f0e" +dependencies = [ + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework 2.11.1", + "security-framework-sys", + "tempfile", +] + +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + +[[package]] +name = "num-bigint" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", +] + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + 
+[[package]] +name = "object" +version = "0.36.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +dependencies = [ + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "openssl" +version = "0.10.72" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fedfea7d58a1f73118430a55da6a286e7b044961736ce96a16a17068ea25e5da" +dependencies = [ + "bitflags", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "openssl-probe" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" + +[[package]] +name = "openssl-sys" +version = "0.9.107" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8288979acd84749c744a9014b4382d42b8f7b2592847b5afb2ed29e5d16ede07" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + +[[package]] +name = "parking_lot" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = 
"0.9.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-targets 0.52.6", +] + +[[package]] +name = "percent-encoding" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pkg-config" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" + +[[package]] +name = "portable-atomic" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" + +[[package]] +name = "ppv-lite86" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" +dependencies = [ + "zerocopy 0.8.25", +] + +[[package]] +name = "prettyplease" +version = "0.2.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "664ec5419c51e34154eec046ebcba56312d5a2fc3b09a06da188e1ad21afadf6" +dependencies = [ + "proc-macro2", + "syn", +] + +[[package]] +name = "proc-macro2" +version = "1.0.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" +dependencies = [ + 
"unicode-ident", +] + +[[package]] +name = "quanta" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3bd1fe6824cea6538803de3ff1bc0cf3949024db3d43c9643024bfb33a807c0e" +dependencies = [ + "crossbeam-utils", + "libc", + "once_cell", + "raw-cpuid", + "wasi 0.11.0+wasi-snapshot-preview1", + "web-sys", + "winapi", +] + +[[package]] +name = "quote" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" +version = "5.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fbfd9d094a40bf3ae768db9361049ace4c0e04a4fd6b359518bd7b73a73dd97" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.3", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.3", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom 0.2.16", +] + +[[package]] +name = "rand_core" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +dependencies = [ + "getrandom 0.3.2", +] + +[[package]] +name = "rand_xoshiro" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f703f4665700daf5512dcca5f43afa6af89f09db47fb56be587f80636bda2d41" +dependencies = [ + "rand_core 0.9.3", +] + +[[package]] +name = "raw-cpuid" +version = "11.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6df7ab838ed27997ba19a4664507e6f82b41fe6e20be42929332156e5e85146" +dependencies = [ + "bitflags", +] + +[[package]] +name = "redis" +version = "0.30.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "438a4e5f8e9aa246d6f3666d6978441bf1b37d5f417b50c4dd220be09f5fcc17" +dependencies = [ + "arc-swap", + "combine", + "itoa", + "num-bigint", + "percent-encoding", + "ryu", + "sha1_smol", + "socket2", + "url", +] + +[[package]] +name = "redis-test" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "967d3ffa2d2ead5a95b2e8561d7453c4719c9fe9dbba521673e058e513cb1c24" +dependencies = [ + "rand 0.9.1", + "redis", + "socket2", + "tempfile", +] + +[[package]] +name = "redox_syscall" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2f103c6d277498fbceb16e84d317e2a400f160f46904d5f5410848c829511a3" +dependencies = [ + "bitflags", +] + +[[package]] +name = "regex" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata 0.4.9", + "regex-syntax 0.8.5", +] + +[[package]] 
+name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", +] + +[[package]] +name = "regex-automata" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax 0.8.5", +] + +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + +[[package]] +name = "regex-syntax" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" + +[[package]] +name = "reqwest" +version = "0.12.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d19c46a6fdd48bc4dab94b6103fccc55d34c67cc0ad04653aad4ea2a07cd7bbb" +dependencies = [ + "base64", + "bytes", + "futures-core", + "futures-util", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-tls", + "hyper-util", + "ipnet", + "js-sys", + "log", + "mime", + "native-tls", + "once_cell", + "percent-encoding", + "pin-project-lite", + "rustls-pemfile", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tokio-native-tls", + "tower", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "windows-registry", +] + +[[package]] +name = "ring" +version = "0.17.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" +dependencies = [ + "cc", + "cfg-if", + "getrandom 0.2.16", + "libc", + "untrusted", + "windows-sys 0.52.0", +] + +[[package]] +name = "rustc-demangle" +version = 
"0.1.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" + +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + +[[package]] +name = "rustix" +version = "0.38.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" +dependencies = [ + "bitflags", + "errno", + "libc", + "linux-raw-sys 0.4.15", + "windows-sys 0.59.0", +] + +[[package]] +name = "rustix" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d97817398dd4bb2e6da002002db259209759911da105da92bec29ccb12cf58bf" +dependencies = [ + "bitflags", + "errno", + "libc", + "linux-raw-sys 0.9.4", + "windows-sys 0.59.0", +] + +[[package]] +name = "rustls" +version = "0.23.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df51b5869f3a441595eac5e8ff14d486ff285f7b8c0df8770e49c3b56351f0f0" +dependencies = [ + "aws-lc-rs", + "once_cell", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-native-certs" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3" +dependencies = [ + "openssl-probe", + "rustls-pki-types", + "schannel", + "security-framework 3.2.0", +] + +[[package]] +name = "rustls-pemfile" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "rustls-pki-types" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"917ce264624a4b4db1c364dcc35bfca9ded014d0a958cd47ad3e960e988ea51c" + +[[package]] +name = "rustls-webpki" +version = "0.103.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fef8b8769aaccf73098557a87cd1816b4f9c7c16811c9c77142aa695c16f2c03" +dependencies = [ + "aws-lc-rs", + "ring", + "rustls-pki-types", + "untrusted", +] + +[[package]] +name = "rustversion" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" + +[[package]] +name = "ryu" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" + +[[package]] +name = "schannel" +version = "0.1.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" +dependencies = [ + "windows-sys 0.59.0", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "security-framework" +version = "2.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" +dependencies = [ + "bitflags", + "core-foundation 0.9.4", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" +dependencies = [ + "bitflags", + "core-foundation 0.10.0", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "serde" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.140" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "serde_path_to_error" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59fab13f937fa393d08645bf3a84bdfe86e296747b506ada67bb15f10f218b2a" +dependencies = [ + "itoa", + "serde", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "sha1" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "sha1_smol" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbfa15b3dddfee50a0fff136974b3e1bde555604ba463834a7eb7deb6417705d" + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "signal-hook-registry" +version = "1.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9203b8055f63a2a00e2f593bb0510367fe707d7ff1e5c872de2f537b339e5410" +dependencies = [ + "libc", +] + +[[package]] +name = "sketches-ddsketch" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1e9a774a6c28142ac54bb25d25562e6bcf957493a184f15ad4eebccb23e410a" + +[[package]] +name = "slab" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", +] + +[[package]] +name = "smallvec" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9" + +[[package]] +name = "socket2" +version = "0.5.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f5fd57c80058a56cf5c777ab8a126398ece8e442983605d280a44ce79d0edef" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "subtle" +version = "2.6.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "syn" +version = "2.0.101" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "sync_wrapper" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" +dependencies = [ + "futures-core", +] + +[[package]] +name = "synstructure" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tempfile" +version = "3.19.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7437ac7763b9b123ccf33c338a5cc1bac6f69b45a136c19bdd8a65e3916435bf" +dependencies = [ + "fastrand", + "getrandom 0.3.2", + "once_cell", + "rustix 1.0.5", + "windows-sys 0.59.0", +] + +[[package]] +name = "thiserror" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thread_local" +version = "1.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +dependencies = [ + "cfg-if", + "once_cell", +] + +[[package]] +name = "tinystr" 
+version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +dependencies = [ + "displaydoc", + "zerovec", +] + +[[package]] +name = "tokio" +version = "1.44.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6b88822cbe49de4185e3a4cbf8321dd487cf5fe0c5c65695fef6346371e9c48" +dependencies = [ + "backtrace", + "bytes", + "libc", + "mio", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "socket2", + "tokio-macros", + "windows-sys 0.52.0", +] + +[[package]] +name = "tokio-macros" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" +dependencies = [ + "rustls", + "tokio", +] + +[[package]] +name = "tokio-tungstenite" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a9daff607c6d2bf6c16fd681ccb7eecc83e4e2cdc1ca067ffaadfca5de7f084" +dependencies = [ + "futures-util", + "log", + "native-tls", + "tokio", + "tokio-native-tls", + "tungstenite", +] + +[[package]] +name = "tokio-util" +version = "0.7.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66a539a9ad6d5d281510d5bd368c973d636c02dbf8a67300bfb6b950696ad7df" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", +] + +[[package]] 
+name = "tower" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper", + "tokio", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + +[[package]] +name = "tower-service" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" + +[[package]] +name = "tracing" +version = "0.1.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +dependencies = [ + "log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tracing-core" +version = "0.1.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-serde" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"704b1aeb7be0d0a84fc9828cae51dab5970fee5088f83d1dd7ee6f6246fc6ff1" +dependencies = [ + "serde", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex", + "serde", + "serde_json", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", + "tracing-serde", +] + +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "tungstenite" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4793cb5e56680ecbb1d843515b23b6de9a75eb04b66643e256a396d43be33c13" +dependencies = [ + "bytes", + "data-encoding", + "http", + "httparse", + "log", + "native-tls", + "rand 0.9.1", + "sha1", + "thiserror", + "utf-8", +] + +[[package]] +name = "typenum" +version = "1.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" + +[[package]] +name = "unicode-ident" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "url" +version = "2.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", +] + +[[package]] +name = "utf-8" 
+version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" + +[[package]] +name = "utf16_iter" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + +[[package]] +name = "utf8parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" + +[[package]] +name = "uuid" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "458f7a779bf54acc9f347480ac654f68407d3aab21269a6e3c9f922acd9e2da9" +dependencies = [ + "getrandom 0.3.2", +] + +[[package]] +name = "valuable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasi" +version = "0.14.2+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" +dependencies = [ + "wit-bindgen-rt", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +dependencies = [ + "bumpalo", + "log", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" +dependencies = [ + "cfg-if", + "js-sys", + "once_cell", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "web-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "which" +version = "4.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" +dependencies = [ + "either", + "home", + "once_cell", + "rustix 0.38.44", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-link" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" + +[[package]] +name = "windows-registry" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4286ad90ddb45071efd1a66dfa43eb02dd0dfbae1545ad6cc3c51cf34d7e8ba3" +dependencies = [ + "windows-result", + "windows-strings", + "windows-targets 0.53.0", +] + +[[package]] +name = "windows-result" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c64fd11a4fd95df68efcfee5f44a294fe71b8bc6a91993e2791938abcc712252" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-strings" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87fa48cc5d406560701792be122a10132491cff9d0aeb23583cc2dcafc847319" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1e4c7e8ceaaf9cb7d7507c974735728ab453b67ef8f18febdd7c11fe59dca8b" +dependencies = [ + "windows_aarch64_gnullvm 0.53.0", + "windows_aarch64_msvc 0.53.0", + "windows_i686_gnu 0.53.0", + "windows_i686_gnullvm 0.53.0", + "windows_i686_msvc 0.53.0", + "windows_x86_64_gnu 0.53.0", + "windows_x86_64_gnullvm 0.53.0", + "windows_x86_64_msvc 0.53.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_i686_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" + +[[package]] +name = "windows_x86_64_gnu" 
+version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" + +[[package]] +name = "wit-bindgen-rt" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" +dependencies = [ + "bitflags", +] + +[[package]] +name = "write16" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" + +[[package]] +name = "writeable" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" + +[[package]] +name = "yoke" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = "zerocopy" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +dependencies = [ + "zerocopy-derive 0.7.35", +] + +[[package]] +name = "zerocopy" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb" +dependencies = [ + "zerocopy-derive 0.8.25", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "zerofrom" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = 
"zeroize" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" + +[[package]] +name = "zerovec" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] diff --git a/rust/rollup-boost/crates/websocket-proxy/Cargo.toml b/rust/rollup-boost/crates/websocket-proxy/Cargo.toml new file mode 100644 index 0000000000000..93f6a4f7589b6 --- /dev/null +++ b/rust/rollup-boost/crates/websocket-proxy/Cargo.toml @@ -0,0 +1,45 @@ +[package] +name = "flashblocks-websocket-proxy" +version = "0.1.0" +edition.workspace = true +description = "Flashblocks Websocket Proxy is a service that subscribes to new Flashblocks broadcasts them to downstream RPC nodes" +rust-version.workspace = true +license.workspace = true +readme = "README.md" + +[dependencies] +tracing.workspace = true +tracing-subscriber = { workspace = true, features = ["env-filter", "json", "fmt", "std"] } +clap = { workspace = true, features = ["derive", "env"] } +futures.workspace = true +thiserror.workspace = true +serde_json = { workspace = true, features = ["std"] } +metrics.workspace = true +metrics-derive.workspace = true +tokio = { workspace = true, features = ["full"] } +tokio-tungstenite = { workspace = true, features = ["native-tls"] } +metrics-exporter-prometheus = { workspace = true, features = [ + "http-listener", +] } +http.workspace = true +axum = { workspace = true, features = ["ws"] } +dotenvy.workspace = true +backoff.workspace = true +reqwest = { workspace = true, 
features = ["native-tls"] } + +hostname.workspace = true +redis.workspace = true +uuid.workspace = true +tokio-util.workspace = true +brotli = { workspace = true, features = ["std"] } + +[lib] +name = "websocket_proxy" + +[dev-dependencies] +testcontainers.workspace = true +testcontainers-modules.workspace = true +redis-test.workspace = true + +[features] +integration = [] diff --git a/rust/rollup-boost/crates/websocket-proxy/Dockerfile b/rust/rollup-boost/crates/websocket-proxy/Dockerfile new file mode 100644 index 0000000000000..76d62674b0484 --- /dev/null +++ b/rust/rollup-boost/crates/websocket-proxy/Dockerfile @@ -0,0 +1,18 @@ +FROM rust:1.88 AS builder + +WORKDIR /app + +ARG BINARY="flashblocks-websocket-proxy" +ARG FEATURES + +COPY . . + +RUN cargo build --release --features="$FEATURES" --package=${BINARY} + +FROM gcr.io/distroless/cc-debian12 +WORKDIR /app + +ARG BINARY="flashblocks-websocket-proxy" +COPY --from=builder /app/target/release/${BINARY} /usr/local/bin/ + +ENTRYPOINT ["/usr/local/bin/flashblocks-websocket-proxy"] diff --git a/rust/rollup-boost/crates/websocket-proxy/README.md b/rust/rollup-boost/crates/websocket-proxy/README.md new file mode 100644 index 0000000000000..65c7f649cace9 --- /dev/null +++ b/rust/rollup-boost/crates/websocket-proxy/README.md @@ -0,0 +1,70 @@ +# Flashblocks Websocket Proxy + +## Overview +The Flashblocks Websocket Proxy is a service that subscribes to new Flashblocks from +[rollup-boost](https://github.com/flashbots/rollup-boost) on the sequencer. Then broadcasts them out to any downstream +RPC nodes. Minimizing the number of connections to the sequencer and restricting access. + +> ⚠️ **Warning** +> +> This is currently alpha software -- deploy at your own risk! +> +> Currently, this project is a one-directional generic websocket proxy. It doesn't inspect any data or validate clients. +> This may not always be the case. 
+ +## For Developers + +### Contributing + +### Building & Testing +You can build and test the project using [Cargo](https://doc.rust-lang.org/cargo/). Some useful commands are: +``` +# Build the project +cargo build + +# Run all the tests (requires local version of redis to be installed) +cargo test --all-features +``` + +### Deployment + +Builds of the websocket proxy [are provided](https://github.com/base/flashblocks-websocket-proxy/pkgs/container/flashblocks-websocket-proxy). +The only configuration required is the rollup-boost URL to proxy. You can set this via an env var `UPSTREAM_WS` or a flag `--upstream-ws`. + + +You can see a full list of parameters by running: + +`docker run ghcr.io/base/flashblocks-websocket-proxy:master --help` + +### Redis Integration + +The proxy supports distributed rate limiting with Redis. This is useful when running multiple instances of the proxy behind a load balancer, as it allows rate limits to be enforced across all instances. + +To enable Redis integration, use the following parameters: + +- `--redis-url` - Redis connection URL (e.g., `redis://localhost:6379`) +- `--redis-key-prefix` - Prefix for Redis keys (default: `flashblocks`) + +Example: + +```bash +docker run ghcr.io/base/flashblocks-websocket-proxy:master \ + --upstream-ws wss://your-sequencer-endpoint \ + --redis-url redis://redis:6379 \ + --global-connections-limit 1000 \ + --per-ip-connections-limit 10 +``` + +When Redis is enabled, the following features are available: + +- Distributed rate limiting across multiple proxy instances +- Connection tracking persists even if the proxy instance restarts +- More accurate global connection limiting in multi-instance deployments + +If the Redis connection fails, the proxy will automatically fall back to in-memory rate limiting. + +### Brotli Compression + +The proxy supports compressing messages to downstream clients using Brotli. 
+ +To enable this, pass the parameter `--enable-compression` \ No newline at end of file diff --git a/rust/rollup-boost/crates/websocket-proxy/src/auth.rs b/rust/rollup-boost/crates/websocket-proxy/src/auth.rs new file mode 100644 index 0000000000000..5b9ba6c2eb8e7 --- /dev/null +++ b/rust/rollup-boost/crates/websocket-proxy/src/auth.rs @@ -0,0 +1,205 @@ +use crate::auth::AuthenticationParseError::{ + DuplicateAPIKeyArgument, DuplicateApplicationArgument, MissingAPIKeyArgument, + MissingApplicationArgument, MissingRateLimitArgument, NoData, TooManyComponents, +}; +use std::collections::{HashMap, HashSet}; + +#[derive(Clone, Debug)] +pub struct Authentication { + key_to_application: HashMap, + app_to_rate_limit: HashMap, +} + +#[derive(Debug, PartialEq)] +pub enum AuthenticationParseError { + NoData(), + MissingApplicationArgument(String), + MissingAPIKeyArgument(String), + MissingRateLimitArgument(String), + TooManyComponents(String), + DuplicateApplicationArgument(String), + DuplicateAPIKeyArgument(String), +} + +impl std::fmt::Display for AuthenticationParseError { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + NoData() => write!(f, "No API Keys Provided"), + MissingApplicationArgument(arg) => write!(f, "Missing application argument: [{arg}]"), + MissingAPIKeyArgument(app) => write!(f, "Missing API Key argument: [{app}]"), + MissingRateLimitArgument(app) => write!(f, "Missing rate limit argument: [{app}]"), + TooManyComponents(app) => write!(f, "Too many components: [{app}]"), + DuplicateApplicationArgument(app) => { + write!(f, "Duplicate application argument: [{app}]") + } + DuplicateAPIKeyArgument(app) => write!(f, "Duplicate API key: [{app}]"), + } + } +} + +impl std::error::Error for AuthenticationParseError {} + +impl TryFrom> for Authentication { + type Error = AuthenticationParseError; + + fn try_from(args: Vec) -> Result { + let mut applications = HashSet::new(); + let mut key_to_application: HashMap = HashMap::new(); 
+ let mut app_to_rate_limit: HashMap = HashMap::new(); + + if args.is_empty() { + return Err(NoData()); + } + + for arg in args { + let mut parts = arg.split(":"); + let app = parts.next().ok_or(MissingApplicationArgument(arg.clone()))?; + if app.is_empty() { + return Err(MissingApplicationArgument(arg.clone())); + } + + let key = parts.next().ok_or(MissingAPIKeyArgument(app.to_string()))?; + if key.is_empty() { + return Err(MissingAPIKeyArgument(app.to_string())); + } + + let rate_limit = parts.next().ok_or(MissingRateLimitArgument(app.to_string()))?; + if rate_limit.is_empty() { + return Err(MissingRateLimitArgument(app.to_string())); + } + + if parts.count() > 0 { + return Err(TooManyComponents(app.to_string())); + } + + if applications.contains(app) { + return Err(DuplicateApplicationArgument(app.to_string())); + } + + if key_to_application.contains_key(key) { + return Err(DuplicateAPIKeyArgument(app.to_string())); + } + + applications.insert(app.to_string()); + key_to_application.insert(key.to_string(), app.to_string()); + app_to_rate_limit.insert(app.to_string(), rate_limit.parse().unwrap()); + } + + Ok(Self { key_to_application, app_to_rate_limit }) + } +} + +impl Authentication { + pub fn none() -> Self { + Self { key_to_application: HashMap::new(), app_to_rate_limit: HashMap::new() } + } + + #[allow(dead_code)] + pub fn new( + api_keys: HashMap, + app_to_rate_limit: HashMap, + ) -> Self { + Self { key_to_application: api_keys, app_to_rate_limit } + } + + pub fn get_application_for_key(&self, api_key: &String) -> Option<&String> { + self.key_to_application.get(api_key) + } + + pub fn get_rate_limits(&self) -> HashMap { + self.app_to_rate_limit.clone() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_parsing() { + let auth = Authentication::try_from(vec![ + "app1:key1:10".to_string(), + "app2:key2:10".to_string(), + "app3:key3:10".to_string(), + ]) + .unwrap(); + + assert_eq!(auth.key_to_application.len(), 3); + 
assert_eq!(auth.key_to_application["key1"], "app1"); + assert_eq!(auth.key_to_application["key2"], "app2"); + assert_eq!(auth.key_to_application["key3"], "app3"); + assert_eq!(auth.app_to_rate_limit.len(), 3); + assert_eq!(auth.app_to_rate_limit["app1"], 10); + assert_eq!(auth.app_to_rate_limit["app2"], 10); + assert_eq!(auth.app_to_rate_limit["app3"], 10); + + let auth = Authentication::try_from(vec![ + "app1:key1:10".to_string(), + "".to_string(), + "app3:key3:10".to_string(), + ]); + assert!(auth.is_err()); + assert_eq!(auth.unwrap_err(), MissingApplicationArgument("".into())); + + let auth = Authentication::try_from(vec![ + "app1:key1:10".to_string(), + "app2".to_string(), + "app3:key3:10".to_string(), + ]); + assert!(auth.is_err()); + assert_eq!(auth.unwrap_err(), MissingAPIKeyArgument("app2".into())); + + let auth = Authentication::try_from(vec![ + "app1:key1:10".to_string(), + "app2:key2:10".to_string(), + "app3:key3".to_string(), + ]); + assert!(auth.is_err()); + assert_eq!(auth.unwrap_err(), MissingRateLimitArgument("app3".into())); + + let auth = Authentication::try_from(vec![ + "app1:key1:10".to_string(), + ":".to_string(), + "app3:key3:10".to_string(), + ]); + assert!(auth.is_err()); + assert_eq!(auth.unwrap_err(), MissingApplicationArgument(":".into())); + + let auth = Authentication::try_from(vec![ + "app1:key1:10".to_string(), + "app2:".to_string(), + "app3:key3:10".to_string(), + ]); + assert!(auth.is_err()); + assert_eq!(auth.unwrap_err(), MissingAPIKeyArgument("app2".into())); + + let auth = Authentication::try_from(vec![ + "app1:key1:10".to_string(), + "app2:key2:10".to_string(), + "app3:key3:".to_string(), + ]); + assert!(auth.is_err()); + assert_eq!(auth.unwrap_err(), MissingRateLimitArgument("app3".into())); + + let auth = Authentication::try_from(vec![ + "app1:key1:10".to_string(), + "app2:key2:10:unexpected2".to_string(), + "app3:key3:10".to_string(), + ]); + assert!(auth.is_err()); + assert_eq!(auth.unwrap_err(), 
TooManyComponents("app2".into())); + + let auth = Authentication::try_from(vec![ + "app1:key1:10".to_string(), + "app1:key3:10".to_string(), + "app2:key2:10".to_string(), + ]); + assert!(auth.is_err()); + assert_eq!(auth.unwrap_err(), DuplicateApplicationArgument("app1".into())); + + let auth = + Authentication::try_from(vec!["app1:key1:10".to_string(), "app2:key1:10".to_string()]); + assert!(auth.is_err()); + assert_eq!(auth.unwrap_err(), DuplicateAPIKeyArgument("app2".into())); + } +} diff --git a/rust/rollup-boost/crates/websocket-proxy/src/client.rs b/rust/rollup-boost/crates/websocket-proxy/src/client.rs new file mode 100644 index 0000000000000..bcc5b42a12a14 --- /dev/null +++ b/rust/rollup-boost/crates/websocket-proxy/src/client.rs @@ -0,0 +1,19 @@ +use crate::rate_limit::Ticket; +use axum::extract::ws::WebSocket; +use std::net::IpAddr; + +pub struct ClientConnection { + client_addr: IpAddr, + _ticket: Ticket, + pub(crate) websocket: WebSocket, +} + +impl ClientConnection { + pub fn new(client_addr: IpAddr, ticket: Ticket, websocket: WebSocket) -> Self { + Self { client_addr, _ticket: ticket, websocket } + } + + pub fn id(&self) -> String { + self.client_addr.to_string() + } +} diff --git a/rust/rollup-boost/crates/websocket-proxy/src/lib.rs b/rust/rollup-boost/crates/websocket-proxy/src/lib.rs new file mode 100644 index 0000000000000..ec369b495f707 --- /dev/null +++ b/rust/rollup-boost/crates/websocket-proxy/src/lib.rs @@ -0,0 +1,15 @@ +pub mod auth; +pub mod client; +pub mod metrics; +pub mod rate_limit; +pub mod registry; +pub mod server; +pub mod subscriber; + +pub use auth::*; +pub use client::*; +pub use metrics::*; +pub use rate_limit::*; +pub use registry::*; +pub use server::*; +pub use subscriber::*; diff --git a/rust/rollup-boost/crates/websocket-proxy/src/main.rs b/rust/rollup-boost/crates/websocket-proxy/src/main.rs new file mode 100644 index 0000000000000..36575d4e8dd66 --- /dev/null +++ b/rust/rollup-boost/crates/websocket-proxy/src/main.rs @@ 
-0,0 +1,445 @@ +mod auth; +mod client; +mod metrics; +mod rate_limit; +mod registry; +mod server; +mod subscriber; + +use axum::{extract::ws::Message, http::Uri}; +use clap::Parser; +use dotenvy::dotenv; +use metrics::Metrics; +use metrics_exporter_prometheus::PrometheusBuilder; +use rate_limit::{InMemoryRateLimit, RateLimit, RedisRateLimit}; +use registry::Registry; +use server::Server; +use std::{collections::HashMap, io::Write, net::SocketAddr, sync::Arc, time::Duration}; +use subscriber::{SubscriberOptions, WebsocketSubscriber}; +use tokio::{ + signal::unix::{SignalKind, signal}, + sync::broadcast, + time::interval, +}; +use tokio_util::sync::CancellationToken; +use tracing::{Level, error, info, trace, warn}; +use tracing_subscriber::EnvFilter; + +#[derive(Parser, Debug)] +#[command(author, version, about)] +struct Args { + #[arg( + long, + env, + default_value = "0.0.0.0:8545", + help = "The address and port to listen on for incoming connections" + )] + listen_addr: SocketAddr, + + #[arg( + long, + env, + value_delimiter = ',', + help = "WebSocket URI of the upstream server to connect to" + )] + upstream_ws: Vec, + + #[arg( + long, + env, + default_value = "20", + help = "Number of messages to buffer for lagging clients" + )] + message_buffer_size: usize, + + #[arg( + long, + env, + default_value = "100", + help = "Maximum number of concurrently connected clients per instance" + )] + instance_connection_limit: usize, + + #[arg( + long, + env, + default_value = "10", + help = "Maximum number of concurrently connected clients per IP. 0 here means no limit." 
+ )] + per_ip_connection_limit: usize, + + #[arg( + long, + env, + default_value = "false", + help = "Enable brotli compression on messages to downstream clients" + )] + enable_compression: bool, + + #[arg( + long, + env, + default_value = "X-Forwarded-For", + help = "Header to use to determine the clients origin IP" + )] + ip_addr_http_header: String, + + #[arg(long, env, default_value = "info")] + log_level: Level, + + /// Format for logs, can be json or text + #[arg(long, env, default_value = "text")] + log_format: String, + + /// Enable Prometheus metrics + #[arg(long, env, default_value = "true")] + metrics: bool, + + /// API Keys, if not provided will be an unauthenticated endpoint, should be in the format + /// `::,::,..` + #[arg(long, env, value_delimiter = ',', help = "API keys to allow")] + api_keys: Vec, + + /// Address to run the metrics server on + #[arg(long, env, default_value = "0.0.0.0:9000")] + metrics_addr: SocketAddr, + + /// Tags to add to every metrics emitted, should be in the format --metrics-global-labels + /// label1=value1,label2=value2 + #[arg(long, env, default_value = "")] + metrics_global_labels: String, + + /// Add the hostname as a label to all Prometheus metrics + #[arg(long, env, default_value = "false")] + metrics_host_label: bool, + + /// Maximum backoff allowed for upstream connections + #[arg(long, env, default_value = "20000")] + subscriber_max_interval_ms: u64, + + /// Interval in milliseconds between ping messages sent to upstream servers to detect + /// unresponsive connections + #[arg(long, env, default_value = "2000")] + subscriber_ping_interval_ms: u64, + + /// Timeout in milliseconds to wait for pong responses from upstream servers before considering + /// the connection dead + #[arg(long, env, default_value = "4000")] + subscriber_pong_timeout_ms: u64, + + #[arg( + long, + env, + help = "Redis URL for distributed rate limiting (e.g., redis://localhost:6379). If not provided, in-memory rate limiting will be used." 
+ )] + redis_url: Option, + + #[arg(long, env, default_value = "flashblocks", help = "Prefix for Redis keys")] + redis_key_prefix: String, + + #[arg(long, env, default_value = "false", help = "Enable ping/pong client health checks")] + client_ping_enabled: bool, + + #[arg( + long, + env, + default_value = "15000", + help = "Interval in milliseconds to send ping messages to clients" + )] + client_ping_interval_ms: u64, + + #[arg( + long, + env, + default_value = "30000", + help = "Timeout in milliseconds to wait for pong response from clients" + )] + client_pong_timeout_ms: u64, +} + +#[tokio::main] +async fn main() { + dotenv().ok(); + let args = Args::parse(); + + let log_format = args.log_format.to_lowercase(); + let log_level = args.log_level.to_string(); + + if log_format == "json" { + tracing_subscriber::fmt() + .json() + .with_env_filter(EnvFilter::new(log_level)) + .with_ansi(false) + .init(); + } else { + tracing_subscriber::fmt() + .with_env_filter(EnvFilter::new(log_level)) + .with_ansi(false) + .init(); + } + + let api_keys: Vec = args.api_keys.into_iter().filter(|s| !s.is_empty()).collect(); + let authentication = if api_keys.is_empty() { + None + } else { + match auth::Authentication::try_from(api_keys) { + Ok(auth) => Some(auth), + Err(e) => { + panic!("Failed to parse API Keys: {e}") + } + } + }; + + if args.metrics { + info!(message = "starting metrics server", address = args.metrics_addr.to_string()); + + let mut builder = PrometheusBuilder::new().with_http_listener(args.metrics_addr); + + if args.metrics_host_label { + let hostname = hostname::get() + .expect("could not find hostname") + .into_string() + .expect("could not convert hostname to string"); + builder = builder.add_global_label("hostname", hostname); + } + + for (key, value) in parse_global_metrics(args.metrics_global_labels) { + builder = builder.add_global_label(key, value); + } + + builder.install().expect("failed to setup Prometheus endpoint") + } + + // Validate that we have at 
least one upstream URI + if args.upstream_ws.is_empty() { + error!(message = "no upstream URIs provided"); + panic!("No upstream URIs provided"); + } + + info!(message = "using upstream URIs", uris = ?args.upstream_ws); + + let metrics = Arc::new(Metrics::default()); + let metrics_clone = metrics.clone(); + + let (send, _rec) = broadcast::channel(args.message_buffer_size); + let sender = send.clone(); + + let listener = move |data: Vec| { + trace!(message = "received data", data = ?data); + // Subtract one from receiver count, as we have to keep one receiver open at all times (see + // _rec) to avoid the channel being closed. However this is not an active client + // connection. + metrics_clone.active_connections.set((send.receiver_count() - 1) as f64); + + let message_data = if args.enable_compression { + let data_bytes = data.as_slice(); + let mut compressed_data_bytes = Vec::new(); + { + let mut compressor = + brotli::CompressorWriter::new(&mut compressed_data_bytes, 4096, 5, 22); + compressor.write_all(data_bytes).unwrap(); + } + compressed_data_bytes + } else { + data + }; + + match send.send(message_data.into()) { + Ok(_) => (), + Err(e) => error!(message = "failed to send data", error = e.to_string()), + } + }; + + let token = CancellationToken::new(); + let mut subscriber_tasks = Vec::new(); + + // Start a subscriber for each upstream URI + for (index, uri) in args.upstream_ws.iter().enumerate() { + let uri_clone = uri.clone(); + let listener_clone = listener.clone(); + let token_clone = token.clone(); + let metrics_clone = metrics.clone(); + + let options = SubscriberOptions::default() + .with_max_backoff_interval(Duration::from_millis(args.subscriber_max_interval_ms)) + .with_ping_interval(Duration::from_millis(args.subscriber_ping_interval_ms)) + .with_pong_timeout(Duration::from_millis(args.subscriber_pong_timeout_ms)) + .with_backoff_initial_interval(Duration::from_millis(500)) + .with_initial_grace_period(Duration::from_secs(5)); + + let mut 
subscriber = + WebsocketSubscriber::new(uri_clone.clone(), listener_clone, metrics_clone, options); + + let task = tokio::spawn(async move { + info!(message = "starting subscriber", index = index, uri = uri_clone.to_string()); + subscriber.run(token_clone).await; + }); + + subscriber_tasks.push(task); + } + + let ping_task = if args.client_ping_enabled { + let ping_sender = sender.clone(); + let ping_token = token.clone(); + let ping_interval = args.client_ping_interval_ms; + + tokio::spawn(async move { + let mut interval = interval(Duration::from_millis(ping_interval)); + info!(message = "starting ping sender", interval_ms = ping_interval); + + loop { + tokio::select! { + _ = interval.tick() => { + match ping_sender.send(Message::Ping(vec![].into())) { + Ok(_) => trace!(message = "sent ping to all clients"), + Err(e) => error!(message = "failed to send ping", error = e.to_string()), + } + } + _ = ping_token.cancelled() => { + info!(message = "ping sender shutting down"); + break; + } + } + } + }) + } else { + tokio::spawn(std::future::pending()) + }; + + let registry = Registry::new( + sender, + metrics.clone(), + args.client_ping_enabled, + args.client_pong_timeout_ms, + ); + + let app_rate_limits = + if let Some(auth) = &authentication { auth.get_rate_limits() } else { HashMap::new() }; + + let rate_limiter = match &args.redis_url { + Some(redis_url) => { + info!(message = "Using Redis rate limiter", redis_url = redis_url); + match RedisRateLimit::new( + redis_url, + args.instance_connection_limit, + args.per_ip_connection_limit, + app_rate_limits.clone(), + &args.redis_key_prefix, + ) { + Ok(limiter) => { + info!(message = "Connected to Redis successfully"); + Arc::new(limiter) as Arc + } + Err(e) => { + error!( + message = + "Failed to connect to Redis, falling back to in-memory rate limiting", + error = e.to_string() + ); + Arc::new(InMemoryRateLimit::new( + args.instance_connection_limit, + args.per_ip_connection_limit, + app_rate_limits.clone(), + )) as Arc 
+ } + } + } + None => { + info!(message = "Using in-memory rate limiter"); + Arc::new(InMemoryRateLimit::new( + args.instance_connection_limit, + args.per_ip_connection_limit, + app_rate_limits, + )) as Arc + } + }; + + let server = Server::new( + args.listen_addr, + registry.clone(), + metrics, + rate_limiter, + authentication, + args.ip_addr_http_header, + ); + let server_task = server.listen(token.clone()); + + let mut interrupt = signal(SignalKind::interrupt()).unwrap(); + let mut terminate = signal(SignalKind::terminate()).unwrap(); + + tokio::select! { + _ = futures::future::join_all(subscriber_tasks) => { + info!("all subscriber tasks terminated"); + token.cancel(); + }, + _ = server_task => { + info!("server task terminated"); + token.cancel(); + }, + _ = ping_task => { + info!("ping task terminated"); + token.cancel(); + }, + _ = interrupt.recv() => { + info!("process interrupted, shutting down"); + token.cancel(); + } + _ = terminate.recv() => { + info!("process terminated, shutting down"); + token.cancel(); + } + } +} + +fn parse_global_metrics(metrics: String) -> Vec<(String, String)> { + let mut result = Vec::new(); + + for metric in metrics.split(',') { + if metric.is_empty() { + continue; + } + + let parts = metric.splitn(2, '=').map(|s| s.to_string()).collect::>(); + + if parts.len() != 2 { + warn!(message = "malformed global metric: invalid count", metric = metric); + continue; + } + + let label = parts[0].to_string(); + let value = parts[1].to_string(); + + if label.is_empty() || value.is_empty() { + warn!(message = "malformed global metric: empty value", metric = metric); + continue; + } + + result.push((label, value)); + } + + result +} + +#[cfg(test)] +mod test { + use crate::parse_global_metrics; + + #[test] + fn test_parse_global_metrics() { + assert_eq!(parse_global_metrics("".into()), Vec::<(String, String)>::new(),); + + assert_eq!(parse_global_metrics("key=value".into()), vec![("key".into(), "value".into())]); + + assert_eq!( + 
parse_global_metrics("key=value,key2=value2".into()), + vec![("key".into(), "value".into()), ("key2".into(), "value2".into())], + ); + + assert_eq!(parse_global_metrics("gibberish".into()), Vec::new()); + + assert_eq!( + parse_global_metrics("key=value,key2=,".into()), + vec![("key".into(), "value".into())], + ); + } +} diff --git a/rust/rollup-boost/crates/websocket-proxy/src/metrics.rs b/rust/rollup-boost/crates/websocket-proxy/src/metrics.rs new file mode 100644 index 0000000000000..4b3316c40eea6 --- /dev/null +++ b/rust/rollup-boost/crates/websocket-proxy/src/metrics.rs @@ -0,0 +1,77 @@ +use axum::extract::ws::Message; +use metrics::{Counter, Gauge, counter}; +use metrics_derive::Metrics; +#[derive(Metrics)] +#[metrics(scope = "websocket_proxy")] +pub struct Metrics { + #[metric(describe = "Messages sent to clients")] + pub sent_messages: Counter, + + #[metric(describe = "Count of messages that were unable to be sent")] + pub failed_messages: Counter, + + #[metric(describe = "Count of new connections opened")] + pub new_connections: Counter, + + #[metric(describe = "Count of number of connections closed")] + pub closed_connections: Counter, + + #[metric(describe = "Count the number of connections which lagged and then disconnected")] + pub lagged_connections: Counter, + + #[metric(describe = "Number of client connections currently open")] + pub active_connections: Gauge, + + #[metric(describe = "Count of rate limited request")] + pub rate_limited_requests: Counter, + + #[metric(describe = "Count of unauthorized requests with invalid API keys")] + pub unauthorized_requests: Counter, + + #[metric(describe = "Count of times upstream receiver was closed/errored")] + pub upstream_errors: Counter, + + #[metric(describe = "Number of active upstream connections")] + pub upstream_connections: Gauge, + + #[metric(describe = "Number of upstream connection attempts")] + pub upstream_connection_attempts: Counter, + + #[metric(describe = "Number of successful upstream 
connections")] + pub upstream_connection_successes: Counter, + + #[metric(describe = "Number of failed upstream connection attempts")] + pub upstream_connection_failures: Counter, + + #[metric(describe = "Total bytes broadcasted to clients")] + pub bytes_broadcasted: Counter, + + #[metric(describe = "Count of clients disconnected due to pong timeout")] + pub client_pong_disconnects: Counter, +} + +fn get_message_size(msg: &Message) -> u64 { + match msg { + Message::Text(text) => text.len() as u64, + Message::Binary(data) => data.len() as u64, + Message::Ping(data) => data.len() as u64, + Message::Pong(data) => data.len() as u64, + Message::Close(_) => 0, + } +} + +impl Metrics { + pub fn proxy_connections_by_app(&self, app: &str) { + counter!("websocket_proxy.connections_by_app", "app" => app.to_owned()).increment(1); + } + + pub fn message_received_from_upstream(&self, upstream: &str) { + counter!("websocket_proxy.upstream_messages", "upstream" => upstream.to_owned()) + .increment(1); + } + + pub fn record_message_sent(&self, msg: &Message) { + self.sent_messages.increment(1); + self.bytes_broadcasted.increment(get_message_size(msg)); + } +} diff --git a/rust/rollup-boost/crates/websocket-proxy/src/rate_limit.rs b/rust/rollup-boost/crates/websocket-proxy/src/rate_limit.rs new file mode 100644 index 0000000000000..2ddf9c3f11796 --- /dev/null +++ b/rust/rollup-boost/crates/websocket-proxy/src/rate_limit.rs @@ -0,0 +1,1144 @@ +use std::{ + collections::HashMap, + net::IpAddr, + sync::{Arc, Mutex}, +}; +use tracing::{debug, error, warn}; + +use thiserror::Error; +use tokio::sync::{OwnedSemaphorePermit, Semaphore}; + +use redis::{Client, Commands, RedisError}; +use std::{ + sync::atomic::{AtomicBool, Ordering}, + time::{Duration, SystemTime}, +}; +use uuid::Uuid; + +#[derive(Error, Debug)] +pub enum RateLimitError { + #[error("Rate Limit Reached: {reason}")] + Limit { reason: String }, +} + +#[clippy::has_significant_drop] +pub struct Ticket { + addr: IpAddr, + app: 
Option, + _permit: OwnedSemaphorePermit, + rate_limiter: Arc, +} + +impl Drop for Ticket { + fn drop(&mut self) { + self.rate_limiter.release(self.addr, self.app.clone()) + } +} + +pub trait RateLimit: Send + Sync { + fn try_acquire( + self: Arc, + addr: IpAddr, + app: Option, + ) -> Result; + + fn release(&self, addr: IpAddr, app: Option); +} + +struct Inner { + active_connections_per_ip: HashMap, + active_connections_per_app: HashMap, + semaphore: Arc, +} + +pub struct InMemoryRateLimit { + per_ip_limit: usize, + per_app_limit: HashMap, + inner: Mutex, +} + +impl InMemoryRateLimit { + pub fn new( + instance_limit: usize, + per_ip_limit: usize, + per_app_limit: HashMap, + ) -> Self { + Self { + per_ip_limit, + per_app_limit, + inner: Mutex::new(Inner { + active_connections_per_ip: HashMap::new(), + active_connections_per_app: HashMap::new(), + semaphore: Arc::new(Semaphore::new(instance_limit)), + }), + } + } +} + +impl RateLimit for InMemoryRateLimit { + fn try_acquire( + self: Arc, + addr: IpAddr, + app: Option, + ) -> Result { + let mut inner = self.inner.lock().unwrap(); + + let permit = inner + .semaphore + .clone() + .try_acquire_owned() + .map_err(|_| RateLimitError::Limit { reason: "Global limit".to_owned() })?; + + if self.per_ip_limit > 0 { + let current_count = *inner.active_connections_per_ip.get(&addr).unwrap_or(&0); + + if current_count + 1 > self.per_ip_limit { + debug!( + message = "Rate limit exceeded, trying to acquire", + client = addr.to_string() + ); + return Err(RateLimitError::Limit { reason: String::from("IP limit exceeded") }); + } + + let new_count = current_count + 1; + inner.active_connections_per_ip.insert(addr, new_count); + } + + if let Some(app) = app.clone() { + let current_count = *inner.active_connections_per_app.get(&app).unwrap_or(&0); + + if current_count + 1 > *self.per_app_limit.get(&app).unwrap_or(&0) { + debug!( + message = "Rate limit exceeded, trying to acquire", + client = addr.to_string() + ); + return 
Err(RateLimitError::Limit { reason: String::from("App limit exceeded") }); + } + + let new_count = current_count + 1; + inner.active_connections_per_app.insert(app, new_count); + } + + Ok(Ticket { addr, app, _permit: permit, rate_limiter: self.clone() }) + } + + fn release(&self, addr: IpAddr, app: Option) { + let mut inner = self.inner.lock().unwrap(); + + if self.per_ip_limit > 0 { + let current_count = *inner.active_connections_per_ip.get(&addr).unwrap_or(&0); + + match current_count { + 0 => { + warn!( + message = "ip counting is not accurate -- unexpected underflow", + client = addr.to_string() + ); + inner.active_connections_per_ip.remove(&addr); + } + 1 => { + inner.active_connections_per_ip.remove(&addr); + } + _ => { + inner.active_connections_per_ip.insert(addr, current_count - 1); + } + } + } + + if let Some(app) = app { + let current_count = *inner.active_connections_per_app.get(&app).unwrap_or(&0); + + match current_count { + 0 => { + warn!( + message = "app counting is not accurate -- unexpected underflow", + client = app + ); + inner.active_connections_per_app.remove(&app); + } + 1 => { + inner.active_connections_per_app.remove(&app); + } + _ => { + inner.active_connections_per_app.insert(app, current_count - 1); + } + } + } + } +} + +pub struct RedisRateLimit { + redis_client: Client, + instance_limit: usize, + per_ip_limit: usize, + per_app_limit: HashMap, + semaphore: Arc, + key_prefix: String, + instance_id: String, + heartbeat_interval: Duration, + heartbeat_ttl: Duration, + background_tasks_started: AtomicBool, +} + +impl RedisRateLimit { + pub fn new( + redis_url: &str, + instance_limit: usize, + per_ip_limit: usize, + per_app_limit: HashMap, + key_prefix: &str, + ) -> Result { + let client = Client::open(redis_url)?; + let instance_id = Uuid::new_v4().to_string(); + + let heartbeat_interval = Duration::from_secs(10); + let heartbeat_ttl = Duration::from_secs(30); + + let rate_limiter = Self { + redis_client: client, + instance_limit, + 
per_ip_limit, + per_app_limit, + semaphore: Arc::new(Semaphore::new(instance_limit)), + key_prefix: key_prefix.to_string(), + instance_id, + heartbeat_interval, + heartbeat_ttl, + background_tasks_started: AtomicBool::new(false), + }; + + if let Err(e) = rate_limiter.register_instance() { + error!(message = "Failed to register instance in Redis", error = e.to_string()); + } + + Ok(rate_limiter) + } + + pub fn start_background_tasks(self: Arc) { + if self.background_tasks_started.swap(true, Ordering::SeqCst) { + return; + } + + debug!( + message = "Starting background heartbeat and cleanup tasks", + instance_id = self.instance_id + ); + + let self_clone = self.clone(); + tokio::spawn(async move { + loop { + if let Err(e) = self_clone.update_heartbeat() { + error!( + message = "Failed to update heartbeat in background task", + error = e.to_string() + ); + } + + if let Err(e) = self_clone.cleanup_stale_instances() { + error!( + message = "Failed to cleanup stale instances in background task", + error = e.to_string() + ); + } + + tokio::time::sleep(self_clone.heartbeat_interval / 2).await; + } + }); + } + + fn register_instance(&self) -> Result<(), RedisError> { + self.update_heartbeat()?; + debug!(message = "Registered instance in Redis", instance_id = self.instance_id); + + Ok(()) + } + + fn update_heartbeat(&self) -> Result<(), RedisError> { + let now = SystemTime::now(); + let mut conn = self.redis_client.get_connection()?; + + let ttl = self.heartbeat_ttl.as_secs(); + conn.set_ex::<_, _, ()>( + self.instance_heartbeat_key(), + now.duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs(), + ttl, + )?; + + debug!(message = "Updated instance heartbeat", instance_id = self.instance_id); + + Ok(()) + } + + fn cleanup_stale_instances(&self) -> Result<(), RedisError> { + let mut conn = self.redis_client.get_connection()?; + + let instance_heartbeat_pattern = format!("{}:instance:*:heartbeat", self.key_prefix); + let instance_heartbeats: Vec = 
conn.keys(instance_heartbeat_pattern)?; + + let active_instance_ids: Vec = instance_heartbeats + .iter() + .filter_map(|key| key.split(':').nth(2).map(String::from)) + .collect(); + + debug!( + message = "Active instances with heartbeats", + instance_count = active_instance_ids.len(), + current_instance = self.instance_id + ); + + let ip_instance_pattern = format!("{}:ip:*:instance:*:connections", self.key_prefix); + let ip_instance_keys: Vec = conn.keys(ip_instance_pattern)?; + + let mut instance_ids_with_ip_connections = std::collections::HashSet::new(); + for key in &ip_instance_keys { + if let Some(instance_id) = key.split(':').nth(4) { + instance_ids_with_ip_connections.insert(instance_id.to_string()); + } + } + + let app_instance_pattern = format!("{}:app:*:instance:*:connections", self.key_prefix); + let app_instance_keys: Vec = conn.keys(app_instance_pattern)?; + + let mut instance_ids_with_app_connections = std::collections::HashSet::new(); + for key in &app_instance_keys { + if let Some(instance_id) = key.split(':').nth(4) { + instance_ids_with_app_connections.insert(instance_id.to_string()); + } + } + + debug!( + message = "Checking for stale instances", + instances_with_ip_connections = instance_ids_with_ip_connections.len(), + instances_with_app_connections = instance_ids_with_app_connections.len(), + current_instance = self.instance_id + ); + + for instance_id in instance_ids_with_ip_connections { + if instance_id == self.instance_id { + debug!(message = "Skipping current instance", instance_id = instance_id); + continue; + } + + if !active_instance_ids.contains(&instance_id) { + debug!( + message = "Found stale instance", + instance_id = instance_id, + reason = "Heartbeat key not found" + ); + self.cleanup_instance(&mut conn, &instance_id)?; + } + } + + for instance_id in instance_ids_with_app_connections { + if instance_id == self.instance_id { + debug!(message = "Skipping current instance", instance_id = instance_id); + continue; + } + + if 
!active_instance_ids.contains(&instance_id) { + debug!( + message = "Found stale instance", + instance_id = instance_id, + reason = "Heartbeat key not found" + ); + self.cleanup_instance(&mut conn, &instance_id)?; + } + } + + debug!(message = "Completed stale instance cleanup"); + + Ok(()) + } + + fn cleanup_instance( + &self, + conn: &mut redis::Connection, + instance_id: &str, + ) -> Result<(), RedisError> { + let ip_instance_pattern = + format!("{}:ip:*:instance:{}:connections", self.key_prefix, instance_id); + let ip_instance_keys: Vec = conn.keys(ip_instance_pattern)?; + + let app_instance_pattern = + format!("{}:app:*:instance:{}:connections", self.key_prefix, instance_id); + let app_instance_keys: Vec = conn.keys(app_instance_pattern)?; + + debug!( + message = "Cleaning up instance", + instance_id = instance_id, + ip_key_count = ip_instance_keys.len(), + app_key_count = app_instance_keys.len() + ); + + for key in ip_instance_keys { + conn.del::<_, ()>(&key)?; + debug!(message = "Deleted IP instance key", key = key); + } + + for key in app_instance_keys { + conn.del::<_, ()>(&key)?; + debug!(message = "Deleted app instance key", key = key); + } + + Ok(()) + } + + fn ip_instance_key(&self, addr: &IpAddr) -> String { + format!("{}:ip:{}:instance:{}:connections", self.key_prefix, addr, self.instance_id) + } + + fn app_instance_key(&self, app: &str) -> String { + format!("{}:app:{}:instance:{}:connections", self.key_prefix, app, self.instance_id) + } + + fn instance_heartbeat_key(&self) -> String { + format!("{}:instance:{}:heartbeat", self.key_prefix, self.instance_id) + } +} + +impl RateLimit for RedisRateLimit { + fn try_acquire( + self: Arc, + addr: IpAddr, + app: Option, + ) -> Result { + self.clone().start_background_tasks(); + + let permit = match self.semaphore.clone().try_acquire_owned() { + Ok(permit) => permit, + Err(_) => { + return Err(RateLimitError::Limit { + reason: "Maximum connection limit reached for this server instance".to_string(), + }); + } 
+ }; + + let mut conn = match self.redis_client.get_connection() { + Ok(conn) => conn, + Err(e) => { + error!(message = "Failed to connect to Redis", error = e.to_string()); + return Err(RateLimitError::Limit { + reason: "Redis connection failed".to_string(), + }); + } + }; + + let mut ip_instance_connections: usize = 0; + let mut app_instance_connections: usize = 0; + let mut total_ip_connections: usize = 0; + let mut total_app_connections: usize = 0; + + if self.per_ip_limit > 0 { + let ip_keys_pattern = format!("{}:ip:{}:instance:*:connections", self.key_prefix, addr); + let ip_keys: Vec = match conn.keys(ip_keys_pattern) { + Ok(keys) => keys, + Err(e) => { + error!( + message = "Failed to get IP instance keys from Redis", + error = e.to_string() + ); + return Err(RateLimitError::Limit { + reason: "Redis operation failed".to_string(), + }); + } + }; + + for key in &ip_keys { + let count: usize = conn.get(key).unwrap_or(0); + total_ip_connections += count; + } + + if total_ip_connections >= self.per_ip_limit { + return Err(RateLimitError::Limit { + reason: format!("Per-IP connection limit reached for {addr}"), + }); + } + + ip_instance_connections = match conn.incr(self.ip_instance_key(&addr), 1) { + Ok(count) => count, + Err(e) => { + error!( + message = "Failed to increment per-instance IP counter in Redis", + error = e.to_string() + ); + return Err(RateLimitError::Limit { + reason: "Redis operation failed".to_string(), + }); + } + }; + } + + if let Some(app) = app.clone() { + let app_keys_pattern = + format!("{}:app:{}:instance:*:connections", self.key_prefix, app); + let app_keys: Vec = match conn.keys(app_keys_pattern) { + Ok(keys) => keys, + Err(e) => { + error!( + message = "Failed to get app instance keys from Redis", + error = e.to_string() + ); + return Err(RateLimitError::Limit { + reason: "Redis operation failed".to_string(), + }); + } + }; + + for key in &app_keys { + let count: usize = conn.get(key).unwrap_or(0); + total_app_connections += count; + 
} + + if total_app_connections >= *self.per_app_limit.get(&app).unwrap_or(&0) { + return Err(RateLimitError::Limit { + reason: format!("Per-app connection limit reached for {app}"), + }); + } + + app_instance_connections = match conn.incr(self.app_instance_key(&app), 1) { + Ok(count) => count, + Err(e) => { + error!( + message = "Failed to increment per-instance app counter in Redis", + error = e.to_string() + ); + return Err(RateLimitError::Limit { + reason: "Redis operation failed".to_string(), + }); + } + }; + } + + let total_instance_connections = self.instance_limit - self.semaphore.available_permits(); + + debug!( + message = "Connection established", + ip = addr.to_string(), + ip_instance_connections = ip_instance_connections, + total_ip_connections = total_ip_connections, + app_instance_connections = app_instance_connections, + total_app_connections = total_app_connections, + total_instance_connections = total_instance_connections, + instance_id = self.instance_id + ); + + Ok(Ticket { addr, app, _permit: permit, rate_limiter: self }) + } + + fn release(&self, addr: IpAddr, app: Option) { + match self.redis_client.get_connection() { + Ok(mut conn) => { + if self.per_ip_limit > 0 { + let ip_instance_connections: Result = + conn.decr(self.ip_instance_key(&addr), 1); + + if let Err(ref e) = ip_instance_connections { + error!( + message = "Failed to decrement per-instance IP counter in Redis", + error = e.to_string() + ); + } + + debug!( + message = "Connection released", + ip = addr.to_string(), + ip_instance_connections = ip_instance_connections.unwrap_or(0), + instance_id = self.instance_id + ); + } + + if let Some(app) = app.clone() { + let app_instance_connections: Result = + conn.decr(self.app_instance_key(&app), 1); + + if let Err(ref e) = app_instance_connections { + error!( + message = "Failed to decrement per-instance app counter in Redis", + error = e.to_string() + ); + } + + debug!( + message = "Connection released", + app = app, + 
app_instance_connections = app_instance_connections.unwrap_or(0), + instance_id = self.instance_id + ); + } + } + Err(e) => { + error!(message = "Failed to connect to Redis for release", error = e.to_string()); + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::str::FromStr; + + const GLOBAL_LIMIT: usize = 3; + const PER_IP_LIMIT: usize = 2; + + #[tokio::test] + async fn test_ip_tickets_are_released() { + let user_1 = IpAddr::from_str("127.0.0.1").unwrap(); + + let rate_limiter = + Arc::new(InMemoryRateLimit::new(GLOBAL_LIMIT, PER_IP_LIMIT, HashMap::new())); + + assert_eq!(rate_limiter.inner.lock().unwrap().semaphore.available_permits(), GLOBAL_LIMIT); + assert_eq!(rate_limiter.inner.lock().unwrap().active_connections_per_ip.len(), 0); + + let c1 = rate_limiter.clone().try_acquire(user_1, None).unwrap(); + + assert_eq!( + rate_limiter.inner.lock().unwrap().semaphore.available_permits(), + GLOBAL_LIMIT - 1 + ); + assert_eq!(rate_limiter.inner.lock().unwrap().active_connections_per_ip.len(), 1); + assert_eq!(rate_limiter.inner.lock().unwrap().active_connections_per_ip[&user_1], 1); + + drop(c1); + + assert_eq!(rate_limiter.inner.lock().unwrap().semaphore.available_permits(), GLOBAL_LIMIT); + assert_eq!(rate_limiter.inner.lock().unwrap().active_connections_per_ip.len(), 0); + } + + #[tokio::test] + async fn test_global_rate_limits() { + let user_1 = IpAddr::from_str("127.0.0.1").unwrap(); + let user_2 = IpAddr::from_str("128.0.0.1").unwrap(); + + let rate_limiter = + Arc::new(InMemoryRateLimit::new(GLOBAL_LIMIT, PER_IP_LIMIT, HashMap::new())); + + let _c1 = rate_limiter.clone().try_acquire(user_1, None).unwrap(); + + let _c2 = rate_limiter.clone().try_acquire(user_2, None).unwrap(); + + let _c3 = rate_limiter.clone().try_acquire(user_1, None).unwrap(); + + assert_eq!(rate_limiter.inner.lock().unwrap().semaphore.available_permits(), 0); + + let c4 = rate_limiter.clone().try_acquire(user_2, None); + assert!(c4.is_err()); + 
assert_eq!(c4.err().unwrap().to_string(), "Rate Limit Reached: Global limit"); + + drop(_c3); + + let c4 = rate_limiter.clone().try_acquire(user_2, None); + assert!(c4.is_ok()); + } + + #[tokio::test] + async fn test_per_ip_limits() { + let user_1 = IpAddr::from_str("127.0.0.1").unwrap(); + let user_2 = IpAddr::from_str("127.0.0.2").unwrap(); + + let rate_limiter = + Arc::new(InMemoryRateLimit::new(GLOBAL_LIMIT, PER_IP_LIMIT, HashMap::new())); + + let _c1 = rate_limiter.clone().try_acquire(user_1, None).unwrap(); + let _c2 = rate_limiter.clone().try_acquire(user_1, None).unwrap(); + + assert_eq!(rate_limiter.inner.lock().unwrap().active_connections_per_ip[&user_1], 2); + + let c3 = rate_limiter.clone().try_acquire(user_1, None); + assert!(c3.is_err()); + assert_eq!(c3.err().unwrap().to_string(), "Rate Limit Reached: IP limit exceeded"); + + let c4 = rate_limiter.clone().try_acquire(user_2, None); + assert!(c4.is_ok()); + } + + #[tokio::test] + async fn test_global_limits_with_multiple_ips() { + let user_1 = IpAddr::from_str("127.0.0.1").unwrap(); + let user_2 = IpAddr::from_str("127.0.0.2").unwrap(); + let user_3 = IpAddr::from_str("127.0.0.3").unwrap(); + + let rate_limiter = Arc::new(InMemoryRateLimit::new(4, 3, HashMap::new())); + + let ticket_1_1 = rate_limiter.clone().try_acquire(user_1, None).unwrap(); + let ticket_1_2 = rate_limiter.clone().try_acquire(user_1, None).unwrap(); + + let ticket_2_1 = rate_limiter.clone().try_acquire(user_2, None).unwrap(); + let ticket_2_2 = rate_limiter.clone().try_acquire(user_2, None).unwrap(); + + assert_eq!(rate_limiter.inner.lock().unwrap().semaphore.available_permits(), 0); + + // Try user_3 - should fail due to global limit + let result = rate_limiter.clone().try_acquire(user_3, None); + assert!(result.is_err()); + assert_eq!(result.err().unwrap().to_string(), "Rate Limit Reached: Global limit"); + + drop(ticket_1_1); + + let ticket_3_1 = rate_limiter.clone().try_acquire(user_3, None).unwrap(); + + drop(ticket_1_2); + 
drop(ticket_2_1); + drop(ticket_2_2); + drop(ticket_3_1); + + assert_eq!(rate_limiter.inner.lock().unwrap().semaphore.available_permits(), 4); + assert_eq!(rate_limiter.inner.lock().unwrap().active_connections_per_ip.len(), 0); + } + + #[tokio::test] + async fn test_per_ip_limits_remain_enforced() { + let user_1 = IpAddr::from_str("127.0.0.1").unwrap(); + let user_2 = IpAddr::from_str("127.0.0.2").unwrap(); + + let rate_limiter = Arc::new(InMemoryRateLimit::new(5, 2, HashMap::new())); + + let ticket_1_1 = rate_limiter.clone().try_acquire(user_1, None).unwrap(); + let ticket_1_2 = rate_limiter.clone().try_acquire(user_1, None).unwrap(); + + let result = rate_limiter.clone().try_acquire(user_1, None); + assert!(result.is_err()); + assert_eq!(result.err().unwrap().to_string(), "Rate Limit Reached: IP limit exceeded"); + + let ticket_2_1 = rate_limiter.clone().try_acquire(user_2, None).unwrap(); + drop(ticket_1_1); + + let ticket_1_3 = rate_limiter.clone().try_acquire(user_1, None).unwrap(); + + let result = rate_limiter.clone().try_acquire(user_1, None); + assert!(result.is_err()); + assert_eq!(result.err().unwrap().to_string(), "Rate Limit Reached: IP limit exceeded"); + + drop(ticket_1_2); + drop(ticket_1_3); + drop(ticket_2_1); + + assert_eq!(rate_limiter.inner.lock().unwrap().semaphore.available_permits(), 5); + assert_eq!(rate_limiter.inner.lock().unwrap().active_connections_per_ip.len(), 0); + } + + #[tokio::test] + #[cfg(all(feature = "integration", test))] + async fn test_redis_instance_ip_tracking_and_cleanup() { + use std::time::Duration; + use testcontainers::runners::AsyncRunner; + use testcontainers_modules::redis::Redis; + + let container = Redis::default().start().await.unwrap(); + let host_port = container.get_host_port_ipv4(6379).await.unwrap(); + let client_addr = format!("redis://127.0.0.1:{host_port}"); + + tokio::time::sleep(Duration::from_millis(100)).await; + + let user_1 = IpAddr::from_str("127.0.0.1").unwrap(); + let user_2 = 
IpAddr::from_str("127.0.0.2").unwrap(); + + let redis_client = Client::open(client_addr.as_str()).unwrap(); + + { + let rate_limiter1 = Arc::new(RedisRateLimit { + redis_client: Client::open(client_addr.as_str()).unwrap(), + instance_limit: 10, + per_ip_limit: 5, + per_app_limit: HashMap::new(), + semaphore: Arc::new(Semaphore::new(10)), + key_prefix: "test".to_string(), + instance_id: "instance1".to_string(), + heartbeat_interval: Duration::from_millis(200), + heartbeat_ttl: Duration::from_secs(1), + background_tasks_started: AtomicBool::new(true), + }); + + rate_limiter1.register_instance().unwrap(); + let _ticket1 = rate_limiter1.clone().try_acquire(user_1, None).unwrap(); + let _ticket2 = rate_limiter1.clone().try_acquire(user_2, None).unwrap(); + // no drop on release (exit of block) + std::mem::forget(_ticket1); + std::mem::forget(_ticket2); + + { + let mut conn = redis_client.get_connection().unwrap(); + + let exists: bool = redis::cmd("EXISTS") + .arg("test:instance:instance1:heartbeat".to_string()) + .query(&mut conn) + .unwrap(); + assert!(exists, "Instance1 heartbeat should exist initially"); + + let ip1_instance1_count: usize = redis::cmd("GET") + .arg("test:ip:127.0.0.1:instance:instance1:connections") + .query(&mut conn) + .unwrap(); + let ip2_instance1_count: usize = redis::cmd("GET") + .arg("test:ip:127.0.0.2:instance:instance1:connections") + .query(&mut conn) + .unwrap(); + + assert_eq!(ip1_instance1_count, 1, "IP1 count should be 1 initially"); + assert_eq!(ip2_instance1_count, 1, "IP2 count should be 1 initially"); + } + }; + + tokio::time::sleep(Duration::from_secs(1)).await; + + { + let mut conn = redis_client.get_connection().unwrap(); + + let exists: bool = redis::cmd("EXISTS") + .arg("test:instance:instance1:heartbeat".to_string()) + .query(&mut conn) + .unwrap(); + assert!(!exists, "Instance1 heartbeat should be gone after TTL expiration"); + + let ip1_instance1_count: usize = redis::cmd("GET") + 
.arg("test:ip:127.0.0.1:instance:instance1:connections") + .query(&mut conn) + .unwrap(); + let ip2_instance1_count: usize = redis::cmd("GET") + .arg("test:ip:127.0.0.2:instance:instance1:connections") + .query(&mut conn) + .unwrap(); + + assert_eq!( + ip1_instance1_count, 1, + "IP1 instance1 count should still be 1 after instance1 crash" + ); + assert_eq!(ip2_instance1_count, 1, "IP2 instance1 count should still be 1 after crash"); + } + + let rate_limiter2 = Arc::new(RedisRateLimit { + redis_client: Client::open(client_addr.as_str()).unwrap(), + instance_limit: 10, + per_ip_limit: 5, + per_app_limit: HashMap::new(), + semaphore: Arc::new(Semaphore::new(10)), + key_prefix: "test".to_string(), + instance_id: "instance2".to_string(), + heartbeat_interval: Duration::from_millis(200), + heartbeat_ttl: Duration::from_secs(2), + background_tasks_started: AtomicBool::new(false), + }); + + rate_limiter2.register_instance().unwrap(); + rate_limiter2.cleanup_stale_instances().unwrap(); + + tokio::time::sleep(Duration::from_secs(1)).await; + + { + let mut conn = redis_client.get_connection().unwrap(); + + let ip1_instance1_exists: bool = redis::cmd("EXISTS") + .arg("test:ip:127.0.0.1:instance:instance1:connections") + .query(&mut conn) + .unwrap(); + let ip2_instance1_exists: bool = redis::cmd("EXISTS") + .arg("test:ip:127.0.0.2:instance:instance1:connections") + .query(&mut conn) + .unwrap(); + + assert!(!ip1_instance1_exists, "IP1 instance1 counter should be gone after cleanup"); + assert!(!ip2_instance1_exists, "IP2 instance1 counter should be gone after cleanup"); + } + + let _ticket3 = rate_limiter2.clone().try_acquire(user_1, None).unwrap(); + + { + let mut conn = redis_client.get_connection().unwrap(); + let ip1_instance2_count: usize = redis::cmd("GET") + .arg("test:ip:127.0.0.1:instance:instance2:connections") + .query(&mut conn) + .unwrap(); + + assert_eq!(ip1_instance2_count, 1, "IP1 instance2 count should be 1"); + } + } + + // API Key (App) Rate Limiting Tests + 
const PER_APP_LIMIT: usize = 2; + + #[tokio::test] + async fn test_app_tickets_are_released() { + let user_1 = IpAddr::from_str("127.0.0.1").unwrap(); + let app_1 = "app_1".to_string(); + + let mut per_app_limits = HashMap::new(); + per_app_limits.insert(app_1.clone(), PER_APP_LIMIT); + + let rate_limiter = Arc::new(InMemoryRateLimit::new( + GLOBAL_LIMIT, + 0, // Disable IP rate limiting + per_app_limits, + )); + + assert_eq!(rate_limiter.inner.lock().unwrap().semaphore.available_permits(), GLOBAL_LIMIT); + assert_eq!(rate_limiter.inner.lock().unwrap().active_connections_per_app.len(), 0); + + let c1 = rate_limiter.clone().try_acquire(user_1, Some(app_1.clone())).unwrap(); + + assert_eq!( + rate_limiter.inner.lock().unwrap().semaphore.available_permits(), + GLOBAL_LIMIT - 1 + ); + assert_eq!(rate_limiter.inner.lock().unwrap().active_connections_per_app.len(), 1); + assert_eq!(rate_limiter.inner.lock().unwrap().active_connections_per_app[&app_1], 1); + + drop(c1); + + assert_eq!(rate_limiter.inner.lock().unwrap().semaphore.available_permits(), GLOBAL_LIMIT); + assert_eq!(rate_limiter.inner.lock().unwrap().active_connections_per_app.len(), 0); + } + + #[tokio::test] + async fn test_per_app_limits() { + let user_1 = IpAddr::from_str("127.0.0.1").unwrap(); + let user_2 = IpAddr::from_str("127.0.0.2").unwrap(); + let app_1 = "app_1".to_string(); + let app_2 = "app_2".to_string(); + + let mut per_app_limits = HashMap::new(); + per_app_limits.insert(app_1.clone(), PER_APP_LIMIT); + per_app_limits.insert(app_2.clone(), PER_APP_LIMIT); + + let rate_limiter = Arc::new(InMemoryRateLimit::new( + GLOBAL_LIMIT, + 0, // Disable IP rate limiting + per_app_limits, + )); + + let _c1 = rate_limiter.clone().try_acquire(user_1, Some(app_1.clone())).unwrap(); + let _c2 = rate_limiter.clone().try_acquire(user_2, Some(app_1.clone())).unwrap(); + + assert_eq!(rate_limiter.inner.lock().unwrap().active_connections_per_app[&app_1], 2); + + let c3 = rate_limiter.clone().try_acquire(user_1, 
Some(app_1.clone())); + assert!(c3.is_err()); + assert_eq!(c3.err().unwrap().to_string(), "Rate Limit Reached: App limit exceeded"); + + // Different app should still work + let c4 = rate_limiter.clone().try_acquire(user_2, Some(app_2.clone())); + assert!(c4.is_ok()); + } + + #[tokio::test] + async fn test_global_limits_with_multiple_apps() { + let user_1 = IpAddr::from_str("127.0.0.1").unwrap(); + let user_2 = IpAddr::from_str("127.0.0.2").unwrap(); + let user_3 = IpAddr::from_str("127.0.0.3").unwrap(); + let app_1 = "app_1".to_string(); + let app_2 = "app_2".to_string(); + let app_3 = "app_3".to_string(); + + let mut per_app_limits = HashMap::new(); + per_app_limits.insert(app_1.clone(), PER_APP_LIMIT); + per_app_limits.insert(app_2.clone(), PER_APP_LIMIT); + per_app_limits.insert(app_3.clone(), PER_APP_LIMIT); + + let rate_limiter = Arc::new(InMemoryRateLimit::new(4, 0, per_app_limits)); + + let ticket_1_1 = rate_limiter.clone().try_acquire(user_1, Some(app_1.clone())).unwrap(); + let ticket_1_2 = rate_limiter.clone().try_acquire(user_1, Some(app_1.clone())).unwrap(); + + let ticket_2_1 = rate_limiter.clone().try_acquire(user_2, Some(app_2.clone())).unwrap(); + let ticket_2_2 = rate_limiter.clone().try_acquire(user_2, Some(app_2.clone())).unwrap(); + + assert_eq!(rate_limiter.inner.lock().unwrap().semaphore.available_permits(), 0); + + // Try app_3 - should fail due to global limit + let result = rate_limiter.clone().try_acquire(user_3, Some(app_3.clone())); + assert!(result.is_err()); + assert_eq!(result.err().unwrap().to_string(), "Rate Limit Reached: Global limit"); + + drop(ticket_1_1); + + let ticket_3_1 = rate_limiter.clone().try_acquire(user_3, Some(app_3.clone())).unwrap(); + + drop(ticket_1_2); + drop(ticket_2_1); + drop(ticket_2_2); + drop(ticket_3_1); + + assert_eq!(rate_limiter.inner.lock().unwrap().semaphore.available_permits(), 4); + assert_eq!(rate_limiter.inner.lock().unwrap().active_connections_per_app.len(), 0); + } + + #[tokio::test] + async 
fn test_per_app_limits_remain_enforced() { + let user_1 = IpAddr::from_str("127.0.0.1").unwrap(); + let user_2 = IpAddr::from_str("127.0.0.2").unwrap(); + let app_1 = "app_1".to_string(); + let app_2 = "app_2".to_string(); + + let mut per_app_limits = HashMap::new(); + per_app_limits.insert(app_1.clone(), PER_APP_LIMIT); + per_app_limits.insert(app_2.clone(), PER_APP_LIMIT); + + let rate_limiter = Arc::new(InMemoryRateLimit::new(5, 0, per_app_limits)); + + let ticket_1_1 = rate_limiter.clone().try_acquire(user_1, Some(app_1.clone())).unwrap(); + let ticket_1_2 = rate_limiter.clone().try_acquire(user_2, Some(app_1.clone())).unwrap(); + + let result = rate_limiter.clone().try_acquire(user_1, Some(app_1.clone())); + assert!(result.is_err()); + assert_eq!(result.err().unwrap().to_string(), "Rate Limit Reached: App limit exceeded"); + + let ticket_2_1 = rate_limiter.clone().try_acquire(user_2, Some(app_2.clone())).unwrap(); + drop(ticket_1_1); + + let ticket_1_3 = rate_limiter.clone().try_acquire(user_1, Some(app_1.clone())).unwrap(); + + let result = rate_limiter.clone().try_acquire(user_2, Some(app_1.clone())); + assert!(result.is_err()); + assert_eq!(result.err().unwrap().to_string(), "Rate Limit Reached: App limit exceeded"); + + drop(ticket_1_2); + drop(ticket_1_3); + drop(ticket_2_1); + + assert_eq!(rate_limiter.inner.lock().unwrap().semaphore.available_permits(), 5); + assert_eq!(rate_limiter.inner.lock().unwrap().active_connections_per_app.len(), 0); + } + + #[tokio::test] + #[cfg(all(feature = "integration", test))] + async fn test_redis_instance_app_tracking_and_cleanup() { + use redis_test::server::RedisServer; + use std::time::Duration; + + let server = RedisServer::new(); + let client_addr = format!("redis://{}", server.client_addr()); + + tokio::time::sleep(Duration::from_millis(100)).await; + + let user_1 = IpAddr::from_str("127.0.0.1").unwrap(); + let user_2 = IpAddr::from_str("127.0.0.2").unwrap(); + let app_1 = "app_1".to_string(); + let app_2 = 
"app_2".to_string(); + + let mut per_app_limits = HashMap::new(); + per_app_limits.insert(app_1.clone(), 5); + per_app_limits.insert(app_2.clone(), 5); + + let redis_client = Client::open(client_addr.as_str()).unwrap(); + + { + let rate_limiter1 = Arc::new(RedisRateLimit { + redis_client: Client::open(client_addr.as_str()).unwrap(), + instance_limit: 10, + per_ip_limit: 0, // Disable IP rate limiting + per_app_limit: per_app_limits.clone(), + semaphore: Arc::new(Semaphore::new(10)), + key_prefix: "test".to_string(), + instance_id: "instance1".to_string(), + heartbeat_interval: Duration::from_millis(200), + heartbeat_ttl: Duration::from_secs(1), + background_tasks_started: AtomicBool::new(true), + }); + + rate_limiter1.register_instance().unwrap(); + let _ticket1 = rate_limiter1.clone().try_acquire(user_1, Some(app_1.clone())).unwrap(); + let _ticket2 = rate_limiter1.clone().try_acquire(user_2, Some(app_2.clone())).unwrap(); + // no drop on release (exit of block) + std::mem::forget(_ticket1); + std::mem::forget(_ticket2); + + { + let mut conn = redis_client.get_connection().unwrap(); + + let exists: bool = redis::cmd("EXISTS") + .arg("test:instance:instance1:heartbeat") + .query(&mut conn) + .unwrap(); + assert!(exists, "Instance1 heartbeat should exist initially"); + + let app1_instance1_count: usize = redis::cmd("GET") + .arg(format!("test:app:{app_1}:instance:instance1:connections")) + .query(&mut conn) + .unwrap(); + let app2_instance1_count: usize = redis::cmd("GET") + .arg(format!("test:app:{app_2}:instance:instance1:connections")) + .query(&mut conn) + .unwrap(); + + assert_eq!(app1_instance1_count, 1, "App1 count should be 1 initially"); + assert_eq!(app2_instance1_count, 1, "App2 count should be 1 initially"); + } + }; + + tokio::time::sleep(Duration::from_secs(1)).await; + + { + let mut conn = redis_client.get_connection().unwrap(); + + let exists: bool = redis::cmd("EXISTS") + .arg("test:instance:instance1:heartbeat") + .query(&mut conn) + .unwrap(); + 
assert!(!exists, "Instance1 heartbeat should be gone after TTL expiration"); + + let app1_instance1_count: usize = redis::cmd("GET") + .arg(format!("test:app:{app_1}:instance:instance1:connections")) + .query(&mut conn) + .unwrap(); + let app2_instance1_count: usize = redis::cmd("GET") + .arg(format!("test:app:{app_2}:instance:instance1:connections")) + .query(&mut conn) + .unwrap(); + + assert_eq!( + app1_instance1_count, 1, + "App1 instance1 count should still be 1 after instance1 crash" + ); + assert_eq!( + app2_instance1_count, 1, + "App2 instance1 count should still be 1 after crash" + ); + } + + let rate_limiter2 = Arc::new(RedisRateLimit { + redis_client: Client::open(client_addr.as_str()).unwrap(), + instance_limit: 10, + per_ip_limit: 0, // Disable IP rate limiting + per_app_limit: per_app_limits, + semaphore: Arc::new(Semaphore::new(10)), + key_prefix: "test".to_string(), + instance_id: "instance2".to_string(), + heartbeat_interval: Duration::from_millis(200), + heartbeat_ttl: Duration::from_secs(2), + background_tasks_started: AtomicBool::new(false), + }); + + rate_limiter2.register_instance().unwrap(); + rate_limiter2.cleanup_stale_instances().unwrap(); + + tokio::time::sleep(Duration::from_secs(1)).await; + + { + let mut conn = redis_client.get_connection().unwrap(); + + let app1_instance1_exists: bool = redis::cmd("EXISTS") + .arg(format!("test:app:{app_1}:instance:instance1:connections")) + .query(&mut conn) + .unwrap(); + let app2_instance1_exists: bool = redis::cmd("EXISTS") + .arg(format!("test:app:{app_2}:instance:instance1:connections")) + .query(&mut conn) + .unwrap(); + + assert!(!app1_instance1_exists, "App1 instance1 counter should be gone after cleanup"); + assert!(!app2_instance1_exists, "App2 instance1 counter should be gone after cleanup"); + } + + let _ticket3 = rate_limiter2.clone().try_acquire(user_1, Some(app_1.clone())).unwrap(); + + { + let mut conn = redis_client.get_connection().unwrap(); + let app1_instance2_count: usize = 
redis::cmd("GET") + .arg(format!("test:app:{app_1}:instance:instance2:connections")) + .query(&mut conn) + .unwrap(); + + assert_eq!(app1_instance2_count, 1, "App1 instance2 count should be 1"); + } + } +} diff --git a/rust/rollup-boost/crates/websocket-proxy/src/registry.rs b/rust/rollup-boost/crates/websocket-proxy/src/registry.rs new file mode 100644 index 0000000000000..f214525bdf3c8 --- /dev/null +++ b/rust/rollup-boost/crates/websocket-proxy/src/registry.rs @@ -0,0 +1,149 @@ +use crate::{client::ClientConnection, metrics::Metrics}; +use axum::extract::ws::Message; +use futures::{SinkExt, stream::StreamExt}; +use std::{sync::Arc, time::Instant}; +use tokio::{ + sync::broadcast::{Sender, error::RecvError}, + time::{Duration, interval}, +}; +use tracing::{debug, info, trace, warn}; + +#[derive(Clone)] +pub struct Registry { + sender: Sender, + metrics: Arc, + ping_enabled: bool, + pong_timeout_ms: u64, +} + +impl Registry { + pub fn new( + sender: Sender, + metrics: Arc, + ping_enabled: bool, + pong_timeout_ms: u64, + ) -> Self { + Self { sender, metrics, ping_enabled, pong_timeout_ms } + } + + pub async fn subscribe(&self, client: ClientConnection) { + info!(message = "subscribing client", client = client.id()); + + let mut receiver = self.sender.subscribe(); + let metrics = self.metrics.clone(); + metrics.new_connections.increment(1); + + let client_id = client.id(); + let (mut ws_sender, ws_receiver) = client.websocket.split(); + + let (disconnect_client_tx, mut disconnect_client_rx) = tokio::sync::oneshot::channel(); + let client_reader = self.start_reader(ws_receiver, client_id.clone(), disconnect_client_tx); + + loop { + tokio::select! 
{ + broadcast_result = receiver.recv() => { + match broadcast_result { + Ok(msg) => { + if let Err(e) = ws_sender.send(msg.clone()).await { + warn!( + message = "failed to send data to client", + client = client_id, + error = e.to_string() + ); + metrics.failed_messages.increment(1); + break; + } + trace!(message = "message sent to client", client = client_id); + metrics.record_message_sent(&msg); + } + Err(RecvError::Closed) => { + info!(message = "upstream connection closed", client = client_id); + break; + } + Err(RecvError::Lagged(_)) => { + info!(message = "client is lagging", client = client_id); + metrics.lagged_connections.increment(1); + break; + } + } + } + + _ = &mut disconnect_client_rx => { + debug!(message = "client reader signaled disconnect", client = client_id); + break; + } + } + } + + client_reader.abort(); + metrics.closed_connections.increment(1); + + info!(message = "client disconnected", client = client_id); + } + + fn start_reader( + &self, + ws_receiver: futures::stream::SplitStream, + client_id: String, + disconnect_client_tx: tokio::sync::oneshot::Sender<()>, + ) -> tokio::task::JoinHandle<()> { + let ping_enabled = self.ping_enabled; + let pong_timeout_ms = self.pong_timeout_ms; + let metrics = self.metrics.clone(); + + tokio::spawn(async move { + let mut ws_receiver = ws_receiver; + let mut last_pong = Instant::now(); + let mut timeout_checker = interval(Duration::from_millis(pong_timeout_ms / 4)); + let pong_timeout = Duration::from_millis(pong_timeout_ms); + + loop { + tokio::select! 
{ + msg = ws_receiver.next() => { + match msg { + Some(Ok(Message::Pong(_))) => { + if ping_enabled { + trace!(message = "received pong from client", client = client_id); + last_pong = Instant::now(); + } + } + Some(Ok(Message::Close(_))) => { + trace!(message = "received close from client", client = client_id); + let _ = disconnect_client_tx.send(()); + return; + } + Some(Err(e)) => { + trace!( + message = "error receiving from client", + client = client_id, + error = e.to_string() + ); + let _ = disconnect_client_tx.send(()); + return; + } + None => { + trace!(message = "client connection closed", client = client_id); + let _ = disconnect_client_tx.send(()); + return; + } + _ => {} + } + } + + _ = timeout_checker.tick() => { + if ping_enabled && last_pong.elapsed() > pong_timeout { + debug!( + message = "client pong timeout, disconnecting", + client = client_id, + elapsed_ms = last_pong.elapsed().as_millis() + ); + metrics.client_pong_disconnects.increment(1); + let _ = disconnect_client_tx.send(()); + return; + } + } + } + } + }) + } +} diff --git a/rust/rollup-boost/crates/websocket-proxy/src/server.rs b/rust/rollup-boost/crates/websocket-proxy/src/server.rs new file mode 100644 index 0000000000000..b002b0e071ceb --- /dev/null +++ b/rust/rollup-boost/crates/websocket-proxy/src/server.rs @@ -0,0 +1,208 @@ +use crate::{ + auth::Authentication, + client::ClientConnection, + metrics::Metrics, + rate_limit::{RateLimit, RateLimitError}, + registry::Registry, +}; +use axum::{ + Error, Router, + body::Body, + extract::{ConnectInfo, Path, State, WebSocketUpgrade}, + http::StatusCode, + response::{IntoResponse, Response}, + routing::{any, get}, +}; +use http::{HeaderMap, HeaderValue}; +use serde_json::json; +use std::{ + net::{IpAddr, SocketAddr}, + sync::Arc, +}; +use tokio_util::sync::CancellationToken; +use tracing::{info, warn}; + +#[derive(Clone)] +struct ServerState { + registry: Registry, + rate_limiter: Arc, + metrics: Arc, + auth: Authentication, + 
ip_addr_http_header: String, +} + +#[derive(Clone)] +pub struct Server { + listen_addr: SocketAddr, + registry: Registry, + rate_limiter: Arc, + metrics: Arc, + ip_addr_http_header: String, + authentication: Option, +} + +impl Server { + pub fn new( + listen_addr: SocketAddr, + registry: Registry, + metrics: Arc, + rate_limiter: Arc, + authentication: Option, + ip_addr_http_header: String, + ) -> Self { + Self { listen_addr, registry, rate_limiter, metrics, authentication, ip_addr_http_header } + } + + pub async fn listen(&self, cancellation_token: CancellationToken) { + let mut router: Router = Router::new().route("/healthz", get(healthz_handler)); + + if self.authentication.is_some() { + info!("Authentication is enabled"); + router = router.route("/ws/{api_key}", any(authenticated_websocket_handler)); + } else { + info!("Public endpoint is enabled"); + router = router.route("/ws", any(unauthenticated_websocket_handler)); + } + + let router = router.with_state(ServerState { + registry: self.registry.clone(), + rate_limiter: self.rate_limiter.clone(), + metrics: self.metrics.clone(), + auth: self.authentication.clone().unwrap_or_else(Authentication::none), + ip_addr_http_header: self.ip_addr_http_header.clone(), + }); + + let listener = tokio::net::TcpListener::bind(self.listen_addr).await.unwrap(); + + info!(message = "starting server", address = listener.local_addr().unwrap().to_string()); + + axum::serve(listener, router.into_make_service_with_connect_info::()) + .with_graceful_shutdown(cancellation_token.cancelled_owned()) + .await + .unwrap() + } +} + +async fn healthz_handler() -> impl IntoResponse { + StatusCode::OK +} + +async fn authenticated_websocket_handler( + State(state): State, + ws: WebSocketUpgrade, + ConnectInfo(addr): ConnectInfo, + headers: HeaderMap, + Path(api_key): Path, +) -> impl IntoResponse { + let application = state.auth.get_application_for_key(&api_key); + + match application { + None => { + 
state.metrics.unauthorized_requests.increment(1); + + Response::builder() + .status(StatusCode::UNAUTHORIZED) + .body(Body::from(json!({"message": "Invalid API key"}).to_string())) + .unwrap() + } + Some(app) => { + let app = app.clone(); + state.metrics.proxy_connections_by_app(&app); + websocket_handler(state, ws, addr, headers, Some(app)) + } + } +} + +async fn unauthenticated_websocket_handler( + State(state): State, + ws: WebSocketUpgrade, + ConnectInfo(addr): ConnectInfo, + headers: HeaderMap, +) -> impl IntoResponse { + websocket_handler(state, ws, addr, headers, None) +} + +fn websocket_handler( + state: ServerState, + ws: WebSocketUpgrade, + addr: SocketAddr, + headers: HeaderMap, + app: Option, +) -> Response { + let connect_addr = addr.ip(); + + let client_addr = match headers.get(state.ip_addr_http_header) { + None => connect_addr, + Some(value) => extract_addr(value, connect_addr), + }; + + let ticket = match state.rate_limiter.try_acquire(client_addr, app) { + Ok(ticket) => ticket, + Err(RateLimitError::Limit { reason }) => { + state.metrics.rate_limited_requests.increment(1); + + return Response::builder() + .status(StatusCode::TOO_MANY_REQUESTS) + .body(Body::from(json!({"message": reason}).to_string())) + .unwrap(); + } + }; + + ws.on_failed_upgrade(move |e: Error| { + info!( + message = "failed to upgrade connection", + error = e.to_string(), + client = addr.to_string() + ) + }) + .on_upgrade(async move |socket| { + let client = ClientConnection::new(client_addr, ticket, socket); + state.registry.subscribe(client).await; + }) +} + +fn extract_addr(header: &HeaderValue, fallback: IpAddr) -> IpAddr { + if header.is_empty() { + return fallback; + } + + match header.to_str() { + Ok(header_value) => { + let raw_value = header_value.split(',').map(|ip| ip.trim().to_string()).next_back(); + + if let Some(raw_value) = raw_value { + return raw_value.parse::().unwrap_or(fallback); + } + + fallback + } + Err(e) => { + warn!(message = "could not get header 
value", error = e.to_string()); + fallback + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::net::Ipv4Addr; + + #[tokio::test] + async fn test_header_addr() { + let fb = Ipv4Addr::new(127, 0, 0, 1); + + let test = |header: &str, expected: Ipv4Addr| { + let hv = HeaderValue::from_str(header).unwrap(); + let result = extract_addr(&hv, IpAddr::V4(fb)); + assert_eq!(result, expected); + }; + + test("129.1.1.1", Ipv4Addr::new(129, 1, 1, 1)); + test("129.1.1.1,130.1.1.1", Ipv4Addr::new(130, 1, 1, 1)); + test("129.1.1.1 , 130.1.1.1 ", Ipv4Addr::new(130, 1, 1, 1)); + test("nonsense", fb); + test("400.0.0.1", fb); + test("120.0.0.1.0", fb); + } +} diff --git a/rust/rollup-boost/crates/websocket-proxy/src/subscriber.rs b/rust/rollup-boost/crates/websocket-proxy/src/subscriber.rs new file mode 100644 index 0000000000000..211fd24d4e615 --- /dev/null +++ b/rust/rollup-boost/crates/websocket-proxy/src/subscriber.rs @@ -0,0 +1,538 @@ +use crate::metrics::Metrics; +use axum::http::Uri; +use backoff::{ExponentialBackoff, backoff::Backoff}; +use futures::{SinkExt, StreamExt}; +use std::{ + sync::Arc, + time::{Duration, Instant}, +}; +use tokio::{select, sync::oneshot}; +use tokio_tungstenite::{ + connect_async, + tungstenite::{Error, Error::ConnectionClosed, Message}, +}; +use tokio_util::{bytes, sync::CancellationToken}; +use tracing::{error, info, trace, warn}; + +#[derive(Debug, Clone)] +pub struct SubscriberOptions { + pub max_backoff_interval: Duration, + pub backoff_initial_interval: Duration, + pub ping_interval: Duration, + pub pong_timeout: Duration, + pub initial_grace_period: Duration, +} + +impl SubscriberOptions { + pub fn with_max_backoff_interval(mut self, max_backoff_interval: Duration) -> Self { + self.max_backoff_interval = max_backoff_interval; + self + } + + pub fn with_ping_interval(mut self, ping_interval: Duration) -> Self { + self.ping_interval = ping_interval; + self + } + + pub fn with_pong_timeout(mut self, pong_timeout: Duration) -> Self 
{ + self.pong_timeout = pong_timeout; + self + } + + pub fn with_backoff_initial_interval(mut self, backoff_initial_interval: Duration) -> Self { + self.backoff_initial_interval = backoff_initial_interval; + self + } + + pub fn with_initial_grace_period(mut self, initial_grace_period: Duration) -> Self { + self.initial_grace_period = initial_grace_period; + self + } +} + +impl Default for SubscriberOptions { + fn default() -> Self { + Self { + max_backoff_interval: Duration::from_millis(20000), + backoff_initial_interval: Duration::from_millis(500), + ping_interval: Duration::from_millis(2000), + pong_timeout: Duration::from_millis(4000), + initial_grace_period: Duration::from_secs(5), + } + } +} + +pub struct WebsocketSubscriber +where + F: Fn(Vec) + Send + Sync + 'static, +{ + uri: Uri, + handler: F, + backoff: ExponentialBackoff, + metrics: Arc, + options: SubscriberOptions, +} + +impl WebsocketSubscriber +where + F: Fn(Vec) + Send + Sync + 'static, +{ + pub fn new(uri: Uri, handler: F, metrics: Arc, options: SubscriberOptions) -> Self { + let backoff = ExponentialBackoff { + initial_interval: options.backoff_initial_interval, + max_interval: options.max_backoff_interval, + max_elapsed_time: None, + ..Default::default() + }; + + Self { uri, handler, backoff, metrics, options } + } + + pub async fn run(&mut self, token: CancellationToken) { + info!(message = "starting upstream subscription", uri = self.uri.to_string()); + loop { + select! 
{ + _ = token.cancelled() => { + info!( + message = "cancelled upstream subscription", + uri = self.uri.to_string() + ); + return; + } + result = self.connect_and_listen() => { + match result { + Ok(()) => { + info!( + message = "upstream connection closed", + uri = self.uri.to_string() + ); + } + Err(e) => { + error!( + message = "upstream websocket error", + uri = self.uri.to_string(), + error = e.to_string() + ); + self.metrics.upstream_errors.increment(1); + self.metrics.upstream_connections.decrement(1); + + if let Some(duration) = self.backoff.next_backoff() { + warn!( + message = "reconnecting", + uri = self.uri.to_string(), + seconds = duration.as_secs() + ); + select! { + _ = token.cancelled() => { + info!( + message = "cancelled subscriber during backoff", + uri = self.uri.to_string() + ); + return + } + _ = tokio::time::sleep(duration) => {} + } + } + } + } + } + } + } + } + + async fn connect_and_listen(&mut self) -> Result<(), Error> { + info!(message = "connecting to websocket", uri = self.uri.to_string()); + + self.metrics.upstream_connection_attempts.increment(1); + + let (ws_stream, _) = match connect_async(&self.uri).await { + Ok(connection) => { + self.metrics.upstream_connection_successes.increment(1); + connection + } + Err(e) => { + self.metrics.upstream_connection_failures.increment(1); + return Err(e); + } + }; + + info!(message = "websocket connection established", uri = self.uri.to_string()); + + self.metrics.upstream_connections.increment(1); + // Reset backoff timer on successful connection + self.backoff.reset(); + + let (mut write, mut read) = ws_stream.split(); + + let (ping_error_tx, mut ping_error_rx) = oneshot::channel(); + let options = self.options.clone(); + let mut pong_deadline = Instant::now() + options.initial_grace_period; + + let ping_task = tokio::spawn(async move { + let mut interval = tokio::time::interval(options.ping_interval); + loop { + interval.tick().await; + if let Err(e) = 
write.send(Message::Ping(bytes::Bytes::new())).await { + error!(message = "failed to send ping to upstream", error = e.to_string()); + let _ = ping_error_tx.send(e); + break; + } + } + }); + + let mut deadline_check = tokio::time::interval(self.options.pong_timeout / 4); + + let result = loop { + select! { + _ = deadline_check.tick() => { + if Instant::now() >= pong_deadline { + error!( + message = "pong timeout from upstream", + uri = self.uri.to_string() + ); + break Err(ConnectionClosed); + } + } + Ok(ping_err) = &mut ping_error_rx => { + break Err(ping_err); + } + message = read.next() => { + let Some(msg) = message else { + break Ok(()); + }; + if let Err(e) = self.handle_message(msg, &mut pong_deadline, options.pong_timeout).await { + break Err(e); + } + } + } + }; + + ping_task.abort(); + result + } + + async fn handle_message( + &self, + message: Result, + pong_deadline: &mut Instant, + pong_timeout: Duration, + ) -> Result<(), Error> { + let msg = match message { + Ok(msg) => msg, + Err(e) => { + error!( + message = "error receiving message", + uri = self.uri.to_string(), + error = e.to_string() + ); + return Err(e); + } + }; + + match msg { + Message::Text(text) => { + trace!( + message = "received text message", + uri = self.uri.to_string(), + payload = text.as_str() + ); + self.metrics.message_received_from_upstream(self.uri.to_string().as_str()); + (self.handler)(text.as_bytes().to_vec()); + } + Message::Binary(data) => { + trace!( + message = "received binary message", + uri = self.uri.to_string(), + payload = ?data.as_ref() + ); + self.metrics.message_received_from_upstream(self.uri.to_string().as_str()); + (self.handler)(data.as_ref().to_vec()); + } + Message::Pong(_) => { + trace!(message = "received pong from upstream", uri = self.uri.to_string()); + *pong_deadline = Instant::now() + pong_timeout; + } + Message::Close(_) => { + info!(message = "received close frame from upstream", uri = self.uri.to_string()); + return Err(ConnectionClosed); + } + 
_ => {} + } + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::metrics::Metrics; + use axum::http::Uri; + use futures::SinkExt; + use std::{ + net::SocketAddr, + sync::{Arc, Mutex}, + }; + use tokio::{ + net::{TcpListener, TcpStream}, + sync::broadcast, + time::{Duration, sleep, timeout}, + }; + use tokio_tungstenite::accept_async; + + struct MockServer { + addr: SocketAddr, + message_sender: broadcast::Sender>, + shutdown: CancellationToken, + } + + impl MockServer { + async fn new() -> Self { + let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); + let addr = listener.local_addr().unwrap(); + let (tx, _) = broadcast::channel::>(100); + let shutdown = CancellationToken::new(); + let shutdown_clone = shutdown.clone(); + let tx_clone = tx.clone(); + + tokio::spawn(async move { + loop { + select! { + _ = shutdown_clone.cancelled() => { + break; + } + accept_result = listener.accept() => { + match accept_result { + Ok((stream, _)) => { + let tx = tx_clone.clone(); + let shutdown = shutdown_clone.clone(); + tokio::spawn(async move { + Self::handle_connection(stream, tx, shutdown).await; + }); + } + Err(e) => { + eprintln!("Failed to accept: {e}"); + break; + } + } + } + } + } + }); + + Self { addr, message_sender: tx, shutdown } + } + + async fn handle_connection( + stream: TcpStream, + tx: broadcast::Sender>, + shutdown: CancellationToken, + ) { + let ws_stream = match accept_async(stream).await { + Ok(ws_stream) => ws_stream, + Err(e) => { + eprintln!("Failed to accept websocket: {e}"); + return; + } + }; + + let (mut ws_sender, _) = ws_stream.split(); + + let mut rx = tx.subscribe(); + + loop { + select! 
{ + _ = shutdown.cancelled() => { + break; + } + msg = rx.recv() => { + match msg { + Ok(data) => { + if let Err(e) = ws_sender.send(data.into()).await { + eprintln!("Error sending message: {e}"); + break; + } + } + Err(_) => { + break; + } + } + } + } + } + } + + async fn send_message( + &self, + msg: &[u8], + ) -> Result>> { + self.message_sender.send(msg.to_vec()) + } + + async fn shutdown(self) { + self.shutdown.cancel(); + } + + fn uri(&self) -> Uri { + format!("ws://{}", self.addr).parse().expect("Failed to parse URI") + } + } + + #[tokio::test] + async fn test_ping_pong_reconnection() { + let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); + let addr = listener.local_addr().unwrap(); + let uri: Uri = format!("ws://{addr}").parse().unwrap(); + + let shutdown = CancellationToken::new(); + let shutdown_server = shutdown.clone(); + + let connection_count = Arc::new(std::sync::atomic::AtomicU32::new(0)); + let connection_count_server = connection_count.clone(); + + tokio::spawn(async move { + loop { + select! { + _ = shutdown_server.cancelled() => break, + accept_result = listener.accept() => { + if let Ok((stream, _)) = accept_result { + connection_count_server.fetch_add(1, std::sync::atomic::Ordering::SeqCst); + let shutdown_inner = shutdown_server.clone(); + tokio::spawn(async move { + let _ws_stream = match accept_async(stream).await { + Ok(ws) => ws, + Err(_) => return, + }; + + + // Become completely unresponsive - don't read any messages + select! 
{ + _ = shutdown_inner.cancelled() => () + } + }); + } + } + } + } + }); + + let listener_fn = move |_data: Vec| { + // Handler for received messages - not needed for this test + }; + + let options = SubscriberOptions::default() + .with_backoff_initial_interval(Duration::from_millis(100)) + .with_ping_interval(Duration::from_millis(100)) + .with_pong_timeout(Duration::from_millis(200)) + .with_initial_grace_period(Duration::from_millis(50)); + + let mut subscriber = + WebsocketSubscriber::new(uri, listener_fn, Arc::new(Metrics::default()), options); + + let subscriber_task = { + let token_clone = shutdown.clone(); + tokio::spawn(async move { + subscriber.run(token_clone).await; + }) + }; + + // This needs to take into account the poll interval, pong deadline and the backoff + // interval. + sleep(Duration::from_secs(1)).await; + + let connections = connection_count.load(std::sync::atomic::Ordering::SeqCst); + assert!( + connections >= 2, + "Expected at least 2 connection attempts due to ping timeout, got {connections}" + ); + + shutdown.cancel(); + let _ = timeout(Duration::from_secs(1), subscriber_task).await; + } + + #[tokio::test] + async fn test_multiple_subscribers_single_listener() { + let server1 = MockServer::new().await; + let server2 = MockServer::new().await; + + let received_messages = Arc::new(Mutex::new(Vec::new())); + let received_clone = received_messages.clone(); + + let listener = move |data: Vec| { + if let Ok(mut messages) = received_clone.lock() { + messages.push(data); + } + }; + + let metrics = Arc::new(Metrics::default()); + + let token = CancellationToken::new(); + let token_clone1 = token.clone(); + let token_clone2 = token.clone(); + + let uri1 = server1.uri(); + let listener_clone1 = listener.clone(); + let metrics_clone1 = metrics.clone(); + + let mut subscriber1 = WebsocketSubscriber::new( + uri1.clone(), + listener_clone1, + metrics_clone1, + SubscriberOptions::default(), + ); + + let uri2 = server2.uri(); + let listener_clone2 = 
listener.clone(); + let metrics_clone2 = metrics.clone(); + + let mut subscriber2 = WebsocketSubscriber::new( + uri2.clone(), + listener_clone2, + metrics_clone2, + SubscriberOptions::default(), + ); + + let task1 = tokio::spawn(async move { + subscriber1.run(token_clone1).await; + }); + + let task2 = tokio::spawn(async move { + subscriber2.run(token_clone2).await; + }); + + sleep(Duration::from_millis(500)).await; + + let _ = server1.send_message("Message from server 1".as_bytes()).await; + let _ = server2.send_message("Message from server 2".as_bytes()).await; + + sleep(Duration::from_millis(500)).await; + + let _ = server1.send_message("Another message from server 1".as_bytes()).await; + let _ = server2.send_message("Another message from server 2".as_bytes()).await; + + // Wait for messages to be processed + sleep(Duration::from_millis(500)).await; + + // Cancel the token to shut down subscribers + token.cancel(); + let _ = timeout(Duration::from_secs(1), task1).await; + let _ = timeout(Duration::from_secs(1), task2).await; + + server1.shutdown().await; + server2.shutdown().await; + + let messages = match received_messages.lock() { + Ok(guard) => guard, + Err(poisoned) => poisoned.into_inner(), + }; + + assert_eq!(messages.len(), 4); + + assert!(messages.contains(&"Message from server 1".as_bytes().to_vec())); + assert!(messages.contains(&"Message from server 2".as_bytes().to_vec())); + assert!(messages.contains(&"Another message from server 1".as_bytes().to_vec())); + assert!(messages.contains(&"Another message from server 2".as_bytes().to_vec())); + + assert!(!messages.is_empty()); + } +} diff --git a/rust/rollup-boost/crates/websocket-proxy/tests/integration.rs b/rust/rollup-boost/crates/websocket-proxy/tests/integration.rs new file mode 100644 index 0000000000000..0be6f6a63f4da --- /dev/null +++ b/rust/rollup-boost/crates/websocket-proxy/tests/integration.rs @@ -0,0 +1,393 @@ +use axum::extract::ws::Message; +use futures::StreamExt; +use std::{ + 
collections::{HashMap, hash_map::Entry}, + error::Error, + net::SocketAddr, + sync::{Arc, Mutex}, + time::Duration, +}; +use tokio::{ + net::TcpListener, + sync::{broadcast, broadcast::Sender}, + task::JoinHandle, +}; +use tokio_tungstenite::connect_async; +use tokio_util::sync::CancellationToken; +use tracing::error; +use websocket_proxy::{ + auth::Authentication, metrics::Metrics, rate_limit::InMemoryRateLimit, registry::Registry, + server::Server, +}; + +struct TestHarness { + received_messages: Arc>>>, + clients_failed_to_connect: Arc>>, + current_client_id: usize, + cancel_token: CancellationToken, + server: Server, + server_addr: SocketAddr, + client_id_to_handle: HashMap>, + sender: Sender, +} + +impl TestHarness { + async fn alloc_port() -> SocketAddr { + let address = SocketAddr::from(([127, 0, 0, 1], 0)); + let listener = TcpListener::bind(&address).await.unwrap(); + listener.local_addr().unwrap() + } + fn new(addr: SocketAddr) -> TestHarness { + TestHarness::new_with_auth(addr, None) + } + + fn new_with_auth(addr: SocketAddr, auth: Option) -> TestHarness { + let (sender, _) = broadcast::channel(5); + let metrics = Arc::new(Metrics::default()); + let registry = Registry::new(sender.clone(), metrics.clone(), false, 120000); + let app_rate_limits = + if let Some(auth) = &auth { auth.get_rate_limits() } else { HashMap::new() }; + let rate_limited = Arc::new(InMemoryRateLimit::new(3, 10, app_rate_limits)); + + Self { + received_messages: Arc::new(Mutex::new(HashMap::new())), + clients_failed_to_connect: Arc::new(Mutex::new(HashMap::new())), + current_client_id: 0, + cancel_token: CancellationToken::new(), + server: Server::new(addr, registry, metrics, rate_limited, auth, "header".to_string()), + server_addr: addr, + client_id_to_handle: HashMap::new(), + sender, + } + } + + async fn healthcheck(&self) -> Result<(), Box> { + let url = format!("http://{}/healthz", self.server_addr); + let response = reqwest::get(url).await?; + match response.error_for_status() 
{ + Ok(_) => Ok(()), + Err(e) => Err(e.into()), + } + } + + async fn start_server(&mut self) { + let cancel_token = self.cancel_token.clone(); + let server = self.server.clone(); + + // todo! + let _server_handle = tokio::spawn(async move { + _ = server.listen(cancel_token).await; + }); + + let mut healthy = true; + for _ in 0..5 { + let resp = self.healthcheck().await; + match resp { + Ok(_) => { + healthy = true; + break; + } + Err(_) => { + tokio::time::sleep(Duration::from_millis(25)).await; + } + } + } + + assert!(healthy); + } + + async fn can_connect(&mut self, path: &str) -> bool { + let uri = format!("ws://{}/{}", self.server_addr, path); + (connect_async(uri).await).is_ok() + } + + fn connect_client(&mut self) -> usize { + let uri = format!("ws://{}/ws", self.server_addr); + + let client_id = self.current_client_id; + self.current_client_id += 1; + + let results = self.received_messages.clone(); + let failed_conns = self.clients_failed_to_connect.clone(); + + let handle = tokio::spawn(async move { + let (ws_stream, _) = match connect_async(uri).await { + Ok(results) => results, + Err(_) => { + failed_conns.lock().unwrap().insert(client_id, true); + return; + } + }; + + let (_, mut read) = ws_stream.split(); + + loop { + match read.next().await { + Some(Ok(msg)) => { + match results.lock().unwrap().entry(client_id) { + Entry::Occupied(o) => { + o.into_mut().push(msg.to_string()); + } + Entry::Vacant(v) => { + v.insert(vec![msg.to_string()]); + } + }; + } + Some(Err(e)) => { + error!(message = "error receiving message", error = e.to_string()); + } + None => {} + } + } + }); + + self.client_id_to_handle.insert(client_id, handle); + client_id + } + + fn connect_unresponsive_client(&mut self) -> usize { + let uri = format!("ws://{}/ws", self.server_addr); + + let client_id = self.current_client_id; + self.current_client_id += 1; + + let failed_conns = self.clients_failed_to_connect.clone(); + + let handle = tokio::spawn(async move { + let (_ws_stream, _) = 
match connect_async(uri).await { + Ok(results) => results, + Err(_) => { + failed_conns.lock().unwrap().insert(client_id, true); + return; + } + }; + + // Do nothing - just keep the connection alive but don't read messages or respond to + // pings + loop { + tokio::time::sleep(Duration::from_millis(1000)).await; + } + }); + + self.client_id_to_handle.insert(client_id, handle); + client_id + } + + fn send_messages(&mut self, messages: Vec<&str>) { + for message_str in messages.iter() { + let message = Message::Binary(message_str.as_bytes().to_vec().into()); + self.sender.send(message).expect("failed to send message"); + } + } + + async fn wait_for_messages_to_drain(&mut self) { + let mut drained = false; + for _ in 0..5 { + let len = self.sender.len(); + if len > 0 { + tokio::time::sleep(Duration::from_millis(5)).await; + continue; + } else { + drained = true; + break; + } + } + assert!(drained); + } + + fn messages_for_client(&mut self, client_id: usize) -> Vec { + match self.received_messages.lock().unwrap().get(&client_id) { + Some(messages) => messages.clone(), + None => vec![], + } + } + + async fn stop_client(&mut self, client_id: usize) { + if let Some(handle) = self.client_id_to_handle.remove(&client_id) { + handle.abort(); + _ = handle.await; + } else { + panic!("client_id {client_id} not found") + } + } +} + +#[tokio::test] +async fn test_healthcheck() { + let addr = TestHarness::alloc_port().await; + let mut harness = TestHarness::new(addr); + assert!(harness.healthcheck().await.is_err()); + harness.start_server().await; + assert!(harness.healthcheck().await.is_ok()); +} + +#[tokio::test] +async fn test_clients_receive_messages() { + let addr = TestHarness::alloc_port().await; + + let mut harness = TestHarness::new(addr); + harness.start_server().await; + + let client_one = harness.connect_client(); + let client_two = harness.connect_client(); + + tokio::time::sleep(Duration::from_millis(100)).await; + + harness.send_messages(vec!["one", "two"]); + 
harness.wait_for_messages_to_drain().await; + + assert_eq!(vec!["one", "two"], harness.messages_for_client(client_one)); + assert_eq!(vec!["one", "two"], harness.messages_for_client(client_two)); +} + +#[tokio::test] +async fn test_server_limits_connections() { + let addr = TestHarness::alloc_port().await; + + let mut harness = TestHarness::new(addr); + harness.start_server().await; + + let client_one = harness.connect_client(); + let client_two = harness.connect_client(); + let client_three = harness.connect_client(); + let client_four = harness.connect_client(); + + tokio::time::sleep(Duration::from_millis(100)).await; + + harness.send_messages(vec!["one", "two"]); + harness.wait_for_messages_to_drain().await; + + assert_eq!(vec!["one", "two"], harness.messages_for_client(client_one)); + assert_eq!(vec!["one", "two"], harness.messages_for_client(client_two)); + assert_eq!(vec!["one", "two"], harness.messages_for_client(client_three)); + + // Client four was not able to be setup as the test has a limit of three + assert!(harness.messages_for_client(client_four).is_empty()); + assert!(harness.clients_failed_to_connect.lock().unwrap()[&client_four]); +} + +#[tokio::test] +async fn test_deregister() { + let addr = TestHarness::alloc_port().await; + + let mut harness = TestHarness::new(addr); + harness.start_server().await; + + assert_eq!(harness.sender.receiver_count(), 0); + + let client_one = harness.connect_client(); + let client_two = harness.connect_client(); + let client_three = harness.connect_client(); + + tokio::time::sleep(Duration::from_millis(100)).await; + + assert_eq!(harness.sender.receiver_count(), 3); + + harness.send_messages(vec!["one", "two"]); + harness.wait_for_messages_to_drain().await; + + assert_eq!(vec!["one", "two"], harness.messages_for_client(client_one)); + assert_eq!(vec!["one", "two"], harness.messages_for_client(client_two)); + assert_eq!(vec!["one", "two"], harness.messages_for_client(client_three)); + + 
harness.stop_client(client_three).await; + tokio::time::sleep(Duration::from_millis(100)).await; + + // It takes a couple of messages for dead clients to disconnect. + harness.send_messages(vec!["three"]); + harness.wait_for_messages_to_drain().await; + harness.send_messages(vec!["four"]); + harness.wait_for_messages_to_drain().await; + + // Client three is disconnected + assert_eq!(harness.sender.receiver_count(), 2); + + let client_four = harness.connect_client(); + tokio::time::sleep(Duration::from_millis(100)).await; + assert_eq!(harness.sender.receiver_count(), 3); + + harness.send_messages(vec!["five"]); + harness.wait_for_messages_to_drain().await; + harness.send_messages(vec!["six"]); + harness.wait_for_messages_to_drain().await; + + assert_eq!( + vec!["one", "two", "three", "four", "five", "six"], + harness.messages_for_client(client_one) + ); + assert_eq!( + vec!["one", "two", "three", "four", "five", "six"], + harness.messages_for_client(client_two) + ); + assert_eq!(vec!["one", "two"], harness.messages_for_client(client_three)); + assert_eq!(vec!["five", "six"], harness.messages_for_client(client_four)); +} + +#[tokio::test] +async fn test_authentication_disables_public_endpoint() { + let addr = TestHarness::alloc_port().await; + let auth = Authentication::none(); + + let mut harness = TestHarness::new_with_auth(addr, Some(auth)); + harness.start_server().await; + + assert!(!(harness.can_connect("ws").await)); +} + +#[tokio::test] +async fn test_authentication_allows_known_api_keys() { + let addr = TestHarness::alloc_port().await; + let auth = Authentication::new( + HashMap::from([ + ("key1".to_string(), "app1".to_string()), + ("key2".to_string(), "app2".to_string()), + ("key3".to_string(), "app3".to_string()), + ]), + HashMap::from([ + ("app1".to_string(), 10), + ("app2".to_string(), 10), + ("app3".to_string(), 10), + ]), + ); + + let mut harness = TestHarness::new_with_auth(addr, Some(auth)); + harness.start_server().await; + + 
assert!(harness.can_connect("ws/key1").await); + assert!(harness.can_connect("ws/key2").await); + assert!(harness.can_connect("ws/key3").await); + assert!(!(harness.can_connect("ws/key4").await)); +} + +#[tokio::test] +async fn test_ping_timeout_disconnects_client() { + let addr = TestHarness::alloc_port().await; + + let (sender, _) = broadcast::channel(5); + let metrics = Arc::new(Metrics::default()); + let registry = Registry::new(sender.clone(), metrics.clone(), true, 1000); + let rate_limited = Arc::new(InMemoryRateLimit::new(3, 10, HashMap::new())); + + let mut harness = TestHarness { + received_messages: Arc::new(Mutex::new(HashMap::new())), + clients_failed_to_connect: Arc::new(Mutex::new(HashMap::new())), + current_client_id: 0, + cancel_token: CancellationToken::new(), + server: Server::new(addr, registry, metrics, rate_limited, None, "header".to_string()), + server_addr: addr, + client_id_to_handle: HashMap::new(), + sender, + }; + + harness.start_server().await; + + let _client_id = harness.connect_unresponsive_client(); + tokio::time::sleep(Duration::from_millis(100)).await; + + assert_eq!(harness.sender.receiver_count(), 1); + + harness.sender.send(Message::Ping(vec![].into())).unwrap(); + tokio::time::sleep(Duration::from_millis(1500)).await; + + assert_eq!(harness.sender.receiver_count(), 0); +} diff --git a/rust/rollup-boost/deny.toml b/rust/rollup-boost/deny.toml new file mode 100644 index 0000000000000..2b0ec92bce736 --- /dev/null +++ b/rust/rollup-boost/deny.toml @@ -0,0 +1,48 @@ +[licenses] +allow = [ + "MIT", + "Apache-2.0", + "Apache-2.0 WITH LLVM-exception", + "Unicode-3.0", + "MPL-2.0", + "ISC", + "CC0-1.0", + "BSD-2-Clause", + "BSD-3-Clause", + "0BSD", + "Zlib", + "OpenSSL", + "CDLA-Permissive-2.0", + "Unlicense", +] +confidence-threshold = 0.8 + +[advisories] +ignore = [ + # paste crate is unmaintained + "RUSTSEC-2024-0436", + # backoff crate is unmaintained + "RUSTSEC-2025-0012", + # instant crate is unmaintained + "RUSTSEC-2024-0384", + 
# DoS vulnerability on alloy_dyn_abi::TypedData hashing, will be fixed on reth update + "RUSTSEC-2025-0073", +] + +[graph] +exclude = ["rollup-boost"] +exclude-dev = true + +[bans] +multiple-versions = "allow" +wildcards = "warn" +highlight = "all" + +[sources] +unknown-registry = "warn" +unknown-git = "warn" +allow-registry = ["https://github.com/rust-lang/crates.io-index"] +allow-git = [] + +[sources.allow-org] +github = ["paradigmxyz"] diff --git a/rust/rollup-boost/docs/NM_0411_0491_Security_Review_World_Rollup_Boost.pdf b/rust/rollup-boost/docs/NM_0411_0491_Security_Review_World_Rollup_Boost.pdf new file mode 100644 index 0000000000000..fdb975c27075b Binary files /dev/null and b/rust/rollup-boost/docs/NM_0411_0491_Security_Review_World_Rollup_Boost.pdf differ diff --git a/rust/rollup-boost/docs/design-philosophy-testing-strategy.md b/rust/rollup-boost/docs/design-philosophy-testing-strategy.md new file mode 100644 index 0000000000000..3df2932c1cdc0 --- /dev/null +++ b/rust/rollup-boost/docs/design-philosophy-testing-strategy.md @@ -0,0 +1,32 @@ +# Rollup-Boost: Design Philosophy and Testing Strategy + +## Design Philosophy + +Rollup-Boost occupies a critical position in the Op-stack architecture, serving as the bridge between the sequencer and the block builder. This positioning informs our entire design philosophy: **reliability above all else**. + +At its core, Rollup-Boost exists to ensure system liveness. The sequencer must be able to produce blocks even when external components like dedicated builders experience issues. This critical responsibility guides our primary design principle: + +**Minimize Complexity**: Rollup-Boost has a defined scope: it proxies and mirrors RPC requests for block building. While the system's position could enable additional features (like broadcasting transactions too), we deliberately limit scope creep. If a new feature introduces complexity and potential failure points, we carefully evaluate its inclusion. 
We prioritize rock-solid core functionality over feature richness. In particular, we focus on minimizing complexity on the hot path, keeping critical components like the proxy layer as simple and reliable as possible. + +Looking ahead, we are considering restructuring Rollup-Boost into a modular rollup-boost-sdk library. This approach would make it easier to add different flavors or strategies safely. By extracting core functionality into a well-tested library, we could enable various implementations while maintaining the reliability of critical components. This architectural evolution would help balance innovation with our commitment to stability on the critical path. + +### Architecture Decisions + +The `src/proxy.rs` component contains the core logic of mirroring engine requests and stands as the most critical part of the system. We maintain this component with exceptional care, recognizing that changes here carry the highest risk. Despite its importance, this module remains relatively simple by design. + +## Testing Strategy + +We employ a layered testing strategy that provides defense in depth: + +**Unit Tests** verify individual components, but as this is a distributed system, they only get us so far. + +**Integration Tests** serve as our most critical testing layer. Located in `tests`, these tests use a simulated environment to verify system behavior under various conditions: + +- How does the system respond when the builder produces invalid blocks? +- What happens when the builder experiences high latency? +- How does the system behave during execution mode transitions? +- Can the system recover when the builder becomes unavailable and later returns? + +Each integration test creates a complete test environment with mock L2 and builder nodes, simulating real-world scenarios that exercise the system's resilience features. 
One limitation of this approach is that issues in the mock CL node might not surface in these tests, offering quick feedback but potentially missing certain edge cases. + +**End-to-End Tests** (planned) will use actual components in a production-like environment. These tests using Builder Playground will help ensure our test assumptions match real-world behavior. diff --git a/rust/rollup-boost/docs/flashblocks.md b/rust/rollup-boost/docs/flashblocks.md new file mode 100644 index 0000000000000..942e57a0feaf3 --- /dev/null +++ b/rust/rollup-boost/docs/flashblocks.md @@ -0,0 +1,142 @@ +# Flashblocks + +Flashblocks is a feature in rollup-boost that enables pre-confirmations by proposing incremental sections of blocks. This guide walks you through setting up a complete Flashblocks environment with rollup-boost, op-rbuilder, and a fallback builder. + +## Overview + +The setup consists of three main components: + +- **rollup-boost**: The main service with Flashblocks enabled +- **op-rbuilder**: A builder with Flashblocks support +- **op-reth**: A fallback builder (standard EL node) + +## Prerequisites + +- Rust toolchain installed +- Access to the rollup-boost and op-rbuilder repositories + +## Setup Instructions + +### 1. Start rollup-boost with Flashblocks + +Launch rollup-boost with Flashblocks enabled: + +```bash +cargo run --bin rollup-boost -- \ + --l2-url http://localhost:5555 \ + --builder-url http://localhost:4445 \ + --l2-jwt-token 688f5d737bad920bdfb2fc2f488d6b6209eebda1dae949a8de91398d932c517a \ + --builder-jwt-token 688f5d737bad920bdfb2fc2f488d6b6209eebda1dae949a8de91398d932c517a \ + --rpc-port 4444 \ + --flashblocks \ + --log-level info +``` + +This command uses the default Flashblocks configuration. For custom configurations, see the [Flashblocks Configuration](#flashblocks-configuration) section below. + +### 2. 
Generate Genesis Configuration + +Navigate to the op-rbuilder directory and create a genesis file: + +```bash +cd op-rbuilder +cargo run -p op-rbuilder --bin tester --features testing -- genesis > genesis.json +``` + +### 3. Start the op-rbuilder + +Launch op-rbuilder with Flashblocks enabled using the generated genesis file: + +```bash +cargo run --bin op-rbuilder -- node \ + --chain genesis.json \ + --datadir data-builder \ + --port 3030 \ + --flashblocks.enabled \ + --disable-discovery \ + --authrpc.port 4445 \ + --authrpc.jwtsecret ./crates/op-rbuilder/src/tests/framework/artifacts/test-jwt-secret.txt \ + --http +``` + +**Note**: The JWT token is located at `./crates/op-rbuilder/src/tests/framework/artifacts/test-jwt-secret.txt` and matches the configuration used in rollup-boost. + +### 4. Start the Fallback Builder + +Launch op-reth as a fallback builder: + +```bash +op-reth node \ + --chain genesis.json \ + --datadir one \ + --port 3131 \ + --authrpc.port 5555 \ + --disable-discovery \ + --authrpc.jwtsecret ./crates/op-rbuilder/src/tests/framework/artifacts/test-jwt-secret.txt +``` + +This runs a standard op-reth execution layer node that serves as the fallback builder for rollup-boost. + +### 5. 
Simulate the Consensus Layer + +Use the built-in tester utility to simulate a consensus layer node: + +```bash +cargo run -p op-rbuilder --bin tester --features testing -- run +``` + +## Configuration Details + +### Port Configuration + +- `4444`: rollup-boost RPC port +- `4445`: op-rbuilder auth RPC port (matches rollup-boost builder URL) +- `5555`: op-reth auth RPC port (matches rollup-boost L2 URL) +- `3030`: op-rbuilder P2P port +- `3131`: op-reth P2P port + +### Flashblocks Configuration + +rollup-boost provides several configuration options for Flashblocks functionality: + +#### Basic Flashblocks Flag + +- `--flashblocks`: Enable Flashblocks client (required) + - Environment variable: `FLASHBLOCKS` + +#### WebSocket Connection Settings + +- `--flashblocks-builder-url `: Flashblocks Builder WebSocket URL + + - Environment variable: `FLASHBLOCKS_BUILDER_URL` + - Default: `ws://127.0.0.1:1111` + +- `--flashblocks-host `: Flashblocks WebSocket host for outbound connections + + - Environment variable: `FLASHBLOCKS_HOST` + - Default: `127.0.0.1` + +- `--flashblocks-port `: Flashblocks WebSocket port for outbound connections + - Environment variable: `FLASHBLOCKS_PORT` + - Default: `1112` + +#### Connection Management + +- `--flashblock-builder-ws-reconnect-ms `: Timeout duration if builder disconnects + - Environment variable: `FLASHBLOCK_BUILDER_WS_RECONNECT_MS` + - No default value specified + +#### Example with Custom Configuration + +```bash +cargo run --bin rollup-boost -- \ + --l2-url http://localhost:5555 \ + --builder-url http://localhost:4445 \ + --rpc-port 4444 \ + --flashblocks \ + --flashblocks-builder-url ws://localhost:9999 \ + --flashblocks-host 0.0.0.0 \ + --flashblocks-port 2222 \ + --flashblock-builder-ws-reconnect-ms 5000 \ + --log-level info +``` diff --git a/rust/rollup-boost/docs/load-testing-acceptance-criteria-template.md b/rust/rollup-boost/docs/load-testing-acceptance-criteria-template.md new file mode 100644 index 
0000000000000..1eee04e72a36f --- /dev/null +++ b/rust/rollup-boost/docs/load-testing-acceptance-criteria-template.md @@ -0,0 +1,157 @@ +## Overview + +This document serves as a generic acceptance criteria checklist template to be run through before deploying network upgrades through Rollup Boost. Copy this checklist and fill it out with the appropriate data for your specific chain before upgrading your network. + +## **Environment** +- **Network:** [Specify the network] +- **RPC Endpoint:** [Specify the blockchain network RPC URL] +- **Contender Load Testing Scenarios:** +- **Block Builder Version:** [Specify the deployed version] +- **OP Stack Version:** [GitHub link to release] +- **Test Period:** [Specify the test period] + +## **Acceptance Criteria Checklist** +### **1. Overall Network Performance** + +🎯 **Peak Gas Utilization (GPS)** +- **Description:** Measures the highest observed gas consumption per second in the network. +- **Expected:** 5 million GPS +- **Measured:** [Insert measured value] +- **Supporting Data:** [Link to results] + +🎯 **Peak Transactions per Second (TPS)** +- **Description:** Measures the maximum number of transactions successfully processed per second. +- **Expected:** [Expected TPS] +- **Measured:** [Insert measured value] +- **Supporting Data:** [Link to results] + +🎯 **Average Block Time** +- **Description:** Time taken between consecutive block proposals. +- **Expected:** 1 second +- **Measured:** [Insert measured value] +- **Supporting Data:** [Link to results] + +--- + +### **2. Chain Operator Performance Metrics** + +***RPC Ingress Metrics (Transaction Handling)*** + +🎯 **Peak Burst Transactions per Second (TPS)** +- **Description:** Measures the highest rate at which transactions are submitted to the RPC within a short burst.
+- **Expected:** 1,000 TPS +- **Measured:** [Insert measured value] +- **Supporting Data:** [Link to results] + +🎯 **Sustained Transactions per Second (TPS)** +- **Description:** Measures the average transaction ingress rate over a sustained 20-minute period. +- **Expected:** 200 TPS +- **Measured:** [Insert measured value] +- **Supporting Data:** [Link to results] + +***RPC Latency Metrics*** + +🎯 **Average RPC Response Latency** +- **Description:** Measures the typical response time for RPC requests. +- **Expected:** ≤ 60 ms +- **Measured:** [Insert measured value] +- **Supporting Data:** [Link to results] + +🎯 **Max RPC Response Latency** +- **Description:** Measures the worst-case response time for RPC requests. +- **Expected:** ≤ 120 ms +- **Measured:** [Insert measured value] +- **Supporting Data:** [Link to results] + +***RPC Egress (Block Builder Communication)*** + +🎯 **Average Block Builder Response Latency** +- **Expected:** ≤ 60 ms +- **Measured:** [Insert measured value] +- **Supporting Data:** [Link to results] + +🎯 **Max Block Builder Response Latency** +- **Expected:** ≤ 100 ms +- **Measured:** [Insert measured value] +- **Supporting Data:** [Link to results] + +***Sequencer & Rollup Boost Health Checks*** + +🎯 **Sequencer Resource Utilization (CPU, Memory, Disk, Network)** +- **Expected:** No resource exhaustion or anomalies. +- **Measured:** [Insert measured value] +- **Supporting Data:** [Link to results] + +🎯 **Rollup Boost Resource Utilization (CPU, Memory, Disk, Network)** +- **Expected:** No resource exhaustion or anomalies. +- **Measured:** [Insert measured value] +- **Supporting Data:** [Link to results] + +🎯 **RPC Endpoint Stability** +- **Expected:** No downtime or unexpected crashes. +- **Measured:** [Insert measured value] +- **Supporting Data:** [Link to results] + +🎯 **Non-Transaction RPC Method Stability** +- **Description:** Ensures non-transaction-related RPC methods function without failure. +- **Expected:** No failures.
+- **Measured:** [Insert measured value] +- **Supporting Data:** [Link to results] + +--- + +### **3. Block Builder Performance Metrics** + +🎯 **Missed Block Rate** +- **Description:** Percentage of expected blocks that were not produced. +- **Expected:** ≤ 10% +- **Measured:** [Insert measured value] +- **Supporting Data:** [Link to results] + +🎯 **End-to-End Block Build Latency** + +**Description:** Time taken from transaction ingress to finalized block production. +- **Expected:** ≤ 100 ms +- **Measured:** [Insert measured value] +- **Supporting Data:** [Link to results] + +🎯 **Block Submission Response Latency** +- **Expected:** ≤ 100 ms +- **Measured:** [Insert measured value] +- **Supporting Data:** [Link to results] + +🎯 **Block Builder Health Checks (CPU, Memory, Disk, Network)** +- **Expected:** No resource exhaustion or anomalies. +- **Measured:** [Insert measured value] +- **Supporting Data:** [Link to results] + +--- + +### **4. System-Wide Reliability Checks** + +🎯 **100% Transaction Forwarding Accuracy** +- **Description:** Ensures all transactions sent to the system are correctly received and logged. +- **Expected:** 100% transaction acceptance. +- **Measured:** [Insert measured value] +- **Supporting Data:** [Link to results] + +🎯 **Observability: Full Transaction Traceability** +- **Description:** Verifies that transactions can be traced end-to-end across all infrastructure components. +- **Expected:** Given a random transaction hash during load testing, timestamps for each step in the transaction pipeline can be observed. 
+- **Measured:** + - **Client Submission Timestamp:** [Measured result] + - **Chain Operator Ingress Timestamp:** [Measured result] + - **Block Builder Ingress Timestamp:** [Measured result] + - **Block Submission Timestamp:** [Measured result] + +--- + +### **Final Decision** + +✅ **All criteria met?** +- **Decision:** [Yes/No] +- **Date:** [Specify the test period] + +**Notes & Recommendations** + +• [Add any additional observations, issues, or follow-up actions] \ No newline at end of file diff --git a/rust/rollup-boost/docs/reorgs.md b/rust/rollup-boost/docs/reorgs.md new file mode 100644 index 0000000000000..492bd23697df6 --- /dev/null +++ b/rust/rollup-boost/docs/reorgs.md @@ -0,0 +1,6 @@ + +# Reorgs in Rollup-Boost + +Rollup-boost remains unaffected by blockchain reorganizations due to its stateless design as a pure proxy layer between the consensus layer (op-node) and execution engines. During the sequencing process, when the sequencer derives L2 blocks from L1 data within sequencing windows (ranges of L1 blocks spanning the sequencer window size, currently defaulting to 3600 epochs), any reorgs that occur in the underlying L1 chain or affect the sequencing window are handled transparently by rollup-boost's forwarding mechanism. + +When reorgs impact the sequencing epoch derivation or cause drift in the L2 chain state, rollup-boost simply proxies all Engine API calls—including fork choice updates reflecting the new canonical chain and payload requests for reorg recovery—directly to both the builder and local execution client without maintaining any state about the reorganization. The actual reorg handling, including re-deriving the correct L2 blocks from the updated sequencing windows and managing any resulting drift, is performed by the underlying execution engines (e.g op-geth, op-reth) which receive these reorg signals through the standard Engine API methods that rollup-boost transparently forwards. 
diff --git a/rust/rollup-boost/docs/rollup-boost-ha.md b/rust/rollup-boost/docs/rollup-boost-ha.md new file mode 100644 index 0000000000000..41c753e931705 --- /dev/null +++ b/rust/rollup-boost/docs/rollup-boost-ha.md @@ -0,0 +1,245 @@ +# Table of Contents + +- [Context/Scope](#contextscope) + - [Goals](#goals) + - [Non Goals](#non-goals) +- [Design](#design) + - [Overview](#overview) + - [Health Checks](#health-checks) + - [Execution Mode](#execution-mode) + - [Debug API](#debug-api) + - [Failure Scenarios](#failure-scenarios) + +# Context/Scope + +The current OP Stack sequencer HA design relies on `op-conductor` to manage a cluster of sequencers. Each node runs a local conductor instance, and the cluster forms a Raft quorum to elect a single leader responsible for block production. The conductor continuously monitors the health of the sequencer, electing a new leader when the current leader is unhealthy. The leader notifies its local sequencer to run in sequencing mode, allowing `op-node` to send FCUs with payload attributes signaling the execution client to build a new payload. All follower instances are run without sequencer mode enabled, ensuring that only one sequencer is producing blocks at a time. + +

+ +

+ +With the introduction of `rollup-boost`, an additional component is introduced that sits in-between `op-node` and `op-geth` that forwards Engine API requests to an external builder. + +This design document outlines the architecture, components, and failure strategies required for HA `rollup-boost`. The proposed design prioritizes fault tolerance, liveliness, horizontal scalability, and minimal failover time while maintaining upstream compatibility with `op-conductor`. + +This design is structured to be forward compatible with Flashblocks, however it does not define the specifics of Flashblocks handling, streaming, or HA guarantees. A separate design document will extend this work to detail how Flashblocks will be integrated into the OP Stack with support for HA. + +## Goals + +- Explore HA designs for `rollup-boost` prioritizing liveliness, fault tolerance and horizontal scalability for external block builders. +- Maintain compatibility with `op-conductor` and its sequencing assumptions. +- Explore designs that are forward compatible with the Flashblocks spec. + +## Non Goals + +- Define how Flashblocks are handled, consumed or streamed to the network. +- Define how pending transactions are relayed/peered across the builders/sequencer execution clients. +- Monitoring / alerting strategies. This can be specified in a separate document once an architecture is solidified. + +
+ +# Design + +## Overview + +The following design builds on the existing HA sequencer setup by introducing a `rollup-boost` instance between each `op-node` and its local `op-geth` instance. In this model, each `rollup-boost` is paired with a single external builder and default execution client. When `op-node` sends an FCU containing payload attributes, `rollup-boost` forwards the request to both the default execution client and its paired builder. + +Upon receiving a `get_payload` request from `op-node`, `rollup-boost` forwards the call to both the builder and default execution client. If the builder returns a payload, it is validated via a `new_payload` call to the sequencer's local execution client. If the builder payload is invalid or unavailable, `rollup-boost` falls back to the local execution client’s payload. + +![1:1 Builder to Rollup Boost](../assets/1-1-builder-rb.png) + +In the event of sequencer failover, `op-conductor` elects a new leader, promoting a different `op-node` along with its associated `rollup-boost` and builder instance. Since each builder is isolated and only serves requests from its local `rollup-boost`, no coordination between builders is required. This separation mirrors the existing HA model of the OP Stack, extending it to external block production. + +This approach is operationally simple and relies on the same liveness and fault tolerance guarantees already provided by the OP Stack's sequencer HA setup. Note that `rollup-boost` does not currently feature a block selection policy and will optimistically select the builder's block for validation. In the event of a bug in the builder, it is possible valid, but undesirable blocks (eg. empty blocks) are produced. Without a block selection policy, `rollup-boost` will prefer the builder's block over the default execution client. 
If the builder produces undesirable but valid blocks, operators must either manually disable external block production via the `rollup-boost` debug API, disable the block builder directly (causing health checks to fail), or manually select a new sequencer leader. Proper monitoring alerting can help mitigate this but further designs should be explored to introduce safeguards into rollup-boost directly rather than relying on the builder implementation being correct. + +Below is a happy path sequence diagram illustrating how `rollup-boost` facilitates payload construction/validation: + +```mermaid +sequenceDiagram + participant CL as op-node + participant RB as rollup-boost + participant EL as op-geth + participant B as Builder + + %% FCU forwarded with payload attrs + Note over CL: FCU with Attributes + CL->>RB: engine_forkChoiceUpdated(..., Attrs) + RB->>EL: engine_forkChoiceUpdated(..., Attrs) + RB->>B: engine_forkChoiceUpdated(..., Attrs) + B->>B: Start payload job + + %% get_payload from builders + CL->>RB: engine_getPayload(PayloadId) + RB->>EL: engine_getPayload(PayloadId) + RB->>B: engine_getPayload(PayloadId) + B-->>RB: {executionPayload, blockValue} + EL-->>RB: {executionPayload, blockValue} + + %% block selection policy + RB->>RB: Optimistically select builder block + + %% Validate best payload + Note over RB, EL: Validate builder block + RB->>EL: engine_newPayload(ExecutionPayload) + EL->>RB: {status: VALID, ...} + + %% Propagate block to builders + Note over RB: Propagate new block to builder + RB->>B: engine_newPayload(ExecutionPayload) + + %% Return final payload to op-node + RB->>CL: {executionPayload, blockValue} + +``` + +
+ +## Health Checks + +In high availability deployments, `op-conductor` must assess the full health of the block production path. Rollup Boost will expose a composite `/healthz` endpoint to report on both builder synchronization and payload production status. These checks allow `op-conductor` to detect degraded block building conditions and make informed leadership decisions. + +Rollup Boost continuously monitors two independent conditions to inform the health of the builder and the default execution client: + +- **Builder Synchronization**: + A background task periodically queries the builder’s latest unsafe block via `engine_getBlockByNumber`. The task compares the timestamp of the returned block to the local system time. If the difference exceeds a configured maximum unsafe interval (`max_unsafe_interval`), the builder is considered out of sync. Failure to fetch a block from the builder or detection of an outdated block timestamp results in the health status being downgraded to Partial. If the builder is responsive and the block timestamp is within the acceptable interval, the builder is considered synchronized and healthy. Alternatively instead of periodic polling, builder synchronization can be inferred if the builder returns a `VALID` response to a `newPayload` call forwarded from Rollup Boost. + +- **Payload Production**: + During each `get_payload` request, Rollup Boost will verify payload availability from both the builder and the execution client. If the builder fails to deliver a payload, Rollup Boost will report partial health. If the execution client fails to deliver a payload, Rollup Boost will report unhealthy. + +`op-conductor` should also be configurable in how it interprets health status for failover decisions. This allows chain operators to define thresholds based on their risk tolerance and operational goals.
For example, operators may choose to maintain leadership with a sequencer reporting `206 Partial Content` to avoid unnecessary fail overs or they may configure `op-conductor` to immediately fail over when any degradation is detected. This flexibility allows the chain operator to configure a failover policy that aligns with network performance expectations and builder reliability. + +
+ +| Condition | Health Status | +|:----------|:--------------| +| Builder is synced and both execution client and builder return payloads | `200 OK` (Healthy) | +| Builder is out of sync| `206 Partial Content` (Partially Healthy) | +| Builder fails to return payload on `get_payload` request | `206 Partial Content` (Partially Healthy) | +| Execution client fails to return payload on `get_payload` request | `503 Service Unavailable` (Unhealthy) | + +`op-conductor` should query the `/healthz` endpoint exposed by Rollup Boost in addition to the existing execution client health checks. Health should be interpreted as follows: + +- `200 OK` (Healthy): The node is fully healthy and eligible for leadership. +- `206 Partial Content` (Partially Healthy): The node is degraded but may be considered for leadership if configured by operator +- `503 Service Unavailable` (Unhealthy): The node is unhealthy and must be excluded from leadership. + +During normal operation and leadership transfers, `op-conductor` should prioritize sequencer candidates in the following order: + +1. Prefer nodes reporting `200 OK`. +2. Nodes that return `503 Service Unavailable` are treated as unhealthy and must not be eligible for sequencer leadership. `op-conductor` should offer a configuration option to treat nodes returning `206 Partial Content` as either healthy or unhealthy. + +Rollup Boost instances that are not actively sequencing rely on the builder sync check to report health, as they are not producing blocks. This behavior mirrors the existing `op-conductor` health checks for inactive sequencers and ensures readiness during failover without compromising network liveness guarantees. Note that `op-conductor` will still evaluate existing sequencer health checks to determine overall sequencer health. 
+ +Note that in the case where the builder is unhealthy, `rollup-boost` should bypass forwarding block production requests to the builder entirely and immediately use the default execution client for payload construction. This avoids introducing unnecessary latency to wait for the builder response to timeout. + +When builder health is restored, normal request forwarding and payload selection behavior will resume. + +
+ +## Execution mode + +`ExecutionMode` is a configuration setting that controls how `rollup-boost` interacts with the external builder during block production. Execution mode can be set either at startup via CLI flags or dynamically modified at runtime through the [Debug API](#debug-api). +Operators can use `ExecutionMode` to selectively forward or bypass builder interactions, enabling dry runs during deployments or fully disabling external block production during emergencies. + +The available execution modes are: + +- `Enabled` + - `rollup-boost` forwards all Engine API requests to both the builder and default execution client. + - Optimistically selects the builder’s payload for validation and block publication. + - Falls back to the local execution client *only* if the builder fails to produce a payload or the payload is invalid. + - Default setting for normal external block production. + +- `DryRun` + - `rollup-boost` forwards all Engine API requests to both the builder and default execution client. + - Builder payloads are validated with the local execution client but the default execution client block will always be returned to `op-node` to propagate to the network. + - Useful during deployments, dry runs, or to validate builder behavior without publishing builder blocks to the network. + +- `Disabled` + - `rollup-boost` does not forward any Engine API requests to the builder. + - Block construction is handled exclusively by the default execution client. + - Useful as an emergency shutoff switch in the case of critical failures/emergencies. + +```rust +pub enum ExecutionMode { + /// Forward Engine API requests to the builder, validate builder payloads and propagate to the network + Enabled, + /// Forward Engine API requests to the builder, validate builder payloads but + /// fallback to default execution payload + DryRun, + // Do not forward Engine API requests to the builder + Disabled, +} +``` + +
+ +## Debug API + +`rollup-boost` exposes a Debug API that allows operators to inspect and modify the current execution mode at runtime without restarting the service. This provides flexibility to dynamically enable, disable, or dry-run external block production based on builder behavior or network conditions. The Debug API is served over HTTP using JSON RPC and consists of the following endpoints: + +### `debug_setExecutionMode` + +Sets the current execution mode for `rollup-boost`. + +**Request**: + +``` +{ + "method": "debug_setExecutionMode", + "params": [ "enabled" | "dry_run" | "disabled" ], + "id": 1, + "jsonrpc": "2.0" +} +``` + +**Response**: + +``` +{ + "result": null, + "id": 1, + "jsonrpc": "2.0" +} +``` + +### `debug_getExecutionMode` + +Retrieves the current execution mode. + +**Request**: + +``` +{ + "method": "debug_getExecutionMode", + "params": [], + "id": 1, + "jsonrpc": "2.0" +} +``` + +**Response:** + +``` +{ + "result": "enabled" | "dry_run" | "disabled", + "id": 1, + "jsonrpc": "2.0" +} +``` + +
+ +## Failure Scenarios + +Below is a high level summary of how each failure scenario is handled. All existing failure modes assumed by upstream `op-conductor` are maintained: + +| Failure Scenario | Category | Scenario and Solution | +| --- | --- | --- | +| Leader Sequencer Execution Client Fails | Sequencer Failure | `op-conductor` will detect an unhealthy status from both `rollup-boost` and pre-existing sequencer health checks, causing Conductor to elect a new leader. Once the default execution client has recovered, `rollup-boost` will update its health status to `200` and the sequencer will continue operating normally as a follower. | +| Follower Sequencer Execution Client Fails | Sequencer Failure | Both `rollup-boost` and pre-existing sequencer health checks will report "unhealthy". Once the default execution client has recovered, `rollup-boost` will update its health status to `200` and the sequencer will continue operating normally as a follower. In the event of leadership transfer, this sequencer instance will not be considered for leadership.| +| Leader `rollup-boost` Fails | Rollup Boost Failure | Leader sequencer `rollup-boost` becomes unhealthy, causing `op-conductor`'s sequencer health checks to fail and attempt to elect a new leader. This failure mode is the same as a typical leader sequencer failure. Once the sequencer recovers, it will continue to participate in the cluster as a follower| +| Follower `rollup-boost` Fails | Rollup Boost Failure | Follower sequencer `rollup-boost` becomes unhealthy. The leader sequencer is unaffected. Once the sequencer recovers, it will continue to participate in the cluster as a follower.| +| Leader Builder Stops Producing Blocks | Builder Failure | The builder associated with the sequencer leader stops producing new payloads. `rollup-boost` will detect the builder failure via background health checks and downgrade its health status to partial.
This will result in `rollup-boost` ignoring the builder and selecting the default execution client's payload for block production. If `op-conductor` is configured to failover upon partial `rollup-boost` health, a new leader will attempt to be elected. Once the builder recovers and resumes payload production, `rollup-boost` will update its health to `200` and resume with normal operation. | +| Leader Builder Falls Out of Sync | Builder Failure | The builder associated with the sequencer leader falls out of sync with the chain head. `rollup-boost` will detect the unsynced state via the background health checks and downgrade its health status to partial. This will result in `rollup-boost` ignoring builder payloads and selecting the default execution client payload for block production until the builder is resynced. If `op-conductor` is configured to failover upon partial `rollup-boost` health, a new leader will attempt to be elected. Once the builder recovers, `rollup-boost` will update its health to `200` and resume with normal operation. | +| Follower Builder Falls Out of Sync | Builder Failure | The builder associated with a follower sequencer falls out of sync with the chain head. Block production is unaffected while the node remains a follower. In the event a leader election occurs and `op-conductor` is configured to treat partial health as "unhealthy", this instance will not be eligible for leadership. Once the builder recovers, `rollup-boost` will report `200 OK` and resume normal operation.| +| Leader Builder Producing Bad Blocks| Builder Failure| In this scenario, the builder is "healthy" but producing bad blocks (e.g. empty blocks). If the builder block passes validation via a `new_payload` call to the default execution client, it will be proposed to the network. Manual intervention is needed to either switch to a different sequencer or shut off the builder.
Further mitigation can be introduced via block selection policy allowing `rollup-boost` to select the "healthiest" block. Currently, it is unclear what block selection policy would provide the strongest guarantees.| diff --git a/rust/rollup-boost/docs/running-rollup-boost.md b/rust/rollup-boost/docs/running-rollup-boost.md new file mode 100644 index 0000000000000..7147658b79b4a --- /dev/null +++ b/rust/rollup-boost/docs/running-rollup-boost.md @@ -0,0 +1,84 @@ +# Running Rollup Boost + +Rollup boost acts as a proxy between the proposer and its execution engine. It is stateless and can be run with a regular sequencer setup or in a high availability setup with `op-conductor`. + +![rollup-boost-architecture](../assets/rollup-boost-architecture.png) + +## Regular Sequencer Setup + +To run rollup boost with a regular sequencer setup, change the `--l2` flag in the proposer `op-node` to point to the rollup boost rpc endpoint. + +To configure rollup-boost, set the l2 url to the url of the proposer auth rpc endpoint and the builder url to the builder auth rpc endpoint. + +```bash +cargo run --bin rollup-boost -- --l2-url http://localhost:8551 --builder-url http://localhost:8546 +``` + +To set up a builder, you can use [`op-rbuilder`](https://github.com/flashbots/op-rbuilder) with an op-node instance and have rollup-boost point to the builder auth rpc endpoint. It is recommended that boost sync is enabled on rollup-boost to sync the builder with the proposer op-node to remove the p2p networking overhead. In testing, this reduces latency significantly from around 200-300 milliseconds to just 3-4 milliseconds in local environments. + +Depending on the block time of the rollup, you can set the `builder_timeout` flag for failsafe guarantees such that rollup-boost will not wait too long for a builder to respond. The default timeout is 200ms, with the assumption that the builder will be geographically close to the proposer. 
There is also a `l2_timeout` flag which is set to 2000ms by default, which is the timeout for the local execution engine to respond to the proposer. + +Optionally, you can set a separate builder jwt token or path to the proposer jwt token via the `--builder-jwt-token` and `--builder-jwt-path` flags. If not provided, the proposer jwt token will be used. + +## High Availability Setup with Rollup Boost + +One potential way to setup rollup boost in a high availability setup is with `op-conductor` to run multiple instances of rollup boost and have them proxy to the same builder. + +While this does not ensure high availability for the builder, the chain will have a high availability setup for the fallback node. If the proposer execution engine is healthy, `op-conductor` will assume the sequencer is healthy. + +![rollup-boost-op-conductor](../assets/rollup-boost-op-conductor.png) + +### Health Checks + +`rollup-boost` supports the standard array of kubernetes probes: + +- `/healthz` Returns various status codes to communicate `rollup-boost` health - 200 OK - The builder is producing blocks - 206 Partial Content - The l2 is producing blocks, but the builder is not - 503 Service Unavailable - Neither the l2 or the builder is producing blocks + `op-conductor` should eventually be able to use this signal to switch to a different sequencer in an HA sequencer setup. In a future upgrade to `op-conductor`, A sequencer leader with a healthy (200 OK) EL (`rollup-boost` in our case) could be selected preferentially over one with an unhealthy (206 or 503) EL. If no ELs are healthy, then we can fallback to an EL which is responding with `206 Partial Content`. + +- `/readyz` Used by kubernetes to determine if the service is ready to accept traffic. Should always respond with `200 OK` + +- `/livez` determines whether or not `rollup-boost` is live (running and not deadlocked) and responding to requests. 
If `rollup-boost` fails to respond, kubernetes can use this as a signal to restart the pod. Should always respond with `200 OK` + +## Observability + +To check if the rollup-boost server is running, you can check the health endpoint: + +``` +curl http://localhost:8081/healthz +``` + +### Metrics + +To enable metrics, you can set the `--metrics` flag. This will start a metrics server which will run on port 9090 by default. To see the list of metrics, you can checkout [metrics.rs](../src/metrics.rs) and ping the metrics endpoint: + +``` +curl http://localhost:9090/metrics +``` + +All spans create duration histogram metrics with the name "{span_name}\_duration". Currently, this list includes: + +- fork_choice_updated_v3_duration +- get_payload_v3_duration +- new_payload_v3_duration + +Additionally, execution engines such as op-rbuilder have RPC metrics exposed to check if `engine_getPayloadV3` requests have been received. To check if the builder blocks are landing on-chain, the builder can be configured to include a builder transaction in the block, which is captured as part of the builder metrics. To see more details about observability in op-rbuilder, you can check op-rbuilder's [README](https://github.com/flashbots/rollup-boost?tab=readme-ov-file#rollup-boost). + +### Tracing + +Tracing is enabled by setting the `--tracing` flag. This will start exporting traces to the otlp endpoint specified in the `--otlp-endpoint` flag. This endpoint is set to `http://localhost:4317` by default. + +Traces use the payload id to track the block building lifecycle. A distributed tracing system such as [Jaeger](https://www.jaegertracing.io/) can be used to visualize when the proposer triggers block building via `engine_forkchoiceUpdatedV3` and retrieve the block with `engine_getPayloadV3`. 
+ +## Troubleshooting Builder Responses + +### Invalid Builder Payloads + +If there are logs around the builder payload being invalid, it is likely there is an issue with the builder and you will need to contact the builder operator to resolve it. In this case rollup-boost will use the local payload and chain liveness will not be affected. You can also manually set rollup-boost to dry run mode using the debug api to stop payload requests to the builder, silencing the error logs. + +It is also possible that either the builder or the proposer execution engine are not running on compatible hard fork versions. Please check that the clients are running on compatible versions of the op-stack. + +### Builder Syncing + +Alternatively, the builder may be syncing with the chain and not have a block to respond with. You can see in the logs the builder is syncing by checking whether the payload_status of builder calls is `SYNCING`. + +This is expected if the builder is still syncing with the chain. Chain liveness will not be affected as rollup-boost will use the local payload. Contact the builder operator if the sync status persists as the builder op-node may be offline or not peered correctly with the network. 
diff --git a/rust/rollup-boost/specs/flashblocks.md b/rust/rollup-boost/specs/flashblocks.md new file mode 100644 index 0000000000000..6d018680f8b30 --- /dev/null +++ b/rust/rollup-boost/specs/flashblocks.md @@ -0,0 +1,1029 @@ +*Authors: [Ferran](https://github.com/ferranbt), [Dmarz](https://github.com/dmarzzz), [Shana](https://github.com/avalonche), [0xkitsune](https://github.com/0xkitsune), [protolambda](https://github.com/protolambda), [Anton](https://github.com/0x416e746f6e), [Joshua](https://github.com/trianglesphere)* + +**Table of Contents** +- [Abstract](#abstract) +- [Prerequisites](#prerequisites) +- [Motivation](#motivation) +- [Specification](#specification) + - [Terminology](#terminology) + - [Parameters](#parameters) + - [Data structures](#data-structures) + - [**`FlashblocksPayloadV1`**](#flashblockspayloadv1) + - [**`ExecutionPayloadFlashblockResultV1`**](#executionpayloadflashblockresultv1) + - [**`ExecutionPayloadStaticV1`**](#executionpayloadstaticv1) + - [**`Metadata`**](#metadata) + - [**`AccountMetadata`**](#accountmetadata) + - [**`StorageSlot`**](#storageslot) + - [**`TransactionMetadata`**](#transactionmetadata) + - [System architecture](#system-architecture) + - [Out-of-Protocol Design](#out-of-protocol-design) + - [In-Protocol vs. 
Out-of-Protocol](#in-protocol-vs-out-of-protocol) + - [Design Rationale and Benefits](#design-rationale-and-benefits) + - [Implications for This Specification](#implications-for-this-specification) + - [Assumptions About Op Stack](#assumptions-about-op-stack) + - [Flashblock Lifecycle](#flashblock-lifecycle) + - [Flashblock Construction Process](#flashblock-construction-process) + - [Handling of Sequencer Transactions](#handling-of-sequencer-transactions) + - [Transaction Inclusion Heuristics](#transaction-inclusion-heuristics) + - [Post-block Execution Rules](#post-block-execution-rules) + - [Construction Steps](#construction-steps) + - [Flashblocks Metadata](#flashblocks-metadata) + - [Alternative Design Consideration](#alternative-design-consideration) + - [Rationale for Including State Roots in Flashblocks](#rationale-for-including-state-roots-in-flashblocks) + - [Non-Blocking Block Production](#non-blocking-block-production) + - [Builder Availability and System Reliability](#builder-availability-and-system-reliability) + - [Future Design Considerations](#future-design-considerations) + - [Builder-to-Rollup-boost Communication Flow](#builder-to-rollup-boost-communication-flow) + - [Flashblock Validity Rules](#flashblock-validity-rules) + - [Flashblock System Invariants](#flashblock-system-invariants) + - [Flashblock Propagation](#flashblock-propagation) + - [Secure propagation](#secure-propagation) + - [Flashblock JSON-RPC APIs](#flashblock-json-rpc-apis) + - [Ethereum JSON RPC Modifications](#ethereum-json-rpc-modifications) + - [op\_supportedCapabilities](#op_supportedcapabilities) +- [Reliability and Operational Considerations](#reliability-and-operational-considerations) + - [Transaction Propagation](#transaction-propagation) + - [Failover scenarios](#failover-scenarios) + - [Block Builder](#block-builder) + - [The Sequencer or Rollup-boost](#the-sequencer-or-rollup-boost) + - [Integration with High Availability Sequencer 
Setups](#integration-with-high-availability-sequencer-setups) + - [Faults](#faults) + - [Safety Faults](#safety-faults) + - [Liveness Faults](#liveness-faults) +- [Rationale](#rationale) + - [Why out-of-protocol](#why-out-of-protocol) + - [Why not shorter block times](#why-not-shorter-block-times) +- [Backwards Compatibility](#backwards-compatibility) + - [End Users](#end-users) + - [Infrastructure Operators](#infrastructure-operators) +- [Implementation](#implementation) + + +# Abstract + +Introduces a standard for partial blocks called “Flashblocks,” inspired but not entirely identical to [Solana Shreds](https://github.com/solana-foundation/specs/blob/main/p2p/shred.md), enabling rapid preconfirmations on Ethereum Layer 2 networks such as OP Stack. Flashblocks propagate transaction batches incrementally and expose their state via a modified Ethereum JSON-RPC interface, giving users immediate feedback equivalent to drastically reduced block times without modifying the underlying OP Stack protocol. Flashblocks can be combined with Trusted Execution Environment technology to enable quick verifiability over various networks of machines in addition to protection from equivocation. 
+ +# Prerequisites + +This document assumes knowledge of the terminology, definitions, and other material in + +- [🔗 Ethereum Optimism Protocol Specs](https://github.com/ethereum-optimism/specs/tree/main/specs/protocol) +- [🔗 OP Stack Engine API](https://specs.optimism.io/protocol/exec-engine.html#engine-api) +- [🔗 External Block Production in OP Stack Design Doc](https://github.com/ethereum-optimism/design-docs/blob/main/protocol/external-block-production.md) +- [🔗 Ethereum Execution APIs](https://github.com/ethereum/execution-apis/tree/main) +- [🔗 Introducing Rollup-Boost - Launching on Unichain](https://writings.flashbots.net/introducing-rollup-boost) +- [🔗 Rollup-boost design doc](https://www.notion.so/RFD-1-Rollup-boost-1996b4a0d876802f95d1c98387e38162?pvs=21) + +# Motivation + +As of April 2025, Layer 2 (L2) protocols built with the OP Stack have a minimum block time of one second, imposing significant constraints on user experience. The limitation on minimum block times is primarily historical and architectural, reflecting earlier assumptions of the Ethereum network as well as deeply-integrated type definitions, from the L2 blockchain client all the way down to smart contracts on the L1, making modification a very large task. + +Due to similar constraints on Ethereum Layer 1, preconfirmations have gained attention as a promising method to decouple blockchain user experiences from rigid block-time limitations and sidestep the longstanding debate between block time and block size. Existing preconfirmation solutions predominantly depend on economic security in the form of cryptoeconomic mechanisms such as staking, as well as focus on per-transaction preconfirmations, inadvertently pushing protocols into the “Latency Auction” region of the [MEV Trilemma](https://writings.flashbots.net/introducing-rollup-boost). 
Furthermore, previous approaches have often introduced entirely new Ethereum JSON-RPC methods, presenting substantial integration barriers and hindering practical adoption. + +Inspired by modern blockchain networks like Solana and Celestia, Flashblocks introduce an “out-of-protocol” standard for incremental delivery of partial blocks containing batches of transactions. This approach significantly reduces perceived latency for end-users and improves network bandwidth without modifying underlying protocol rules, offering a streamlined path for incremental adoption by node operators and existing infrastructure. + +# Specification + +## Terminology + +All terms, actors, and components are used in this document identically to how they are defined in the [OP Stack protocol definition](https://github.com/ethereum-optimism/specs/blob/main/specs/glossary.md). + +Additional terms introduced: + +- **External Block Builder** - External Block Builders are first introduced to the OP Stack in the [External Block Production Design Document](https://github.com/ethereum-optimism/design-docs/blob/main/protocol/external-block-production.md) where they are described as an external party that the Sequencer can request blocks from. +- **Rollup Boost** - A sidecar piece of software first introduced without name in the [External Block Production Design Document](https://github.com/ethereum-optimism/design-docs/blob/main/protocol/external-block-production.md) with two roles: + 1. obfuscate the presence of External Block Builder software from the `op-node` and `op-geth` software + 2. manage communication from the sequencer with External Block Builders and handle block delivery to `op-node`. +- **Fallback EL** - The standard Execution Layer of the Sequencer, used by Rollup Boost as a fallback mechanism when it cannot successfully build a block through the External Block Builder. 
This is an unmodified EL node that maintains the ability to construct valid blocks according to standard OP Stack protocol rules. +- **RPC Provider** - Ethereum RPC software operator with the purpose of serving Ethereum state. + +## Parameters + +| **Constant** | **Value** | **Description** | +| --- | --- | --- | +| `FLASHBLOCKS_TIME` | 200ms | Default wall clock time per flashblock. | +| `FLASHBLOCKS_PER_L2_BLOCK` | `L2_BLOCK_TIME`/`FLASHBLOCKS_TIME` | Supported number of flashblocks per L2 block. (Ex: 2s/200ms = 10 Flashblocks) | +| `MAX_EXTRA_DATA_BYTES` | 32 | Extra data size in an Optimism block | +| `BYTES_PER_LOGS_BLOOM` | 256 | Size of a logs bloom field in an Optimism block | + +## Data structures + +### **`FlashblocksPayloadV1`** + +The core data structure sent from the Block Builder to Rollup Boost and then external parties. A container representing a Flashblock payload, encapsulating block deltas, base configuration, and additional metadata. + +```python +class FlashblocksPayloadV1(): + version: Bytes4 + payload_id: Bytes8 + parent_flash_hash: Optional[Bytes32] + index: uint64 + static: Optional[ExecutionPayloadStaticV1] + diff: ExecutionPayloadFlashblockResultV1 + metadata: FlashblocksMetadata +``` + +**Field descriptions:** + +- `payload_id`: PayloadID is an identifier of the payload build process. The same for all flashblocks. +- `index`: Index of the Flashblock within the parent block. +- `parent_flash_hash`: SSZ hash of the parent flashblock in the sequence. For the first flashblock (index 0), the field is empty. +- `static` *(Optional)*: Reference execution payload serving as the unchanging base configuration. +- `diff`: Container with fields representing changes from the base payload. +- `metadata`: Supplementary information about the execution of the flashblock. For example: account state changes, storage modifications, transaction receipts. 
+ +### **`ExecutionPayloadFlashblockResultV1`** + +Container encoding only the mutable portions of the execution payload updated during Flashblock construction. + +```python +class ExecutionPayloadFlashblockResultV1(): + state_root: Bytes32 + receipts_root: Bytes32 + logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM] + gas_used: uint64 + block_hash: Bytes32 + transactions: List[Transaction] + withdrawals: List[Withdrawal] + withdrawals_root: Bytes32 +``` + +**Field descriptions:** + +- `state_root`: Root hash of the post-execution state trie. +- `receipts_root`: Root hash of the transaction receipts trie. +- `logs_bloom`: Bloom filter of all logs emitted by the block. +- `gas_used`: Gas consumed by included transactions. +- `block_hash`: Final hash of the completed execution block. +- `transactions`: List of transactions included in the Flashblock. +- `withdrawals`: Withdrawals included (as per Optimism specification). Must be non-nil but empty when `withdrawals_root` is used directly. +- `withdrawals_root`: OP-Stack Isthmus specific field: instead of computing the root from a withdrawals list, set it directly. The "withdrawals" list attribute must be non-nil but empty. + +**Supporting Type Definitions** + +- `Transaction`: Transaction bytes as per execution payload specification. +- `Withdrawal`: Standard Ethereum Capella withdrawal container. + +All fields in this structure represent the cumulative state of the entire block up to and including the current flashblock, not just the changes from this specific flashblock. + +### **`ExecutionPayloadStaticV1`** + +Container representing immutable fundamental block properties established at initial block creation, unchanged throughout construction. 
+ +```python +class ExecutionPayloadStaticV1(): + parent_beacon_block_root: Bytes32 + parent_hash: Bytes32 + fee_recipient: ExecutionAddress + prev_randao: Bytes32 + block_number: uint64 + gas_limit: uint64 + timestamp: uint64 + extra_data: ByteList[MAX_EXTRA_DATA_BYTES] + base_fee_per_gas: uint256 +``` + +**Field descriptions:** + +- `parent_beacon_block_root`: Ecotone parent beacon block root. +- `parent_hash`: Hash of the parent execution block. +- `fee_recipient`: Address receiving transaction fees. +- `prev_randao`: Previous block’s RANDAO reveal for randomness. +- `block_number`: Sequential execution block number. +- `gas_limit`: Maximum allowable gas consumption for the block. +- `timestamp`: Unix timestamp at block creation. +- `extra_data`: Arbitrary extra data bytes included in the block header. +- `base_fee_per_gas`: Base fee per gas unit at the block. + +### **`Metadata`** + +Container encapsulating all metadata for a flashblock, including account state changes and transaction results. + +```python +class FlashblockMetadata(): + accounts: List[AccountMetadata] + transactions: List[TransactionMetadata] +``` + +**Field descriptions:** + +- `accounts`: List of accounts with modified state in this flashblock. +- `transactions`: List of transaction execution results in this flashblock. + +### **`AccountMetadata`** + +Container representing account state changes included in the Flashblock metadata. It is used by providers to fulfill the RPC requests. + +```python +class AccountMetadata(): + address: ExecutionAddress + balance: Optional[uint256] + nonce: uint64 + code: Optional[Bytes] + storage_slots: List[StorageSlot] +``` + +**Field descriptions:** + +- `address`: Ethereum address of the affected account. +- `balance`: Updated account balance after the Flashblock's execution (None if unchanged). +- `nonce`: Updated account nonce (transaction count) after the Flashblock's execution. +- `code`: Contract bytecode if created in this Flashblock. 
+- `storage_slots`: List of modified storage slots and their new values. + +Storage slot keys must be de-duplicated (only the final value for each key should be included) and sorted in ascending byte order for deterministic processing. + +### **`StorageSlot`** + +Container representing a single modified storage slot within an account. + +```python +class StorageSlot(): + key: Bytes32 + value: Bytes32 +``` + +**Field descriptions:** + +- `key`: Storage slot location (32-byte key). +- `value`: New value stored at this slot after the Flashblock's execution. + +### **`TransactionMetadata`** + +Container representing succinct transaction execution results. + +```python +class TransactionMetadata(): + status: uint8 + gas_used: uint64 + contract_address: Optional[ExecutionAddress] +``` + +**Field descriptions:** + +- `status`: Execution status (1 for success, 0 for failure). +- `gas_used`: Amount of gas used by this specific transaction. +- `contract_address`: Address of created contract (None for non-creation transactions). + +## System architecture + +The following diagram illustrates the Flashblocks system architecture, showing the relationships between key components: + +```mermaid + +flowchart LR + subgraph Sequencer + ON[OP Node] + RB[Rollup Boost] + FEL[Fallback EL] + BB[Block Builder] + end + + subgraph Network + WSP[WebSocket Proxy] + end + + subgraph Clients + RPC[RPC Providers] + Users[End Users] + end + + ON --> RB + RB --> FEL + RB <--> BB + RB --> WSP + WSP --> RPC + RPC --> Users +``` + +This architecture shows the flow of data through the Flashblocks system: + +1. The **OP Node** initiates block production and sends requests to **Rollup Boost** +2. **Rollup Boost** coordinates between multiple components: + - It communicates with the **Block Builder** to create Flashblocks + - It maintains a connection to the **Fallback EL** for reliability if the Block Builder fails + - It propagates validated Flashblocks to the network via the **WebSocket Proxy** +3. 
The **WebSocket Proxy** distributes Flashblocks to multiple **RPC Providers** +4. **RPC Providers** serve preconfirmation data to **End Users** + +The rest of this document provides detailed specifications for each component and their interactions, explaining the protocols, data structures, and operational considerations. + +## Out-of-Protocol Design + +The Flashblocks specification follows a deliberate "out-of-protocol" design philosophy. This section clarifies what we mean by this term and explains its implications for the OP Stack ecosystem. + +### In-Protocol vs. Out-of-Protocol + +In the context of OP Stack, "in-protocol" components form the core protocol itself. These components implement fundamental consensus rules, are required for basic rollup functionality, and need standardization across all participants. Modifying in-protocol components requires protocol-level changes and network-wide upgrades. + +By contrast, "out-of-protocol" components like Flashblocks operate as optional extensions to the core protocol. They can be added or removed without breaking the consensus rules of the network, though they may still impact network performance or operations if implemented poorly. + +The only in-protocol guarantee that Flashblocks must uphold is producing valid blocks at the established block time interval (typically 1-2 seconds in current OP Stack implementations). + +### Design Rationale and Benefits + +The out-of-protocol design for Flashblocks emerged from practical constraints during initial development. Without strong coordination with the OP Stack team at the outset, and given the complexity of the challenge, working within the existing protocol boundaries was the most pragmatic approach. + +This constraint ultimately proved beneficial, as it forced the design to be minimally invasive. Flashblocks can be implemented immediately on any OP Stack chain without waiting for protocol upgrades or network-wide consensus. 
+ +Any issues with the Flashblocks implementation remain isolated from the core protocol, protecting overall network stability. In case of serious problems, Flashblocks can be disabled entirely, allowing the system to revert to normal operation without disrupting the underlying rollup. This clean fallback mechanism benefits from the centralized trust model of L2s, where the sequencer has the authority to quickly enact such operational changes without requiring network-wide consensus. + +Now that the usefulness of the system has been proven, as more collaboration venues with the OP Stack team emerge, integrating parts of Flashblocks directly into the protocol could provide even stronger guarantees and open the design space for future innovations. We are considering that approach too in the future. + +### Implications for This Specification + +Most elements defined in this document are out-of-protocol components that operate as extensions to the core OP Stack. The only hard guarantee the system must provide is that valid blocks are delivered at the expected intervals. + +Everything else—from how Flashblocks are constructed and propagated to how RPC providers implement preconfirmation caches—represents iterative improvements designed to achieve the goal of faster user feedback as efficiently and impactfully as possible. + +This means the specification describes a recommended implementation path rather than rigid protocol requirements. Components can evolve independently without requiring protocol-level coordination, and implementations may vary in how they achieve the same functional goals. + +## Assumptions About Op Stack + +The Flashblocks design makes several assumptions about OP Stack behavior: + +- **Quick Response for engine_getPayload**: We assume that `engine_getPayload` requests should return as quickly as possible for a normal and healthy chain. 
+- **Deterministic Payload IDs**: While not specific to Flashblocks but to Rollup Boost in general, we assume that payload IDs from different execution layer nodes are deterministically computed for the same ForkChoiceUpdate request. This is not explicitly enforced in specifications, but execution layers tend to maintain this consistency as a practical implementation detail. + +## Flashblock Lifecycle + +Note that familiarity with Rollup-boost is expected throughout this entire document, as Flashblocks is designed as an extension built on top of the existing Rollup-boost architecture. + +The lifecycle of a Flashblock begins with the Sequencer initiating block creation and ends with a normal L2 block consisting of all delivered flashblocks propagating according to the OP Stack protocol. The process proceeds as follows: + +1. **Fork Choice Update**: + + The Sequencer initiates the block-building cycle by sending an `engine_forkchoiceUpdated` with attributes call to Rollup Boost as it normally would to its local Execution Engine. + +2. **Fork Choice Update Forwarding**: + + Rollup Boost forwards the `engine_forkchoiceUpdated` call concurrently to: + + - The Sequencer’s local Execution Engine + - The External Block Builder +3. **Flashblock Construction**: + + Upon receiving the fork choice update, the External Block Builder constructs and continuously delivers `FlashblocksPayloadV1` at intervals defined by `FLASHBLOCKS_TIME` following the **Flashblocks Construction Process** defined in this document. + + It's important to emphasize that during this process, the External Block Builder sends only the incremental changes in each Flashblock, not the full block state each time. Each `FlashblocksPayloadV1` contains just the delta from the previous state (new transactions, updated state roots, etc.), allowing for efficient bandwidth usage and faster propagation. 
+ + Only the first Flashblock (with `index` 0) includes the `static` field containing immutable block data, while subsequent Flashblocks omit this field since this information remains constant throughout the block's construction. Each Flashblock includes a `parent_flash_hash` that references the SSZ hash of the previous Flashblock in the sequence, creating a hash-linked chain within the block. + + The combined information received across all flashblocks is sufficient to fully reconstruct the complete block without any additional data. + +4. **Flashblock Validation and Propagation**: + + For each received `FlashblocksPayloadV1`, Rollup Boost validates it against the Sequencer’s local Execution Engine and according to the **Flashblocks Validity Rules** defined in this document. Upon successful validation, Rollup Boost propagates the payload to all subscribed Flashblock-compatible RPC providers. + +5. **Preconfirmed State Updates**: + + Flashblock-compatible RPC providers insert validated payloads into their local Preconfirmed State Overlay, providing immediate preconfirmation states to end-users via Flashblock-enhanced Ethereum JSON-RPC endpoints. + +6. **Final L2 Block Delivery**: + + When the Sequencer calls `engine_getPayload`, Rollup Boost returns a single coherent block payload based on the validated Flashblocks received since the last fork choice update. Note that this does not require additional external requests or any last-minute processing. + +7. **Full Block Propagation**: + + The Sequencer propagates the aggregated block following standard OP Stack protocol rules. + + +```mermaid +sequenceDiagram + participant S as Sequencer Driver (op-node) + participant RB as Rollup Boost + participant EE as Sequencer Execution Engine (op-geth) + participant BB as External Block Builder + participant RPC as Flashblock RPC Providers + participant U as End Users + + rect rgb(230,247,255) + Note over S: 1. 
**Fork Choice Update** + S->>RB: engine_forkchoiceUpdated + end + + rect rgb(255,242,230) + Note over RB: 2. **Fork Choice Update Forwarding** + RB->>EE: engine_forkchoiceUpdated + RB->>BB: engine_forkchoiceUpdated + end + + rect rgb(230,255,235) + loop **Flashblock Construction** (every FLASHBLOCKS_TIME) + Note over BB: **Flashblock Construction** + BB->>RB: FlashblocksPayloadV1 + + rect rgb(252,244,255) + Note over RB, EE: 4. **Flashblock Validation and Propagation** + RB->>EE: Validate Payload + EE-->>RB: Validation Result + + alt Success + RB->>RPC: Propagate Valid Payload + Note over RPC: 5. **Preconfirmed State Updates** + RPC->>RPC: Update State Overlay + RPC->>U: Serve Preconfirmed State + else Failure + RB-->>RB: Discard and fallback + end + end + end + end + + rect rgb(255,249,196) + Note over S, RB: 6. **Final L2 Block Delivery** + S->>RB: engine_getPayload + RB->>S: Aggregate Payload + end + + rect rgb(240,240,240) + Note over S: 7. **Full Block Propagation** + S->>S: Propagate Block (standard OP Stack) + end +``` + +## Flashblock Construction Process + +The External Block Builder initiates the construction of Flashblocks upon receiving a fork choice update (`engine_forkchoiceUpdated`) call forwarded by Rollup Boost. The construction of Flashblocks follows a defined sequence of steps repeated every `FLASHBLOCKS_TIME` interval, ensuring consistent, incremental, and ordered propagation of preconfirmed state to end-users. It's important to note that `FLASHBLOCKS_TIME` serves as a target interval rather than a strictly enforced rule in Rollup Boost. + +### Handling of Sequencer Transactions + +An important protocol rule that the Flashblock construction process must adhere to involves handling "system transactions" within the OP Stack. These include deposits and system transactions that arrive with the Fork Choice Update (FCU) and must always be executed as the first transactions in any valid block. 
+ +From an "in-protocol" perspective, a block is not considered valid if these sequencer transactions are missing. Consequently, the minimum valid block that can be constructed must include the execution of these transactions. + +The External Block Builder follows this mandate by executing these sequencer transactions first and including them in the initial Flashblock (index 0). This serves as the foundation upon which all subsequent Flashblocks in the sequence will build. + +When processing these mandatory sequencer transactions, the builder does not apply the same gas allocation heuristics used for regular transactions in later Flashblocks. While these transactions do consume gas like any other transaction, they receive special handling as they must be included regardless of gas considerations to maintain protocol validity. + +### Transaction Inclusion Heuristics + +As part of the flashblock construction process, the External Block Builder makes sophisticated decisions about transaction inclusion. Unlike rigid gas limit enforcement, the decision of when to stop including transactions in a flashblock involves nuanced heuristics that may evolve over time. + +The builder must balance multiple factors when deciding which transactions to include in each flashblock: + +- Optimizing for user experience by providing quick feedback +- Ensuring transactions with higher gas usage aren't permanently excluded +- Maintaining efficient gas utilization across the entire block +- Accounting for execution time constraints within the `FLASHBLOCKS_TIME` window + +In some cases, the builder might include a transaction that exceeds what would be a strict per-flashblock gas limit because executing that transaction is important for user experience or economic reasons. This flexibility is a key advantage of the out-of-protocol approach. + +The specific heuristics for transaction allocation across flashblocks are intentionally not prescribed in this specification. 
Rather than codifying particular strategies, we leave this as an area where builders can innovate and optimize. Different chains can develop custom heuristics based on their specific transaction patterns, user expectations, and economic models. As implementations mature, we expect some general principles will emerge for handling common scenarios, but this specification intentionally avoids prematurely constraining this design space. + +### Post-block Execution Rules + +In the OP Stack protocol, certain operations such as withdrawals and system requests are applied at the end of block execution. Since each flashblock must function as a valid standalone block for preconfirmation purposes, these post-block execution rules must be applied at the end of each flashblock's construction. + +When constructing a flashblock, the builder applies all required post-block operations after executing the selected transactions. These operations modify the state according to protocol rules, ensuring the flashblock represents a complete and valid block state. + +However, an important implementation detail is that these post-block changes must be reverted before beginning execution for the next flashblock. This reversion is necessary because the post-block operations should only be applied once per actual L2 block, not cumulatively for each flashblock. Failing to revert these changes would lead to their repeated application across multiple flashblocks, potentially creating invalid cumulative state and ultimately an invalid final block. + +### Construction Steps + +After handling the mandatory sequencer transactions in the initial Flashblock, the External Block Builder proceeds with constructing subsequent Flashblocks by following these steps for each interval: + +1. **Transaction Selection** + +- Retrieve transactions from local or external mempool: +- Prioritize and sort transactions based on predefined sequencing policies, such as priority ordering or by MEV paid. + +2. 
**Transaction Execution** + +- Sequentially execute selected transactions against a state snapshot derived from the current execution payload base (ExecutionPayloadBaseV1) or the last validated flashblock +- Apply the transaction inclusion heuristics described earlier to determine when to stop including transactions +- After transaction execution completes, apply all post-block execution rules as described in the Post-Block Execution Rules section + +3. **Flashblock Payload Assembly** + +- After transaction execution, compute and record the following execution state updates: + - `state_root`: The new post-execution state root resulting from the executed transactions. + - `receipts_root`: The receipts trie root derived from execution outcomes. + - `logs_bloom`: Aggregated logs bloom from all emitted transaction logs within this flashblock. + - `gas_used`: Total gas consumed by executed transactions. + - `transactions`: Serialized transaction payloads included within the flashblock. + - `withdrawals` (if applicable): Withdrawals executed during the current flashblock interval (as per OP Stack withdrawal specification). + - `block_hash`: Computed block hash uniquely identifying this flashblock execution state. + + Note that each flashblock builds upon the state of all previous flashblocks, with these fields reflecting the cumulative state after applying the new transactions in this particular flashblock. + +- Encapsulate these computed updates into `ExecutionPayloadFlashblockResultV1`. + +4. **Flashblock Indexing and Metadata** + +- Assign a monotonically incremented `index` to the newly constructed Flashblock payload. +- Compute the SSZ hash of the previous Flashblock and assign it as the `parent_flash_hash` (for the first Flashblock with index 0, this field is empty) + +5. **Flashblock Delivery** + +- Package the `index`, `payload_id`, `ExecutionPayloadFlashblockDeltaV1`, and metadata into a `FlashblocksPayloadV1` payload. +- Deliver the assembled `FlashblocksPayloadV1` payload promptly to Rollup Boost via the designated Flashblocks submission API. + +6. **Subsequent Flashblock Construction** + +- Immediately after successful delivery, increment the Flashblock `index`. +- Revert any post-block execution changes as described in the Post-Block Execution Rules section +- Reset the transaction execution context based on the newly delivered state. +- Begin constructing the next `FlashblocksPayloadV1` payload, repeating from step 1 until a termination condition is reached (e.g., end of block building period via `engine_getPayload` request). + +7. **Flashblock Construction Termination** + +- Flashblock construction continues iteratively until: + - Rollup Boost signals final block aggregation and propagation via `engine_getPayload`. + - A failure or timeout condition arises requiring failover procedures, detailed separately. + +```mermaid +sequenceDiagram + participant BB as External Block Builder + participant RB as Rollup Boost + participant EE as Execution Engine (local) + participant M as Mempool + + loop Every FLASHBLOCKS_TIME + BB->>M: Retrieve and Prioritize Transactions + M-->>BB: Transactions batch + Note over BB: Execute transactions sequentially + BB->>EE: Execute transactions and compute state root + EE-->>BB: Execution results (state root, receipts, gas used) + + Note over BB: Construct Flashblock Delta + BB->>BB: Assemble FlashblocksPayloadV1 (state_root, receipts_root, logs_bloom, gas_used, block_hash, txs, withdrawals, metadata) + + BB->>RB: Submit FlashblocksPayloadV1 + RB-->>BB: Acknowledge reception (async) + Note over BB: Increment index, prepare next Flashblock + end +``` + +## Flashblocks Metadata + +The `FlashblocksPayloadV1` structure defined above contains the minimum required data for Rollup Boost to return a valid block. The `metadata` field provides additional information to enable preconfirmations.
+ +This metadata contains supplementary information about the execution state that is not strictly necessary for block construction but is valuable for RPC providers to offer comprehensive preconfirmation services. Examples of such metadata include: + +- Account state changes (which accounts have been modified) +- Updated account balances +- Storage slot modifications +- Contract deployment information +- Detailed transaction execution results + +### Alternative Design Consideration + +While this specification includes detailed metadata in Flashblocks, a viable alternative would be for RPC providers to execute transactions themselves as they receive them through the stream. In this approach, providers would receive only transaction data, execute them in order, maintain their own state cache, and use it to fulfill RPC requests. This would significantly reduce bandwidth requirements by eliminating metadata transmission. + +## Rationale for Including State Roots in Flashblocks + +One of the most discussed aspects of the Flashblocks design is the decision to include state roots with every flashblock. This section explains the rationale behind this design choice. + +### Non-Blocking Block Production + +We operate under the assumption that `engine_getPayload` requests should return quickly with a valid, complete block. This assumption, which we believe to be correct based on our understanding of the OP Stack, guides our design decisions. + +Currently in OP Stack implementations, execution layer nodes compute payloads in the background and can return them immediately when requested via `engine_getPayload`. This allows for near-instant responses, maintaining the flow of block production without delays. For Flashblocks to provide similar performance, it must have all block components - including state roots - readily available when `engine_getPayload` is called. 
+ +Without pre-computed state roots for each flashblock, Rollup Boost would face a critical decision when handling `engine_getPayload`: + +1. **Request the state root from the Execution Layer**: This approach would be problematic because the Execution Layer does not maintain a "hot" state that matches the current flashblock sequence. It would need to apply all pending transactions and compute a new state root, which is exactly the operation we're trying to optimize with flashblocks. +2. **Request the state root from the External Block Builder**: This would require an additional synchronous request to the builder with a protocol which is not engine-specific. Not only does this introduce an extra communication hop and latency, but it also creates a single point of failure - if the builder is unavailable at that moment, Rollup Boost cannot fulfill the request and we fall into the failure path rather than the happy path. + +### Builder Availability and System Reliability + +The key advantage of including state roots with each flashblock is system reliability. By having state roots immediately available, Rollup Boost can respond to `engine_getPayload` requests without additional external dependencies at that critical moment. + +Without pre-included state roots, a builder failure at the moment of block production would force the system to either: + +1. Recompute the entire state from scratch (time-consuming and potentially disruptive) +2. Fail to produce a block on time (violating protocol assumptions) +3. Be unable to fulfill the preconfirmations that have already been exposed to users + +### Future Design Considerations + +This approach represents our current understanding of the optimal design given existing constraints. However, as mentioned in the Out-of-Protocol Design section, alternative approaches may be worth exploring as we gain production experience. 
Future iterations might consider different state root handling approaches, particularly in the context of high-availability sequencer setups and deeper integration with OP Stack components. + +## Builder-to-Rollup-boost Communication Flow + +Rollup Boost maintains an open WebSocket connection with the External Block Builder. Through this persistent connection, the builder pushes the `FlashblocksPayloadV1` payloads as soon as they're constructed, without waiting for requests from Rollup Boost. + +If the WebSocket connection goes down, the builder buffers (queues) the messages internally and attempts to resend them once the connection is restored. This buffering only applies for the current block being built; when a new block cycle begins, any queued messages from the previous block are discarded as they are no longer relevant to the current state. + +**SSZ Encoding for Flashblocks Messages** + +Flashblocks messages transmitted between the Block Builder and Rollup Boost use Simple Serialize (SSZ) for binary encoding. Unlike JSON or other self-describing formats, SSZ is schema-less and does not embed field names or type information in the serialized data. This makes explicit versioning necessary, especially in a streaming context where message types cannot be inferred from surrounding context. + +For the `FlashblocksPayloadV1` structure, a version field is placed as the first field in the container. + +This design leverages SSZ's deterministic encoding characteristics, where fixed-size fields like `Bytes4` appear at predictable offsets in the serialized data. When a recipient receives a serialized Flashblocks message over the WebSocket stream: + +1. It first reads the initial 4 bytes to determine the message version +2. 
Based on the version identifier, it selects the appropriate container structure for deserializing the remainder of the data + +## Flashblock Validity Rules + +For a flashblock to be considered valid the following must hold: + +- **Monotonically Increasing Payload Index:** Each successive Flashblock payload delivered within the same L2 block cycle must have an index exactly one greater than the previous payload. Any skipped indices or duplicated indices constitute a violation. When a violation occurs, Rollup Boost will ignore the invalid flashblock and maintain its internal state, only updating when it receives a new flashblock with the correct next index value. +- **Immutable Payload Base:** Immutable block header fields (`parent_hash`, `block_number`, `prev_randao`, etc.) set by the initial `ExecutionPayloadBaseV1` cannot be altered by subsequent Flashblocks during the same L2 block period. +- **Execution Validity:** Every Flashblock must be validated successfully against the Sequencer’s local execution engine state to ensure OP protocol-level correctness. +- **Valid Full Block:** Every flashblock, when combined with prior flashblocks, should be a valid L2 Block without requiring Rollup Boost to perform any additional operations other than repackaging the data structure. This means that state roots are calculated on each Flashblock contrary to publication due to the out-of-protocol nature of the implementation. + + A flashblock is considered a valid block if: + + - It includes the first flashblock (with index 0 containing the base data) + - It comprises a continuous sequence of flashblocks with incrementing indices. + +## Flashblock System Invariants + +The following invariants must hold true for the Flashblocks protocol to function reliably: + +- **No Equivocation:** At no point should multiple distinct Flashblocks for the same payload index be delivered or propagated to RPC subscribers. 
+- **Preconfirmation Preservation:** The system always gives strong preference to maintaining the integrity of issued preconfirmations. Once a transaction has been included in a flashblock and made visible to users as preconfirmed, the system will prioritize preserving this state over other considerations such as block value optimization or alternative builder selection. + +## Flashblock Propagation + +Once Rollup Boost has validated a flashblock, it is then propagated to the rest of the network to be included in each RPC Provider’s Preconfirmation Cache. + +```mermaid +sequenceDiagram + participant B as Block Builder + participant R as Rollup-boost + box Node + participant RPC as JSON-RPC Interface + end + participant U as Users + + B->>R: Preconfirmation batches + R->>R: Validate batches + R->>RPC: Preconfirmation updates + U->>RPC: Standard RPC queries + note right of U: Regular users +``` + +Flashblocks Compatible RPC Providers subscribe to the Flashblocks websocket stream from Rollup Boost and maintain an in-memory representation of the preconfirmation state. RPC providers validate that the flashblock sequence is correct before updating their preconfirmation state. This preconfirmation state is ephemeral, maintained only until the corresponding block is propagated and the information becomes available through standard chain state. + +Throughout the entire propagation path, flashblocks are transmitted in binary SSZ-encoded format. + +### Secure propagation + +Since the preconfirmation data originates directly from the Sequencer's Rollup Boost instance, exposing this WebSocket endpoint directly to external parties presents security and scalability concerns. Instead, a reverse proxy should be implemented between Rollup Boost and external RPC providers to relay this information securely. 
+ +This mirror simply relays WebSocket data without requiring any Flashblocks-specific knowledge, acting purely as a transport layer that forwards WebSocket messages from Rollup Boost to subscribed RPC providers. You can find an example implementation [here](https://github.com/base/flashblocks-websocket-proxy). + +```mermaid +flowchart TD + subgraph Sequencer + BB[Block Builder] + RB[Rollup Boost] + Mirror[Flashblocks Mirror] + end + + subgraph RPC Providers + RPC1[RPC Provider 1] + RPC2[RPC Provider 2] + RPC3[RPC Provider 3] + RPCN[RPC Provider N] + end + + BB -->|Flashblocks| RB + RB -->|Flashblocks| Mirror + Mirror -->|Flashblocks| RPC1 + Mirror -->|Flashblocks| RPC2 + Mirror -->|Flashblocks| RPC3 + Mirror -->|Flashblocks| RPCN +``` + +## Flashblock JSON-RPC APIs + +### Ethereum JSON RPC Modifications + +All modifications done to the existing Ethereum JSON RPC methods are confined to overloading the existing `pending` tag. Originally, this tag was designed to return block data being processed by the node's internal miner. It's fitting that we now use it for a similar purpose: exposing blocks in their preconfirmation stage. When queried with the `pending` tag, the endpoint uses the preconfirmation cache state to construct the response. The response might include not only transactions but also block metadata like state root and receipt root. + +The tag is currently in a soft-deprecated state due to inconsistent implementations across clients, particularly after The Merge. However, it's worth noting that it's still actively used for certain endpoints, particularly `eth_getTransactionCount` where it serves the important function of returning the next available nonce for an account (including transactions in the mempool). This presents an opportunity: the tag is well-defined enough to be supported by client libraries, yet loosely defined enough to allow for our preconfirmation use case. 
While there's a possibility of the tag being removed in the future (see [EIP discussions](https://github.com/ethereum/execution-apis/issues/495)), the design could adapt by introducing a flashblocks-specific tag if needed. + +We repurpose the `pending` tag in the following RPC calls to enable consuming preconfirmed state: + +- eth_getTransactionReceipt +- eth_getBlockByHash +- eth_getBalance +- eth_call +- eth_getCode +- eth_getTransactionCount +- eth_getStorageAt + +### op_supportedCapabilities + +This endpoint allows clients to discover whether the RPC provider supports certain features, including Flashblocks. + +**Request** + +```json +{ + "method": "op_supportedCapabilities", + "params": [], + "id": 1, + "jsonrpc": "2.0" +} +``` + +**Response** + +```json +{ + "id": 1, + "jsonrpc": "2.0", + "result": ["flashblocksv1"] +} +``` + +When this method is called on a Flashblocks-compatible RPC provider, the response includes "flashblocksv1" in the returned array of supported capabilities. This allows clients to programmatically determine whether they can utilize Flashblocks functionality before making related requests. + +This endpoint follows a similar pattern to the Engine API's `engine_exchangeCapabilities` method, which allows consensus and execution clients to exchange information about supported features. + +This is the only new RPC endpoint introduced by the Flashblocks specification. We consider this addition acceptable because it provides necessary feature discovery while keeping the name abstract enough to accommodate future extensions to the protocol or for other protocols. 
+ +**`eth_getTransactionReceipt`** + +**Request** + +```json +{ + "method": "eth_getTransactionReceipt", + "params": ["0x..."],// Transaction hash + "id": 1, + "jsonrpc": "2.0" +} +``` + +**Response** + +```json +{ + "transactionHash": "0x...", + "blockHash": "0x0", // Empty hash as placeholder + "blockNumber": "0x...", // Current pending block number + "transactionIndex": "0x0", + "from": "0x...", + "to": "0x...", + "gasUsed": "0x...", + "status": "0x1", + "cumulativeGasUsed": "0x...", + "effectiveGasPrice": "0x...", + "contractAddress": "0x...", // For contract creations + "logs": [], + "logsBloom": "0x..." +} +``` + +When queried, this endpoint first checks the preconfirmation cache for the requested transaction hash before falling back to the standard chain state lookup. + +Some fields in the response cannot be final at the preconfirmation stage and require placeholder values: + +- `blockHash`: Uses empty hash as placeholder +- `blockNumber`: Can be set to the current block number being processed + +**`eth_getBlockByHash`** + +**Request** + +```json +{ + "method": "eth_getBlockByHash", + "params": ["pending", false], // Second parameter indicates full transaction objects (true) or only hashes (false) + "id": 1, + "jsonrpc": "2.0" +} +``` + +**Response** + +```json +{ + "hash": "0x0", // Empty hash as placeholder + "parentHash": "0x...", + "stateRoot": "0x...", + "transactionsRoot": "0x...", + "receiptsRoot": "0x...", + "number": "0x...", // Current pending block number + "gasUsed": "0x...", + "gasLimit": "0x...", + "timestamp": "0x...", + "extraData": "0x...", + "mixHash": "0x...", + "nonce": "0x...", // // Used to signal flashblock index + "transactions": [] // Array of transaction hashes or full transaction objects +} +``` + +The endpoint implements an append-only pattern - multiple queries during the same block's preconfirmation phase will show an expanding list of transactions as new flashblocks are processed. 
Each query reflects the current state of all preconfirmed transactions at that moment. + +```mermaid +sequenceDiagram + participant U as User + participant RPC + participant R as Rollup-boost + + Note over R: Block building starts + R->>RPC: Batch 1 (txs: A, B) + + U->>RPC: Query 1 + RPC-->>U: Block with txs: A, B + + R->>RPC: Batch 2 (txs: C, D) + U->>RPC: Query 2 + RPC-->>U: Block with txs: A, B, C, D + + R->>RPC: Batch 3 (txs: E) + U->>RPC: Query 3 + RPC-->>U: Block with txs: A, B, C, D, E + + R->>RPC: Batch 4 (txs: F, G) + U->>RPC: Query 4 + RPC-->>U: Block with txs: A, B, C, D, E, F, G + + Note over R: Block sealed +``` + +**`eth_getBalance`** + +**Request** + +```json +{ + "method": "eth_getBalance", + "params": ["0x...", "pending"], // Account address and block parameter + "id": 1, + "jsonrpc": "2.0" +} +``` + +**Response** + +```json +"0x..." // Balance in wei +``` + +When queried with the "pending" tag, the endpoint uses the preconfirmation cache state to return the account balance. If the requested account appears in the `AccountMetadata` of a received Flashblock with a non-null `balance` field, the RPC provider can directly return this value without needing to access the full state. The response reflects all changes from preconfirmed transactions that affect the requested account's balance. + +**`eth_call`** + +**Request** + +```json +{ + "method": "eth_call", + "params": [{"to": "0x...", "data": "0x..."}, "pending"], // Transaction call object and block parameter + "id": 1, + "jsonrpc": "2.0" +} +``` + +**Response** + +```json +"0x..." // Return data from the call +``` + +When queried with the "pending" tag, the endpoint uses the preconfirmation cache state to return the call result. For this endpoint to work, the preconfirmation stream needs to include state differences for both accounts and storage after each flashblock. 
+ +Similar to the current override functionality in `eth_call` where EVM transitions are executed on top of modified state, this implementation executes the call on top of the preconfirmation state changes. + +**`eth_getCode`** + +**Request** + +```json +{ + "method": "eth_getCode", + "params": ["0x...", "pending"],// Contract address and block parameter + "id": 1, + "jsonrpc": "2.0" +} +``` + +**Response** + +```json +"0x..."// Contract bytecode +``` + +When queried with the "pending" tag, the endpoint returns the contract bytecode from the preconfirmation cache state. If the requested account appears in the `AccountMetadata` of a received Flashblock with a non-null `code` field, the RPC provider can directly return this value without accessing the full state. + +**`eth_getTransactionCount`** + +**Request** + +```json +{ + "method": "eth_getTransactionCount", + "params": ["0x...", "pending"],// Account address and block parameter + "id": 1, + "jsonrpc": "2.0" +} +``` + +**Response** + +```json +"0x..."// Nonce value as a hex string +``` + +When queried with the "pending" tag, the endpoint returns the transaction count (nonce) of the account from the preconfirmation cache. If the requested account appears in the `AccountMetadata` of a received Flashblock, the RPC provider can directly use the `nonce` field without additional state access. + +**`eth_getStorageAt`** + +**Request** + +```json +{ + "method": "eth_getStorageAt", + "params": ["0x...", "0x...", "pending"],// Contract address, storage position, and block parameter + "id": 1, + "jsonrpc": "2.0" +} +``` + +**Response** + +```json +"0x..." // Storage value as a hex string +``` + +When queried with the "pending" tag, the endpoint returns the value from the specified storage slot using the preconfirmation cache state. 
If the requested account appears in the `AccountMetadata` of a received Flashblock, the RPC provider scans the `storage_slots` list for the requested key and returns the corresponding value directly. + +# Reliability and Operational Considerations + +## Transaction Propagation + +Similar to the design laid out in the [External Block Production](https://github.com/ethereum-optimism/design-docs/blob/main/protocol/external-block-production.md) design document, Flashblocks makes no assumptions about how transactions are delivered to the block builder. A non-exhaustive list of valid approaches: + +- Transaction forwarding via multiplexing software at the Rollup Operator’s RPC +- Private p2p connections between Sequencer transaction ingress nodes and block building nodes + +## Failover scenarios + +### Block Builder + +As per the normal Rollup-boost behavior, if the builder is down, the Rollup-boost picks up the block from the fallback builder. However, since we are dealing with preconfirmations, we must consider the relative value of preserving preconfirmations versus building a potentially more valuable block. + +In this design document, we follow the invariant that preserving preconfirmations takes precedence. If the block builder goes down after the first flashblocks have been delivered, we still return those flashblocks to maintain the integrity of any preconfirmations already issued to users. The next block would work as expected through the normal fallback mechanism, as the builder is down and the fallback builder would be used. + +We could technically discard the partial flashblocks and use the fallback block entirely, but this would violate the preconfirmations commitment. Our design assumes normal execution conditions. If losing the builder mid-flashblock becomes a common occurrence, this would indicate fundamental architectural issues that require separate improvements beyond the scope of this failover mechanism.
+ +### The Sequencer or Rollup-boost + +These failure scenarios are addressed as part of the High Availability (HA) sequencer setups. The HA architecture ensures continuity of operations by automatically failing over to standby instances when failures occur. + +## Integration with High Availability Sequencer Setups + +The integration of Flashblocks with High Availability (HA) Sequencer setups is outside the scope of this initial specification document. For details on managing Flashblock state across multiple sequencer instances and maintaining preconfirmation integrity during failovers, please refer to the resources linked below. + +- What to do with a rotating set of sequencers like with OP conductor https://github.com/ethereum-optimism/optimism/tree/develop/op-conductor +- World HA design discussion https://github.com/flashbots/rollup-boost/issues/181 +- Base Technical Design Document [TDD: Rollup Boost Integration with HA Sequencer](https://www.notion.so/TDD-Rollup-Boost-Integration-with-HA-Sequencer-1d0c9d820ca380348f21e44a5442feaf?pvs=21) + +## Faults + +### Safety Faults + +In the rollup security vocabulary *safety* implies that “**no one can create or withdraw assets they are not entitled to.**” A **safety fault** therefore occurs the moment an **invalid L2 state root** is accepted on Ethereum **and** at least one L2→L1 action (withdrawal, message relay, etc.) that depends on that root is executed **and** the dispute game period has ended. After that point the canonical record on Ethereum says the invalid state is *final* and the rollup’s honesty assumption is broken. + +The safety of a flashblock is directly equivalent to the safety of an L2 block. Additionally, on each submission of a flashblock to Rollup Boost, it is simulated against the Sequencer’s local execution engine, ensuring the Block Builder’s view is equivalent to the Sequencer’s.
+ +The real thing we are interested in with regard to safety faults for the Flashblock stream is whether they can be reorged. The answer to this question is that the preconfirmed state can be reorged out if the Sequencer reorgs. Given that the sequencer is the one validating the block builder blocks, then there is no additional risk of reorg from the introduction of the External Block Builder and Flashblocks stream, as in both cases, the reorg is due to Sequencer Operator error. + +### Liveness Faults + +In the rollup vocabulary *liveness* implies that “every honest user can (a) get a transaction included within a bounded time and (b) complete a withdrawal within the 7‑day challenge window.” A **liveness fault** is any condition that makes either promise untrue *without violating safety* (no invalid state is accepted). + +The liveness of a flashblock is therefore directly equivalent to the liveness of L2 blocks as users are able to force include via the L1 as normal. + +# Rationale + +### Why out-of-protocol + +The design is implemented as an out-of-protocol solution rather than a core protocol modification to allow for faster iteration and development. This approach respects the stability guarantees of the OP Stack while allowing participants to adopt the features at their own pace. + +We do not, however, discard the possibility of enshrining these features inside the OP Stack protocol as both teams become more comfortable working together and more familiar with the specification. This out-of-protocol approach serves as a proving ground that can inform a potential future core integration. + +### Why not shorter block times + +While reducing block times is a potential solution, it would require non-trivial changes to the OP Stack codebase, where the current minimum timestamp used is 1 second. Additionally, extremely short block times (sub-200ms) might introduce significant performance issues in other blockchain infrastructure like block explorers and indexers.
+ +Flashblocks provide a more balanced approach: they maintain reasonable block times for network decentralization and stability, while offering a fast-lane feedback mechanism for users who need immediate transaction status. + +This approach also opens the door to an interesting possibility: chains could potentially implement longer block times (tens of seconds) while still maintaining quick preconfirmations via Flashblocks. This combination might enable new and interesting use cases that benefit from both paradigms. + +# Backwards Compatibility + +## End Users + +At present, consuming Flashblocks data is completely opt-in through the use of the `pending` tag, therefore once turned on, no applications will require changes to how they consume data from their RPC. Instead an additional opt-in flow is enabled. + +## Infrastructure Operators + +For Sequencer Operators, Flashblocks and Rollup Boost can be enabled and disabled with no additional harm to the system. + +For RPC Operators, Flashblocks will require a modified RPC node that subscribes to the Flashblock stream in addition to maintaining a Preconfirmation cache and responding with the relevant data on request with the `pending` tag. + +# Implementation + +A feature complete implementation of all components described in this document can be found in the [rollup-boost](https://github.com/flashbots/rollup-boost), [op-rbuilder](https://github.com/flashbots/rbuilder/tree/develop/crates/op-rbuilder), [flashblocks-websocket-proxy](https://github.com/base/flashblocks-websocket-proxy), and [reth-flashblocks](https://github.com/danyalprout/reth-flashblocks). 
\ No newline at end of file diff --git a/rust/rollup-boost/specs/flashblocks_p2p.md b/rust/rollup-boost/specs/flashblocks_p2p.md new file mode 100644 index 0000000000000..83ea64fe05c50 --- /dev/null +++ b/rust/rollup-boost/specs/flashblocks_p2p.md @@ -0,0 +1,193 @@ +# Flashblocks P2P Extension + +*This document is an extension to the original Flashblocks specification, modifying the flashblock propagation mechanism to use a peer-to-peer (P2P) network instead of WebSockets. It highlights the new P2P protocol and the changes in Rollup-Boost and builder interactions, aimed at simplifying distribution and improving fault tolerance in High Availability (HA) sequencer setups.* + +**Table of Contents** + +* [Abstract](#abstract) +* [Motivation](#motivation) +* [Specification](#specification) + + * [Terminology](#terminology) + * [Data Structures](#data-structures) + + * [**`Authorization`**](#authorization) + * [**`Authorized Message`**](#authorized-message) + * [**`StartPublish`**](#startpublish) + * [**`StopPublish`**](#stoppublish) + * [Flashblocks P2P Protocol](#flashblocks-p2p-protocol) + + * [Protocol Overview](#protocol-overview) + * [Message Types](#message-types) + * [Authorization and Security](#authorization-and-security) + * [Multi-Builder Coordination](#multi-builder-coordination) + * [Rollup-Boost and Builder Communication](#rollup-boost-and-builder-communication) + +# Abstract + +This document introduces an enhancement to Flashblocks where the propagation of partial blocks (“flashblocks”) is done over an Ethereum P2P subprotocol instead of a WebSocket broadcast. By integrating flashblock distribution into the peer-to-peer network, we eliminate the need for a dedicated WebSocket proxy and enable more robust, decentralized propagation of flashblock data. 
Crucially, this P2P approach uses cryptographic authorization to ensure that only an **authorized** block builder (and its designated successors in an HA setup) can publish flashblocks, improving fault tolerance during sequencer failovers. The end result is a simpler and more resilient system for delivering rapid preconfirmation data to users, without altering the core OP Stack protocol. + +# Motivation + +The original Flashblocks design relied on a centralized broadcast (via Rollup-Boost and a WebSocket proxy) to propagate flashblocks to RPC providers. While effective, that design introduced operational complexity and potential single points of failure: + +* **Operational Complexity:** Sequencer operators had to manage a WebSocket broadcasting service (e.g. Rollup-Boost’s WebSocket proxy) to fan-out flashblocks to providers. In multi-sequencer (HA) configurations, handing off this connection or migrating subscribers was cumbersome. +* **Failover Challenges:** In a High Availability sequencer setup, if the active sequencer failed the act of switching to a new sequencer/rollup-boost/builder combo would mean that already published flashblocks would not make it in the new block produced by the new builder. This breaks the promise that flashblocks makes to its consumers. +* **Scalability and Decentralization:** Relying on a single hub (the sequencer’s Rollup-Boost) to redistribute flashblocks could become a bottleneck. A P2P approach can naturally scale out to many peers and align with Ethereum’s existing propagation model for blocks and transactions. + +**P2P Propagation** addresses these issues by leveraging a gossip network for flashblocks. In this model, any number of RPC provider nodes (or other interested parties) can connect to the flashblock P2P network to receive preconfirmation updates. 
Failover is handled gracefully through the RLPx protocol: if a new sequencer takes over, its builder is already aware of previously published flashblocks, and so it can build on top of what has already been promised to the network. + +# Specification + +## Terminology + +We inherit all terminology from the original Flashblocks spec (Sequencer, Block Builder, Rollup-Boost, etc.), with a few new terms introduced: + +* **Authorizer** – The entity that vouches for a block builder’s legitimacy to produce flashblocks. In practice, this is rollup-boost who signs an authorization for a given builder each block cycle. +* **Builder Public Key** – A cryptographic public key identifying a builder on the flashblocks P2P network. This is distinct from an Ethereum address; it’s used for signing/validating flashblock messages. +* **Flashblocks P2P Network** – The peer-to-peer overlay network (using Ethereum’s devp2p protocols) through which flashblock messages are gossiped. Participants include all builders and one or more subscribing nodes (e.g. RPC providers, possibly other sequencer nodes in standby). +* **Publisher** – The current active builder that is publishing flashblocks for the ongoing L2 block. In an HA setup, the role of publisher can transfer to a new builder if the sequencer fails over. + +## Data Structures + +The fundamental flashblock data structures (`FlashblocksPayloadV1`, `ExecutionPayloadFlashblockResultV1`, `ExecutionPayloadStaticV1`, and the various Metadata containers) remain unchanged. Flashblocks are still represented as a sequence of incremental payloads culminating in a full block. + +To support P2P propagation and authorization, we introduce several new structures: + +### **`Authorization`** + +Represents a sequencer’s cryptographic authorization for a specific builder to produce a block with a given payload context. This is essentially a signed token from the sequencer (authorizer) that the builder includes with its flashblocks. 
+ +```rust +pub struct Authorization { + pub payload_id: PayloadId, + pub timestamp: u64, + pub builder_vk: VerifyingKey, + pub authorizer_sig: Signature, +} +``` + +* `payload_id`: The unique ID for this block’s payload (as provided by `engine_forkchoiceUpdated` in the OP Stack Engine API). All flashblocks for the block share this ID. +* `timestamp`: The timestamp associated with this payload +* `builder_vk`: The verifying key identifying the builder authorized to publish this block’s flashblocks. Peers will use this to verify the builder’s signatures on messages. +* `authorizer_sig`: A signature produced by the sequencer (authorizer) over the concatenation of `payload_id`, `timestamp`, and `builder_vk`. This proves that the sequencer has approved the given builder (and key) to act for this block. Only one authorizer key (controlled by the rollup-boost operator) is recognized by the network, and all peers are configured with its public key for verification. + +### **`Authorized Message`** + +Container for any flashblocks P2P message that requires authorization. It bundles a payload (one of the message types defined below) with the authorization and a builder’s signature. + +```rust +pub struct Authorized { + pub msg: AuthorizedMsg, + pub authorization: Authorization, + pub actor_sig: Signature, +} +``` + +```rust +pub enum AuthorizedMsg { + FlashblocksPayloadV1(FlashblocksPayloadV1) = 0x00, + StartPublish(StartPublish) = 0x01, + StopPublish(StopPublish) = 0x02, +} +``` + +* `authorization`: The Authorization object, as described above. +* `msg`: The message content. This is a tagged union that can be one of: + + * A **Flashblock Payload** – Contains a `FlashblocksPayloadV1` (partial block delta), see below. + * A **StartPublish** signal – Indicates the builder is starting to publish a new block (detailed in [StartPublish](#startpublish)). + * A **StopPublish** signal – Indicates the builder is stopping publication (detailed in [StopPublish](#stoppublish)). 
+ +* `actor_sig`: The builder’s signature over the combination of the `msg` and the `authorization`. This attests that the message indeed comes from the holder of the private key corresponding to the `builder_vk` in the Authorization, and that it hasn’t been tampered with in transit. + +Every P2P message in the Flashblocks protocol is sent as an `AuthorizedMessage`. This double-signature scheme (authorizer + builder) provides two layers of security: + +1. Only a builder with a valid Authorization (signed by the sequencer) can get its messages accepted by peers. +2. Only the genuine builder (holding the private key corresponding to `builder_vk`) can produce a valid `actor_sig` on the message content. + +### **`StartPublish`** + +A small message indicating the intention to begin publishing flashblocks for a new L2 block. + +```rust +pub struct StartPublish; +``` + +The `StartPublish` message is always sent wrapped in an `AuthorizedMessage` (with the appropriate authorization and signatures). It serves as an announcement to the network that *“Builder X is about to start publishing.”* + +### **`StopPublish`** + +An authorized message indicating that the builder will no longer publish any flashblocks. + +```rust +pub struct StopPublish; +``` + +**Note:** A builder will typically send a `StopPublish` when it receives a `ForkChoiceUpdated` without an accompanying `Authorization` from rollup-boost or upon handing off flashblock production to a new builder. + +## Flashblocks P2P Protocol + +### Protocol Overview + +Flashblocks P2P communication is implemented as a custom Ethereum subprotocol. Specifically, it defines a new devp2p capability: + +* **Protocol Name:** `flblk` (flashblocks) +* **Version:** `1` + +Nodes that support flashblocks will advertise this capability when establishing devp2p connections. Once connected, they can exchange flashblock messages as defined in this spec. + +All flashblock messages are encoded in a compact binary format (analogous to Ethereum block gossip). 
Each message begins with a one-byte type discriminator, followed by the serialized content. The primary message type is an `AuthorizedMessage` (discriminator `0x00`), which, as described, contains a nested payload type. + +**Key design features of the P2P protocol:** + +* **Multipeer Gossip:** A builder’s flashblock is forwarded to all connected peers, who in turn may forward it to their peers, etc., ensuring the payload reaches all participants without needing a single central broadcaster. The protocol includes basic duplicate suppression so that flashblocks aren’t endlessly propagated in loops. +* **Real-time Coordination:** Using `StartPublish` and `StopPublish` signals, multiple potential publishers (builders) can coordinate access to the network. This prevents conflicts where two builders might try to publish simultaneously, and allows a smooth handoff in failover scenarios (detailed below). + +### Message Types + +Within the `AuthorizedMsg` union, we define the following variants and their semantics: + +* **Flashblock Payload Message:** Carries a `FlashblocksPayloadV1` (as defined in the original spec) for a specific partial block. This includes the incremental transactions, updated state root, receipts root, logs bloom, etc., up through that flashblock. Peers receiving this message will apply the included state updates to their preconfirmation cache. Each Flashblock message has an `index` (the flashblock sequence number) and may include the `base` section if it’s the first flashblock (index 0) for that block. +* **StartPublish Message:** Announces the start of a new publisher’s flashblock sequence. Peers use this to note which builder is now active for a given L2 block number, possibly resetting any previous state or halting their own publishing. +* **StopPublish Message:** Indicates the end of the flashblock sequence for the current publisher. After this message, no further flashblocks from that publisher should arrive. 
Inactive or waiting publishers use this as a cue that they may now take over for subsequent flashblocks. + +All these are encapsulated in `AuthorizedMsg` with the requisite signatures. + +### Authorization and Security + +The P2P protocol introduces a trust model wherein peers accept flashblocks only from an **authorized builder**. The security measures include: + +* **Authorizer Signature Verification:** Upon receiving any `AuthorizedMessage`, a peer will first verify the `authorizer_sig` in the `Authorization` against the known authorizer public key. This confirms that rollup-boost has indeed permitted the stated builder to produce the block with the given `payload_id` and timestamp. If this signature is missing or invalid, the message is discarded as untrusted. +* **Builder Signature Verification:** Next, the peer verifies the `actor_sig` on the message content using the `builder_vk` provided in the Authorization. This ensures the message was genuinely produced by the authorized builder and not altered. If this check fails, the message is rejected. +* **Payload Consistency Checks:** Peers also check that the fields in the message are self-consistent and match expectations: + + * The `payload_id` in the Authorization must match the `FlashblocksPayloadV1.payload_id` (for flashblock messages). Each builder’s flashblock messages carry the same payload\_id that was authorized, ensuring they all belong to the same block-building session. + * **Freshness:** The `timestamp` in Authorization helps guard against replay of old messages. If a flashblock or StartPublish arrives with a significantly older timestamp (or for an already completed block), peers will ignore it and decrement the sender's reputation. + +These measures ensure that **only** the rollup-boost sanctioned builder’s data is propagated and that it’s cryptographically sound. Unauthorized parties cannot inject false flashblocks or tamper with content without detection. 
This design also allows dynamic builder changes: as long as the sequencer signs a new Authorization, the peers will accept the new builder’s messages even if they have never seen that builder before, because trust is transitive from the authorizer’s key. + +### Multi-Builder Coordination + +A major benefit of the P2P approach is the ability to coordinate multiple builders in an HA (High Availability) setting. The `StartPublish` and `StopPublish` messages, in conjunction with a small amount of logic in Rollup-Boost and the network, handle the arbitration: + +* **Single Publisher Rule:** The network expects at most one builder to be actively publishing flashblocks for a given L2 block number at any time. If two different builders both attempt to publish for the same block, the conflict must be resolved to maintain a consistent preconfirmation state. +* **Announcing Intent – `StartPublish`:** When Rollup-Boost (sequencer) initiates a new block with an external builder, it immediately broadcasts a `StartPublish` message (as an AuthorizedMessage) from that builder. This tells all peers: “Builder X is about to start publishing.” If any other builder was thinking of building block N (perhaps there was a recent failover), it will see this and **stand down**. +* **Graceful Yield – reacting to `StartPublish`:** If a builder is currently publishing and receives a `StartPublish` from a *different* builder for the same or next block, it means a failover or override is happening. The expected behavior is that the current publisher will cease publishing (and issue a `StopPublish`). The protocol is designed such that the honest builder who is not supposed to publish will yield to the authorized one. The reference implementation will automatically send a `StopPublish` if it is publishing and learns that another builder has taken over authority for the block. The new builder will wait until it receives the `StopPublish` before continuing. 
+* **Completion – `StopPublish`:** When a builder receives the next FCU _without_ an accompanying `Authorization`, it will send out a `StopPublish`. This removes the builder from the “active publisher” role in the eyes of the network. If there was another builder in waiting (perhaps one that had attempted to start earlier but was told to wait), that waiting builder will now see that the coast is clear. +* **Timeouts and Fallback:** There is an implicit timeout in the coordination. If a builder is in a waiting state after announcing `StartPublish` but for some reason the previous publisher fails to produce a `StopPublish` (for example, if it crashed mid-block), other participants will not wait indefinitely. In our design, if a new block number is reached and the previous publisher hasn’t stopped we assume the previous builder is incapacitated and proceed with the new publisher. + +This coordination ensures that in an HA setup with multiple sequencer instances and multiple builders, **preconfirmation data remains consistent**: only one set of flashblocks is ever in flight for a given block. If a sequencer failover occurs, the worst-case scenario (which occurs only during a very rare race condition) is a single block publication gap or discontinuity at a block boundary. In the far more likely case, there will be exactly no flashblock disruption. The next publisher will simply start where the last publisher left off, even if that is mid block. + +## Rollup-Boost and Builder Communication + +In the P2P-enhanced design, Rollup-Boost’s interaction with the external block builder is slightly adjusted: + +* **Authorization Delivery:** When the sequencer (op-node) triggers a new block proposal via `engine_forkchoiceUpdated` (with payload attributes), Rollup-Boost creates an `Authorization` for the chosen builder. This requires that Rollup-Boost knows the builder’s public key in advance. 
In practice, the builder can be configured or registered with Rollup-Boost, providing its long-term public key. Rollup-Boost uses its **authorizer private key** (associated with the L2 chain or sequencer) to sign the authorization (covering payload\_id, timestamp, builder’s key). +* **Forkchoice Updated Forwarding:** Rollup-Boost forwards the fork choice update to the builder as usual (so the builder can start building the block). In this modified protocol, the fork choice update (or a parallel communication) includes the newly created `Authorization`. For example, a custom field or side-channel could convey the authorizer’s signature to the builder. **(Implementation-wise, this might be an extension of the Engine API or an internal call – the key point is the builder receives the Authorization token before it begins sending flashblocks.)** +* **StartPublish Broadcast:** If the builder was not previously publishing, then immediately after receiving the authorization it will emit a `StartPublish` message over the P2P network. This tells all listening nodes that the authorized builder will begin flashblock publication. +* **Streaming Flashblocks:** The builder executes transactions and produces flashblocks incrementally just as described in the original spec’s Flashblock Construction Process. However, instead of returning these payloads to Rollup-Boost, the builder now signs each flashblock with its key and directly broadcasts an Authorized Flashblock message to the P2P network. +* **No Inline Validation by Sequencer:** In the original design, Rollup-Boost would validate each flashblock against the local execution engine before propagating it. In the P2P model, this is not done synchronously for each flashblock (it would negate some latency benefits). Instead, trust is managed via the Authorization. The sequencer trusts its chosen builder to only send valid blocks (and will ultimately verify the final block when `engine_getPayload` is called). 
Peers trust the flashblocks because they trust the Rollup-Boost’s signature. + +In summary, Rollup-Boost’s role shifts from being a **middleman for data** to being a **controller and coordinator**. It authorizes the builder and informs the network about which builder is active, but it doesn’t need to ferry every flashblock through itself. This streamlines the path from builder to RPC providers. + diff --git a/rust/rollup-boost/specs/flashtestations.md b/rust/rollup-boost/specs/flashtestations.md new file mode 100644 index 0000000000000..85d91da17e189 --- /dev/null +++ b/rust/rollup-boost/specs/flashtestations.md @@ -0,0 +1,839 @@ + Flashtestations: Transparent Onchain TEE Verification and DCAP Attestation Registry Protocol + +*Authors: [fnerdman](https://github.com/fnerdman), [Melville](https://github.com/Melvillian), [dmarz](https://github.com/dmarzzz), [Ruteri](https://github.com/Ruteri)* + +**Table of Contents** +- [Abstract](#abstract) +- [Prerequisites](#prerequisites) +- [Motivation](#motivation) +- [Specification](#specification) + - [Terminology](#terminology) + - [Intel TDX Primitives](#intel-tdx-primitives) + - [Flashtestations Protocol Components](#flashtestations-protocol-components) + - [Operational Terms](#operational-terms) + - [Data Structures](#data-structures) + - [**`TDReport`**](#tdreport) + - [**`DCAPEndorsements`**](#dcapendorsements) + - [**`RegisteredTEE`**](#registeredtee) + - [**`Extended Registration Data`**](#extended-registration-data) + - [System Architecture](#system-architecture) + - [TEE Attestation Mechanism](#tee-attestation-mechanism) + - [Intel TDX DCAP Attestation](#intel-tdx-dcap-attestation) + - [Onchain DCAP Attestation](#onchain-dcap-attestation) + - [Flashtestation Registry](#flashtestation-registry) + - [Core Concepts](#core-concepts) + - [Key Relationship Model](#key-relationship-model) + - [Fundamental Operations](#fundamental-operations) + - [Key Requirements](#key-requirements) + - [Attestation Verification 
Endpoint](#attestation-verification-endpoint) + - [Policy Layer: Flexible Authorization](#policy-layer-flexible-authorization) + - [Policy Abstraction](#policy-abstraction) + - [Policy Operations](#policy-operations) + - [End-to-End Flow](#end-to-end-flow) + - [Attestation and Registration](#attestation-and-registration) + - [Runtime Authorization](#runtime-authorization) + - [Maintenance: Handling Changing Endorsements](#maintenance-handling-changing-endorsements) + - [Gas Cost Considerations and Future Optimizations](#gas-cost-considerations-and-future-optimizations) + - [Offchain TEE Verification](#offchain-tee-verification) + - [Example Verification Flow](#example-verification-flow) + - [Transparency Log](#transparency-log) + - [Purpose and Benefits](#purpose-and-benefits) + - [Logged Information](#logged-information) + - [Implementation Approach](#implementation-approach) + - [Relationship with Registry](#relationship-with-registry) +- [Security Assumptions](#security-assumptions) +- [Rationale](#rationale) + - [Replacement Model](#replacement-model) + - [Gas Optimization](#gas-optimization) + - [Separation of Concerns](#separation-of-concerns) + - [Block Builder TEE Proofs](#block-builder-tee-proofs) + - [Core Mechanism](#core-mechanism) + - [Block Building Process](#block-building-process) + - [Verification Contract](#verification-contract) + - [Security Properties](#security-properties) + + +# Abstract + +Integrating TEEs with dapps presents significant challenges. Flashtestations frames the onchain representation of a TEE through its **`TEE-controlled address`**—an onchain identifier for an entity that can operate as a verifiable offchain smart contract or [**TEE Coprocessor**](https://writings.flashbots.net/suave-tee-coprocessor). For such entities to be trusted and usable by onchain smart contracts, key questions must be addressed: How can their authenticity and the code they run be cryptographically verified? 
How can this verification scale efficiently onchain and be maintained as hardware security requirements evolve? + +Flashtestations addresses these challenges by providing a comprehensive onchain protocol for TEE verification, **`TEE-controlled address`** registration, and transparent record-keeping. The protocol enables: + +1. Onchain verification of Intel TDX attestations against current Intel endorsements. +2. Maintenance of a curated Registry of **`TEE-controlled addresses`** associated with their respective DCAP Attestations. +3. Policy-based authorization for TEE services (including TEE Coprocessors identified by their **`TEE-controlled address`**) to securely interact with smart contracts. +4. Transparent logging of all attestation events and endorsement changes. + +# Prerequisites + +This document assumes familiarity with the following background material, specifications, and tooling. Items are arranged in the rough order they become relevant while reading this spec: + +1. **Intel TDX Architecture & Security Model** — core concepts, measurement registers, Trust Domain isolation, and attestation flows. + — [Intel TDX Specifications and Developer Guides](https://www.intel.com/content/www/us/en/developer/tools/trust-domain-extensions/documentation.html) +2. **Intel DCAP Attestation Stack** — Quote generation, signature scheme and collateral (QE Identity & TCB Info) retrieval. + — [Intel TDX DCAP Quoting Library API](https://download.01.org/intel-sgx/latest/dcap-latest/linux/docs/Intel_TDX_DCAP_Quoting_Library_API.pdf) +3. **On‑Chain DCAP Quote Verification** — Solidity contracts that decode DCAP quotes and perform cryptographic validation using PCCS‑sourced endorsements. + — [Automata DCAP Attestation Contract](https://github.com/automata-network/automata-dcap-attestation) +4. **On‑Chain Endorsement Storage (PCCS)** — Automata’s Solidity implementation that mirrors Intel collateral on Ethereum, enabling fully reproducible verification. 
+ — [Automata On‑chain PCCS](https://github.com/automata-network/automata-on-chain-pccs) + +# Motivation + +The integration of Trusted Execution Environments (TEEs) with onchain applications offers transformative potential, particularly for enabling sophisticated **TEE Coprocessors**. These entities, which can function as verifiable offchain smart contracts and are identified onchain by their **`TEE-controlled address`**, can execute complex or confidential tasks. Realizing this potential, however, requires addressing fundamental questions regarding their verification, scalability, and maintainability within the blockchain environment: + +* How can onchain smart contracts cryptographically verify they are interacting with an authentic TEE (represented by its **`TEE-controlled address`**) running the intended software? +* How can this verification scale efficiently onchain for regular use? +* How can an up-to-date registry of validated TEEs be maintained as their underlying hardware security requirements and endorsements (like Intel DCAP endorsements) evolve? +* How can these processes be transparent to foster trust within the ecosystem? + +Flashtestations is motivated by the need to solve these fundamental problems of TEE and onchain interaction and provides an onchain protocol that serves as a crucial building block for this interaction. This protocol primarily enables secure **TEE outbound calls** - allowing TEEs to make calls to onchain smart contracts and be verifiably authorized via their **`TEE-controlled address`**. Furthermore, by facilitating the onchain availability of verified TEE public keys (derived from `ReportData` in the attestation quote), it lays a crucial foundation for secure **TEE inbound calls**. This equips users and smart contracts with the trusted information needed to establish secure communication channels with attested TEEs. By providing these foundational elements, Flashtestations empowers the development of a robust ecosystem. 
+ +To achieve this, Flashtestations aims for the following objectives: + +1. **Security (Verifiable TEE Interaction)**: To provide irrefutable cryptographic proof that a **`TEE-controlled address`** is backed by a genuine TEE executing specific, measured code (its **`workloadId`**). This ensures all interactions involving TEEs are resistant to spoofing and unauthorized code execution. + +2. **Efficiency (Performant Onchain Integration)**: To ensure that critical onchain operations, particularly the lookup and validation of the TEE's status via its **`TEE-controlled address`**, are highly gas-efficient (aiming for O(1) complexity). This is vital for enabling frequent and cost-effective TEE outbound and TEE inbound calls. + +3. **Maintainability (Dynamic Trust Anchoring)**: To support the efficient updating of the TEE registry as underlying TEE security endorsements evolve. This ensures the trustworthiness of registered TEEs remains aligned with current security standards over time. + +4. **Transparency (Auditable Ecosystem)**: To maintain a publicly accessible and auditable record of TEE attestations (as described in the Transparency Log section), including their **`workloadId`**s, and any changes to their validation status. This fosters accountability and allows for independent security analysis, building broader trust for all TEE-onchain interactions. 
+ +# Specification + +## System Architecture + +Within the Flashtestations specification, the protocol architecture consists of four key components that work together to provide secure onchain TEE verification: + +``` +┌─────────────────────────┐ ┌─────────────────────┐ +│ TDX VM │ │ Onchain Verifier │ +│ │ Attestation │ │ +│ ┌─────────────────┐ │ Quote │ ┌─────────────────┐ │ +│ │ TEE Workload │ │ ───────────────► │ │ DCAP Attestation│ │ +│ │ │ │ │ │ Verifier │ │ +│ │ (measurements) │ │ │ │ │ │ +│ └─────────────────┘ │ │ └────────┬────────┘ │ +│ │ │ │ │ +└─────────────────────────┘ │ ▼ │ + │ ┌─────────────────┐ │ +┌─────────────────────────┐ │ │ Intel │ │ +│ Consumer Contract │ │ │ Endorsements │ │ +│ │ │ │ │ │ +│ ┌─────────────────┐ │ │ └────────┬────────┘ │ +│ │ Operation │ │ │ │ │ +│ │ Authorization │ │ │ ▼ │ +│ └─────────────────┘ │ │ ┌─────────────────┐ │ +│ │ │ │ │ Registration │ │ +└─────────┼───────────────┘ │ │ Logic │ │ + │ │ └────────┬────────┘ │ + │ └──────────┼──────────┘ + │ │ +┌─────────▼──────────────┐ ▼ +│ Policy │ ┌───────────────────────────┐ +│ │ isValid │ Flashtestation Registry │ +│ ┌─────────────────────┐│ Query │ │ +│ │ allowedWorkloadIds[]││ ◄───────────────►│ {teeAddress: registration}│ +│ │ {registration: ││ │ map │ +│ │ workloadId} map ││ │ │ +│ └─────────────────────┘│ └───────────────────────────┘ +└────────────────────────┘ +``` + +1. **Onchain Verifier**: Validates TDX attestation quotes against current Intel endorsements +2. **Flashtestation Registry**: Tracks which addresses have valid attestations for specific workload registrations +3. **Policy Registry**: Defines which workloads are acceptable for specific onchain interactions +4. **Transparency Log**: Records all attestations and endorsement changes (implemented via events) + +## Terminology + +The terms in this section are used consistently throughout the specification documents. When a term is first mentioned elsewhere, it links back here. 
+ +### Intel TDX Primitives + +**Trusted Execution Environment (TEE)**: Hardware-based isolated execution environment that protects code and data from the host operating system and other applications. In Intel TDX, the isolation boundary is the "Trust Domain" (TD) rather than the bare CPU. + +**Intel TDX ([Trust Domain Extensions](https://www.intel.com/content/www/us/en/developer/tools/trust-domain-extensions/documentation.html))**: Intel's TEE technology for virtual machines that provides hardware-enforced isolation, integrity verification, and attestation capabilities. TDX creates isolated Trust Domains (TDs) inside virtual machines. + +**Attestation**: The cryptographic process by which a TEE proves its identity and integrity to a verifier. Produces a signed structure (Quote) containing measurements and claims about the TEE's state. + +**DCAP ([Data Center Attestation Primitives](https://download.01.org/intel-sgx/latest/dcap-latest/linux/docs/Intel_TDX_DCAP_Quoting_Library_API.pdf))**: Intel's attestation system designed for data centers that enables verification without requiring direct communication with Intel for each attestation check. + +**Quote**: The cryptographically signed data structure produced during attestation, containing measurement registers and report data fields that uniquely identify the TEE and its contents. Flashtestations scope currently only supports the DCAP v4 Quote format for TDX. + +**Intel DCAP endorsements**: Data provided by Intel that serves as the trust anchor for attestation verification. This includes QE Identity information, TCB Info, certificates, and related data. Also referred to as "Endorsements" in some contexts. + +**Collateral**: See Intel DCAP endorsements. It carries the same meaning. This is not monetary collateral as in crypto-economic systems. Some sources, such as Automata's [onchain PCCS](https://github.com/automata-network/automata-on-chain-pccs), use collateral as the go-to term. 
+ +**TCB (Trusted Computing Base)**: The set of hardware, firmware, and software components critical to a system's security. In TDX, the TCB includes Intel CPU hardware, microcode, and firmware components that must be at approved security levels. + +**Measurement Registers**: Hardware-enforced registers within the TEE (MRTD, RTMRs, MROWNER, etc.) that capture cryptographic hashes of code, data, and configuration loaded into the environment. These registers take part in forming the workload identity. + +**REPORTDATA**: A 64-byte field in the attestation quote containing data defined by the TDX VM. In Flashtestations, this field is structured as: + +``` +REPORTDATA[0:20]: The TEE-controlled address +REPORTDATA[20:52]: keccak256(extendedRegistrationData) - Hash of application-specific data, currently empty. Reserved for future upgrades +REPORTDATA[52:64]: Unused +``` + +**Quote Enclave (QE)**: Intel-provided enclave responsible for signing attestation quotes using Intel-provisioned keys. The QE creates the cryptographic binding between measurement registers and the attestation signature. + +**Provisioning Certification Service (PCS)**: Intel's service that provides the certificates and related data needed to verify attestation quotes. In Flashtestations, we use Automata's onchain PCCS, which stores this data on the blockchain. + +**Attestation Key (AK)**: The cryptographic key used by the Quote Enclave to sign attestation quotes. The validity of this key is established through a certificate chain back to Intel. + +**TDAttributes**: Hardware-enforced attributes that describe the security properties and configuration of a Trust Domain, including settings that affect the TEE's isolation guarantees and security posture. + +**XFAM (Extended Features and Attributes Mask)**: A hardware register that indicates which CPU extended features (such as specific instruction sets or security capabilities) are enabled and available for use within the Trust Domain. 
+ +**TEE-controlled address**: An Ethereum address whose private key was generated inside a TEE and never leaves the TEE boundaries. The TEE uses this address to authenticate itself in onchain transactions, providing cryptographic proof of TEE control over the address. + +### Flashtestations Protocol Components + +**Workload**: The specific software running inside a TEE. Its identity is derived from measurement registers that contain cryptographic hashes of loaded code and configuration. + +**`workloadId`**: A 32-byte hash uniquely identifying a specific TEE workload based on its measurement registers, derived and authorized by the policy contract. + +**`extendedRegistrationData`**: ABI-encoded application-specific attested data that is cryptographically bound to the attestation quote through the `REPORTDATA` field. Currently empty, reserved for future use. The registration data can include for example runtime configuration of the VM, identity of the VM operator, public IP of the instance. + +**Policy Registry**: A mapping system that groups related workload identities under a single policy identifier, enabling flexible authorization rules without modifying consumer contracts. + +**Flashtestation Registry**: The onchain data structure that maintains a 1:1 mapping from TEE-controlled addresses to valid DCAP attestations. Implemented as the FlashtestationRegistry contract. + +**Transparency Log**: The onchain event-based system that records all attestation verifications, registry changes, and endorsement updates for auditability. Implemented through emitted blockchain events rather than as a separate logging service. + +**`Endorsement Version`**: The specific version of Intel DCAP endorsements at a point in time. Endorsements change periodically as Intel releases updates or discovers vulnerabilities in hardware or firmware. 
+ +**Onchain Verifier**: The smart contract component (using Automata's DCAP attestation system) that validates TDX attestation quotes against current Intel DCAP endorsements and interacts with the Flashtestation Registry to register TEE-controlled addresses. + +**PCCS (Provisioning Certificate Caching Service)**: Automata's onchain implementation of Intel's PCCS that stores Intel DCAP endorsements on the blockchain, making it available for attestation verification. This ensures all verification is reproducible on L2. + +### Operational Terms + +**Registration**: The process of adding a TEE-controlled address to the registry after successful attestation verification. + +**Endorsement Revocation**: The process of marking attestations as outdated when Intel updates its security requirements. + +**Housekeeping**: The maintenance process of verifying and updating attestation status when endorsements change. This can be done on-demand via the attestation verification endpoint. + +**TCB Recovery**: The process that occurs when Intel releases updates to address security vulnerabilities in the TCB components. This typically requires updating the list of secure endorsements. + +**Reproducible Build**: A deterministic build process that ensures anyone building the same source code will produce identical binary outputs, enabling verification of expected TEE measurements. + +**TEE Infrastructure Operator**: The entity that has control over the TEE deployment, including the ability to set values in measurement registers such as MROWNER, MRCONFIGID, and MROWNERCONFIG before VM startup. + +## Data Structures + +The protocol defines several key data structures: + +### **`TDReport`** + +The TD Report includes the core information used for attestation of the TDX Quote. This spec only references elements in the report; a full quote also has a header and signature data. 
+ +```python +class TDReport(): + TEETCBSVN: Bytes16 + MRSEAM: Bytes48 + SEAMATTRIBUTES: bytes8 + MRSIGNERSEAM: Bytes48 + MRTD: Bytes48 + RTMR: List[Bytes48, size=4] + MROWNER: Bytes48 + MROWNERCONFIG: Bytes48 + MRCONFIGID: Bytes48 + TDAttributes: Bytes8 + XFAM: Bytes8 + ReportData: Bytes64 +``` + +**Field descriptions:** + +- `TEETCBSVN`: TEE Trusted Computing Base Security Version Numbers (SVNs); indicates platform patch level. Checked against onchain collateral. +- `MRSEAM`: Measurement of the TDX SEAM module itself. Checked against onchain collateral. +- `SEAMATTRIBUTES`: TDX SEAM module attributes. All zeroes in production mode, checked against onchain collateral. +- `MRSIGNERSEAM`: Measurement of the TDX SEAM module's signer (Intel). Checked against onchain collateral. +- `MRTD`: Initial TD measurement (boot loader, initial data). Should be part of the WorkloadId. +- `RTMR`: Runtime measurements (linux kernel, initramfs, etc.). Should be part of the WorkloadId. +- `MROWNER`: Measurement register that takes arbitrary information and can be set by the TEE infrastructure operator before the startup of the VM. Currently not used. +- `MROWNERCONFIG`: same as `MROWNER` +- `MRCONFIGID`: same as `MROWNER` +- `TDAttributes`: Attributes describing the security properties and configuration of the Trust Domain. Should verify the debug flag is not enabled unknowingly. +- `XFAM`: Extended Features and Attributes Mask, indicating which CPU extended features are enabled for the Trust Domain. +- `ReportData`: Confidential VM–defined data included in the report (currently TEE-controlled address and hash of extendedRegistrationData). + +### **`DCAPEndorsements`** + +Data provided by Intel to verify the authenticity of a TDX Quote. + +```python +class DCAPEndorsements(): + QEIdentity: Bytes + TCBInfo: Bytes + QECertificationData: Bytes +``` + +**Field descriptions:** + +- `QEIdentity`: Quoting Enclave Identity. +- `TCBInfo`: Trusted Computing Base information. 
+- `QECertificationData`: Certification data for the attestation key. + +### **`RegisteredTEE`** + +```python +class RegisteredTEE(): + isValid: Boolean # True upon first registration, false after invalidation + rawQuote: bytes # The raw quote from the TEE device + parsedReportBody: TD10ReportBody # Parsed TDX report body for efficient access + extendedRegistrationData: bytes # Application-specific data bound to the attestation +``` + +**Field descriptions:** + +- `parsedReportBody`: Parsed form of the quote for use in generating workloadId +- `rawQuote`: The raw quote from the TEE device, which is stored to allow for future quote invalidation +- `extendedRegistrationData`: The application-specific attested data, reserved for future upgrades +- `isValid`: True upon first registration, and false after a quote invalidation + +### **`Extended Registration Data`** + +The protocol supports binding additional application-specific data to TEE attestations through the `extendedRegistrationData` field. This enables applications to attest to configuration, public keys, or other data alongside the TEE identity. + +Key properties: +- The hash of this data is included in `reportData[20:52]` of the attestation +- The actual data is stored in the registry for retrieval +- Applications can use this for TEE-specific configuration or capabilities +- Currently supports up to 20KB of data (same limit as quotes) + +Example use cases: +- VM operator public keys for signature verification +- Runtime configuration parameters +- Service endpoints or network addresses +- Application-specific capability declarations + +## TEE Attestation Mechanism + +Attestation is the process by which a TEE proves its identity and integrity. This section of the specification defines how the protocol uses Intel TDX with DCAP (Data Center Attestation Primitives) attestation. 
+ +### Intel TDX DCAP Attestation + +TDX attestation produces a Quote structure which among header and signature contains a [TDReport](#tdreport) + +The attestation process follows these steps: + +1. The TEE generates a TD Report containing its measurement registers and report data +2. The Quote Enclave (QE) creates a Quote by signing the TD Report with an Attestation Key +3. The Quote can be verified against Intel's Provisioning Certification Service (PCS) + +### Onchain DCAP Attestation + +The following code sample illustrates how DCAP attestation verification is performed onchain, and how the key components (workloadId, quote, and TEE-controlled address) are extracted and registered in the Flashtestations registry: + +```solidity +function registerTEEService(bytes calldata rawQuote, bytes calldata extendedRegistrationData) external { + // Verify the DCAP quote + (bool success, bytes memory output) = attestationContract.verifyAndAttestOnChain(rawQuote); + require(success, "Invalid DCAP quote"); + + // Parse the report body + TD10ReportBody memory reportBody = QuoteParser.parseV4VerifierOutput(output); + + // Extract TEE address and extended data hash from reportData + (address teeAddress, bytes32 extDataHash) = QuoteParser.parseReportData(reportBody.reportData); + + // Verify sender controls the TEE address + require(msg.sender == teeAddress, "Sender must match TEE address"); + + // Verify extended data binding + require(keccak256(extendedRegistrationData) == extDataHash, "Invalid registration data"); + + // Store the full attestation data + registeredTEEs[teeAddress] = RegisteredTEE({ + isValid: true, + rawQuote: rawQuote, + parsedReportBody: reportBody, + extendedRegistrationData: extendedRegistrationData + }); +} +``` + +This implementation highlights several key aspects: +1. The DCAP attestation is verified using Automata's onchain verifier +2. The TEE-controlled address is extracted from the attested data +3. 
The extracted information and raw quote are registered in the registry
+
+## Flashtestation Registry
+
+The Flashtestation Registry is a core component of Flashtestations that maintains a 1:1 mapping between TEE-controlled addresses and their TEE attestations. It acts as a bookkeeper for tracking which TEE-controlled addresses have successfully passed attestation verification. Its purpose is to provide a data structure optimistically filled with up-to-date attestation data. In itself it does not provide any filtering with regard to the content of the TEEs.
+
+### Core Concepts
+
+At its most abstract level within this specification, the Flashtestation Registry is responsible for:
+
+1. **Storing addresses** that have been validated through attestation
+2. **Associating addresses** with their specific workload identity
+3. **Storing attestation quotes** for future verification and revocation, as well as the extendedRegistrationData
+4. **Providing efficient lookup** capabilities to verify if an address is authorized for a particular workloadID
+
+The registry operates on these key abstractions:
+
+1. **TEE-controlled address**: The address extracted from the quote's report data field ([TDReport.ReportData](#tdreport)), whose private key was generated inside the TEE and is used to interact with onchain contracts.
+2. **Parsed Quote**: A struct containing the verified and attested data. It contains the quote in its raw form as well as extracted values which are often used and required such as the workloadId.
+
+   2.1 **Attestation Quote**: The raw attestation data provided during registration that contains the cryptographic proof of the TEE's state. This quote is stored in the parsed quote struct for later verification and revocation.
+
+   2.2 **Workload Identity (`workloadId`)**: An application-specific 32-byte hash derived from TDX measurement registers that uniquely identifies a specific piece of code and its configuration running in a TDX environment. 
+ +### Key Relationship Model + +The Flashtestation Registry maintains a 1:1 mapping between these entities: + +1. The TEE-controlled address is the mapping key in the 1:1 mapping to the parsed quote struct +2. Each address maps to exactly one parsed quote struct, the mapping value for an address can be overwritten when providing a new _valid_ quote +3. The registry will only accept adding a quote where the msg.sender matches the quote report data (proving control of the address) +4. The registry tracks whether each struct currently contains a valid quote or has been marked as outdated + +### Fundamental Operations + +The Flashtestation Registry provides these core operations: + +#### 1. Lookup and quote retrieval + +The most frequent operation is checking if an address is valid for a specific workload: + +``` +function getRegistration(teeAddress) → (boolean, registration) +``` + +This function operates by: +1. Retrieving the parsed quote struct for the given TEE-controlled address +2. Returning true if the quote has not been invalidated, and the registration data (which includes the raw quote) + +This operation must be highly gas-efficient as it may run on every block. + +#### 2. Registration + +When an attestation successfully passes verification: + +``` +function _recordValidAttestation(workloadId, teeAddress, quote) internal +``` + +This internal operation: +1. Records that this TEE-controlled address has been validated for this workload +2. Stores the raw attestation quote in the mapping for future reference and verification +3. If the address was previously registered, the old entry is replaced +4. Can only be called internally after successful attestation verification and sender validation + +#### 3. Attestation Verification + +To reverify if an attestation is still valid against current endorsements: + +``` +function invalidateAttestation(teeAddress) → boolean +``` + +This operation: +1. The endpoint accepts a TEE-controlled address as input +2. 
Retrieves the stored attestation quote for the TEE-controlled address +3. It runs the verification against current Intel endorsements +4. If verification fails, marks the address as outdated in the registry +5. The address remains in the registry but will fail the `getRegistration` check +6. Returns the verification result + +### Key Requirements + +1. **Simple Storage Model**: The registry maintains a simple mapping between TEE-controlled addresses, workloadIds, and their attestation quotes without tracking complex endorsement relationships. + +2. **Quote Storage**: The system maintains a copy of the attestation quote used to register each address, supporting external verification and auditability. + +3. **Gas Efficiency**: The lookup operation must be extremely efficient (O(1) gas costs) regardless of the number of addresses stored. + +4. **Reliance on Off-Chain Observation (Initial Version)**: In its current design, the protocol relies on an honest off-chain service (e.g., operated by the TEE infrastructure operators or other trusted entities) to monitor attestations and their associated collateral. This service is expected to call `invalidateAttestation` to mark an attestation as stale if the underlying collateral is updated or becomes invalid. This implies a trust assumption in these off-chain entities to maintain the integrity of the registry's "valid" attestations. + +### Attestation Verification Endpoint + +The attestation verification endpoint provides a mechanism to validate stored attestations against current Intel endorsements: + +1. **On-demand Verification**: Verification happens only when needed, rather than requiring constant maintenance. This is typically triggered by an off-chain service. + +2. **Smooth Transitions**: TEE-controlled addresses are marked as outdated only when their verification actually fails. 
+ +This approach provides a clean, straightforward way to manage attestation validity over time, **though it currently relies on external actors to initiate the `invalidateAttestation` call for attestations that may have become stale due to collateral changes.** + +## Policy Layer: Flexible Authorization + +The Policy layer sits above the Flashtestation Registry and provides a more flexible authorization mechanism. It's realized by Policy contracts, possibly using the same registry. + +### Policy Abstraction + +A Policy is simply a contract authorizing addresses through allowlisting WorkloadIds derived from TEE registration data. + +``` +Policy: [teeAddress -> **`RegisteredTEE`**] -> [**`RegisteredTEE`** -> **`WorkloadId`**] -> [**`WorkloadId`** -> bool] +``` + +This allows downstream contracts and products to reference a policy contract by address rather than managing specific workloads. + +### Workload Metadata + +To provide transparency and allow end-users to verify the source code running within a TEE, the Policy Registry can store metadata that links a `workloadId` to its source code commit. + +This is achieved by storing a direct reference to the specific Git commit hash and a list of locators where the source code can be found. + +```python +class WorkloadMetadata(): + # The Git commit hash of the source code. + commitHash: string + + # An array of URIs pointing to the source code. + sourceLocators: List[string] +``` + +This structure provides flexibility in retrieving the source code. The `sourceLocators` array can include multiple URI schemes to ensure redundancy and support for both traditional and decentralized storage. For example, it may contain `https://`, `git://`, and `ipfs://` URIs. 
+ +### Policy Operations + +The Policy layer provides these operations: + +``` +// Check if an address is allowed under any workload in a policy +function isAllowedPolicy(teeAddress) → boolean +// Mapping of registrations to workloadIds +function workloadIdForTDRegistration(RegisteredTEE) -> workloadId + +// Governance operations +function addWorkloadToPolicy(workloadId) +function removeWorkloadFromPolicy(workloadId) +function setWorkloadMetadata(workloadId, commitHash, sourceLocators) + +// View operations +function getWorkloadMetadata(workloadId) → (commitHash, sourceLocators) +``` + +The key function `isAllowedPolicy` checks if an address is valid for ANY of the workloads in the policy group. Conceptually: + +``` +function isAllowedPolicy(teeAddress) → (bool allowed, WorkloadId) { + // Get TEE registration + (bool isValid, RegisteredTEE memory registration) = registry.getRegistration(teeAddress); + if (!isValid) return (false, WorkloadId(0)); + + // Compute workloadId from registration + WorkloadId workloadId = workloadIdForTDRegistration(registration); + + // Check if computed workloadId is in policy + for each allowedWorkloadId in allowedWorkloadIds { + if (workloadId == allowedWorkloadId) { + return (true, workloadId); + } + } + return (false, WorkloadId(0)); +} +``` + +## End-to-End Flow + +The complete verification flow connects attestation, the registry, and the policy layer. The following diagram illustrates the interactions between all components, from initial registration to runtime authorization and frontend source code verification. + +```mermaid +sequenceDiagram + participant TEE Workload + participant Flashtestation Registry + participant Policy Registry + participant Consumer Contract + participant Frontend + + Note over TEE Workload, Consumer Contract: 1. Attestation & Registration + TEE Workload->>TEE Workload: Generate quote with reportData[0:20]=teeAddress,
reportData[20:52]=keccak256(extendedRegistrationData) + TEE Workload->>Flashtestation Registry: registerTEEService(quote, extendedRegistrationData) + Flashtestation Registry->>Flashtestation Registry: 1. Verify quote with DCAP verifier
2. Extract teeAddress from reportData[0:20]
3. Verify sender == teeAddress
4. Verify keccak256(extendedData) == reportData[20:52] + Flashtestation Registry->>Flashtestation Registry: Store RegisteredTEE{isValid, rawQuote,
parsedReportBody, extendedRegistrationData} + Flashtestation Registry-->>TEE Workload: Success + emit TEEServiceRegistered + + Note over TEE Workload, Consumer Contract: 2. Runtime Authorization + TEE Workload->>Consumer Contract: executeProtectedOperation() + Consumer Contract->>Policy Registry: isAllowedPolicy(teeAddress) + Policy Registry->>Flashtestation Registry: getRegistration(teeAddress) + Flashtestation Registry-->>Policy Registry: (isValid, registration) + Policy Registry->>Policy Registry: 1. Compute workloadId from registration
2. Check if workloadId in policy + Policy Registry-->>Consumer Contract: (allowed=true, workloadId) + Consumer Contract->>Consumer Contract: emit BlockBuilderProofVerified + Consumer Contract-->>TEE Workload: Success + + Note over Frontend, Policy Registry: 3. Frontend Source Verification + Frontend->>Flashtestation Registry: getRegistration(teeAddress) + Flashtestation Registry-->>Frontend: (isValid, registration) + Frontend->>Frontend: Extract parsedReportBody from registration + Frontend->>Policy Registry: workloadIdForTDRegistration(registration) + Policy Registry-->>Frontend: workloadId + Frontend->>Policy Registry: getWorkloadMetadata(workloadId) + Policy Registry-->>Frontend: (commitHash, sourceLocators) + Frontend->>Frontend: Verify source code matches workloadId + + Note over TEE Workload, Flashtestation Registry: 4. Maintenance: Attestation Invalidation + Frontend->>Frontend: Monitor Intel DCAP endorsement updates + Frontend->>Flashtestation Registry: invalidateAttestation(teeAddress) + Flashtestation Registry->>Flashtestation Registry: 1. Retrieve stored rawQuote
2. Re-verify with current endorsements
3. If invalid, set isValid=false + Flashtestation Registry-->>Frontend: emit TEEServiceInvalidated +``` + +### Attestation and Registration + +1. **TEE Environment**: A workload runs in a TDX environment and generates an attestation quote + - The attestation contains measurement registers (determining the `workloadId` as described in [Workload Identity Derivation](#workload-identity-derivation)) + - The report data field contains an Ethereum public key + +2. **Verification Service**: An onchain verification service validates this attestation + - Checks cryptographic signatures + - Validates against current Intel endorsements ([DCAPEndorsements](#dcapendorsements)) + - Extracts the TEE-controlled address and workload measurements + - Validates that msg.sender matches the extracted TEE-controlled address + +3. **Registration**: Upon successful verification and sender validation, the address is registered + ``` + _recordValidAttestation(derivedWorkloadId, teeAddress, rawQuote) + ``` + - If the address was previously registered, the old entry is replaced + +### Runtime Authorization + +When a contract needs to verify if an operation is authorized: + +1. The contract checks if the sender is allowed under a specific policy: + ``` + if (policy.isAllowedPolicy(msg.sender)) { + // Permit the operation + } + ``` + +2. This policy check determines if the address is allowed for any workload in the policy and has not been marked as outdated. + +### Maintenance: Handling Changing Endorsements + +Intel endorsements change over time, requiring a maintenance process: + +1. **Triggered Verification**: When an off-chain service observes that an attestation's collateral has been updated or is no longer valid, it calls the `invalidateAttestation(teeAddress)` function. The system then checks if the corresponding stored attestation is still valid against current Intel endorsements. + +2. 
**Marking as Outdated**: If verification fails (e.g., due to outdated endorsements or collateral issues identified by the off-chain service leading to the `invalidateAttestation` call), the address is automatically marked as outdated in the registry.
+
+3. **Re-attestation**: Addresses marked as outdated must re-attest using current endorsements to regain valid status.
+
+This approach ensures that addresses transition from valid to outdated as Intel's security requirements evolve or as collateral changes, **facilitated by an honest off-chain service responsible for monitoring and triggering these checks.** The registry itself remains synchronized with Intel's current security opinions once verification is invoked.
+
+### Gas Cost Considerations and Future Optimizations
+
+The individual attestation verification approach prioritizes simplicity but requires running an offchain observation service for maintaining freshness of attestations. Also, each verification requires running the complete attestation verification process against current endorsements.
+
+Future optimizations could include **Validation on Access**, where the system would verify the endorsement status upon each call to `getRegistration`. This could be achieved by enhancing the `ParsedAttestation` struct to track necessary endorsement data (or a reference to it). The `getRegistration` function would then validate the freshness of the endorsements used during attestation directly against on-chain PCCS data with each call. Should it detect stale data, a subsequent new attestation would be triggered. Here also there's potential to reduce Gas Costs by only reattesting against the updated PCCS endorsement data rather than the whole quote.
+
+These optimizations would maintain the design's simplicity while reducing the reliance on off-chain services for marking attestations stale and providing more gas-efficient ways to handle endorsement changes, especially as the number of registered addresses grows. 
That said, it will increase the gas cost of `getRegistration` although initial investigation has shown that this may be negligible.
+
+## Offchain TEE Verification
+
+The Flashtestations protocol enables comprehensive offchain verification of TEE services through its quote storage mechanism. Applications can retrieve the original attestation quote for any registered TEE-controlled address via the getRegistration(teeAddress) function, allowing for complete independent verification without incurring gas costs. This approach permits offchain entities to perform the same cryptographic validation as the original onchain verifier, including measurement verification and endorsement checks against the Intel PCS.
+
+### Example Verification Flow
+
+```javascript
+// JavaScript example for offchain quote verification
+async function verifyTEEAddressOffchain(teeAddress) {
+  const registry = new ethers.Contract(REGISTRY_ADDRESS, REGISTRY_ABI, provider);
+  // Retrieve the stored attestation quote
+  const registration = await registry.getRegistration(teeAddress);
+  // Verify the quote against Intel endorsements using local DCAP verification
+  return verifyDCAPQuoteLocally(registration.quote, teeAddress);
+}
+```
+
+## Transparency Log
+
+The L2 blockchain functions as a transparency log within Flashtestations, maintaining a permanent record of attestation events and their verification. This log provides auditability, verifiability, and transparency for the entire TEE attestation ecosystem.
+
+### Purpose and Benefits
+
+The transparency log serves several critical functions:
+1. **Public Verifiability**: Anyone can independently verify that attestations were properly validated
+2. **Historical Tracking**: Provides a complete history of which TEEs were registered when, and under which endorsements
+3. **Audit Trail**: Creates an immutable record that can be used for forensic analysis or compliance
+4. 
**Endorsement Evolution**: Tracks how Intel's hardware/firmware security evaluations change over time + +### Logged Information + +As specified in this protocol, the transparency log captures raw attestation data along with verification results: + +1. **Raw Attestation Quotes**: The complete DCAP quotes submitted for verification +2. **Intel Endorsements**: The actual endorsement data (endorsements) used to validate attestations +3. **Verification Events**: Records of successful and failed attestation attempts +4. **Endorsement Updates**: Records of when new Intel endorsements are published or old ones revoked + +### Implementation Approach + +The transparency log is implemented through a combination of blockchain events and calldata storage: + +```solidity +// Event definitions for the transparency log +event AttestationSubmitted( + bytes indexed rawQuote, + bytes32 indexed workloadId, + address teeAddress, + bool success +); + +event EndorsementUpdated( + bytes rawEndorsementData, + bool isValid +); + +event TEEServiceRegistered( + address indexed teeAddress, + bytes rawQuote, + bool previouslyRegistered +); + +event QuoteStored( + address indexed teeAddress, + bytes quote +); +``` + +When an attestation is verified, the raw quote data is included in the transaction calldata, making it permanently available onchain. The verification results and extracted data are then emitted as events for efficient indexing and querying. + +### Relationship with Registry + +While the Flashtestation Registry maintains the current attested state (which TEE-controlled addresses are currently valid for which workloads), the transparency log maintains the complete history of how that state evolved: + +1. **Registry**: Optimized for efficient runtime checks and state updates +2. 
**Transparency Log**: Optimized for auditability and historical verification + +This dual approach specified in the protocol enables efficient onchain operations while maintaining complete transparency and auditability. + +# Security Assumptions + +The Flashtestations protocol's security fundamentally depends on specific behavioral guarantees from the TEE workloads that participate in the system. These assumptions are critical to understand, as violating them would compromise the entire security model: + +**Private Key Management**: The TEE workload must generate the TEE-controlled address key pair securely within the TEE boundaries during startup using a cryptographically secure random number generator. The private key must never leave the TEE in plaintext form under any circumstances. The moment this private key is exposed outside the TEE, the entire attestation-based trust model fails, as an attacker could impersonate the TEE without detection. + +**Attestation Quote Integrity**: The TEE workload must maintain strict control over attestation quote generation. It must not provide any mechanism—whether intentional or through exploitation—that allows external entities to influence the `ReportData` field of generated quotes. Even if the private key remains secure within the TEE, allowing an attacker to specify arbitrary `ReportData` content would enable them to create attestations for addresses they control, completely undermining the system's security guarantees. + +**Extended Data Integrity**: TEE workloads that use extended registration data must ensure this data is generated or validated within the TEE before inclusion in the attestation. Any compromise of the extended data generation process could allow attackers to bind arbitrary data to legitimate TEE attestations. + +**Reproducible Builds**: To establish trust in the expected workload measurements, TEE workloads must be built using reproducible build processes. 
The source code, build environment, build instructions, and all dependencies must be publicly available to enable independent verification that the published source code corresponds to the expected measurement values (`workloadId`). Without this reproducibility, there is no way to verify what code is actually running within the TEE. + +**Important Note**: Identifying all security requirements and potential attack vectors for TEE-based systems is an ongoing area of research and development. This specification does not claim to have identified all possible corner cases or security considerations. Implementers should conduct thorough security reviews and consider additional safeguards based on their specific use cases and threat models. The security assumptions outlined here represent the minimum requirements for the protocol to function as intended, but additional security measures may be necessary depending on the application context. + +# Rationale + +The following explains the reasoning behind key design decisions in the Flashtestations protocol: + +### Replacement Model + +The protocol uses a direct replacement approach for attestations: + +- When a TEE-controlled address re-attests for a workloadId, its old entry is replaced with the new one +- This keeps the model simpler by ensuring each address has exactly one current endorsement per workloadId +- When endorsements become invalid, all addresses using that specific endorsement are removed completely + +### Gas Optimization + +The rationale for gas optimization in the protocol design is that the system must prioritize efficiency, particularly for the lookup operations: + +- Lookups should reflect O(1) gas costs regardless of the number of TEE-controlled addresses +- Storage slots should be fully cleared when no longer needed (to receive gas refunds) +- Batch operations should be supported for removing addresses when endorsements become invalid + +### Separation of Concerns + +A key design rationale is 
maintaining clear separation between: + +1. **Flashtestation Registry**: Tracks which TEE-controlled addresses have attestations validated by current endorsements +2. **Policy Registry**: Defines which workloads are acceptable for specific onchain operations +3. **Verification Service**: Validates attestations and updates the registry +4. **Consumer Contracts**: Use policy checks to authorize operations + +This separation enables each component to evolve independently, with governance focused on the appropriate level of abstraction. + +The Flashtestation Registry also provides direct access to stored attestation quotes, allowing external systems to perform their own verification or analysis without requiring additional onchain transactions. + +## Block Builder TEE Proofs + +The Flashtestations protocol can be extended to provide cryptographic guarantees that blocks were constructed by an authorized TEE-based block builder. This section describes how block builders running in a TEE can prove block authenticity through an onchain verification mechanism. + +### Core Mechanism + +The block builder TEE proof system works through a final transaction appended to each block. This transaction: + +1. Calls a designated smart contract method that accepts a block content hash +2. Verifies the caller's authorization using `isAllowedPolicy(msg.sender)` + +The key insight is that the private key required to sign this transaction is protected within the TEE environment. Thus, only a genuine TEE-based block builder with the proper attestation can successfully execute this transaction. + +### Block Building Process + +When building a block, the TEE block builder: + +1. Produces a block according to the L2 protocol rules +2. 
Computes the block content hash using the `ComputeBlockContentHash` function: + +```solidity +function ComputeBlockContentHash(block, transactions) { + // Create ordered list of all transaction hashes + transactionHashes = [] + for each tx in transactions: + txHash = keccak256(rlp_encode(tx)) + transactionHashes.append(txHash) + + // Compute a single hash over block data and transaction hashes + // This ensures the hash covers the exact transaction set and order + return keccak256(abi.encode( + block.parentHash, + block.number, + block.timestamp, + transactionHashes + )) +} +``` + +This block content hash formulation provides a balance between rollup compatibility and verification strength: +- Contains the minimal set of elements needed to uniquely identify a block's contents +- Compatible with data available on L1 for most optimistic and ZK rollup implementations +- Enables signature verification without requiring state root dependencies + +### Verification Contract + +The smart contract that verifies block builder TEE proofs includes a `version` parameter to enable forward compatibility. This allows the system to evolve the `blockContentHash` calculation method while maintaining backward compatibility and enabling offchain verifiers to understand which hash calculation method to use for verification. + +```solidity +// Block builder verification contract +function verifyBlockBuilderProof(uint8 version, bytes32 blockContentHash) external { + // Check if the caller is an authorized TEE block builder + require( + IPolicyRegistry(POLICY_REGISTRY_CONTRACT).isAllowedPolicy(msg.sender), + "Unauthorized block builder" + ); + + // At this point, we know: + // 1. The caller is a registered TEE-controlled address from an attested TEE + // 2. The TEE is running an approved block builder workload (via policy) + + // Note: Due to EVM limitations (no retrospection), we cannot validate the blockContentHash + // onchain. 
We rely on the TEE workload to correctly compute this hash according to the + // specified version of the calculation method. + + emit BlockBuilderProofVerified( + msg.sender, + block.number, + version, + blockContentHash + ); +} +``` + +### Security Properties + +This mechanism provides several important security guarantees: + +1. **Block Authenticity**: Each block contains cryptographic proof that it was produced by an authorized TEE block builder +2. **Non-Transferability**: The proof cannot be stolen or reused by unauthorized parties due to TEE protection of signing keys +3. **Policy Flexibility**: The system can adapt to new block builder implementations by updating the policy without contract changes +4. **Auditability**: All proofs are recorded onchain for transparency and verification diff --git a/rust/rollup-boost/vercel.json b/rust/rollup-boost/vercel.json new file mode 100644 index 0000000000000..b00648da8a68d --- /dev/null +++ b/rust/rollup-boost/vercel.json @@ -0,0 +1,10 @@ +{ + "version": 2, + "buildCommand": "chmod +x ./.vercel/build.sh && ./.vercel/build.sh", + "outputDirectory": "vercel-output", + "ignoreCommand": "git diff --quiet HEAD^ HEAD ./book/", + "github": { + "silent": false, + "autoJobCancelation": true + } +}