diff --git a/.config/forest.dic b/.config/forest.dic index bee8d2806ef7..596365ba1a0e 100644 --- a/.config/forest.dic +++ b/.config/forest.dic @@ -1,4 +1,4 @@ -267 +269 Algorand/M API's API/SM @@ -179,6 +179,7 @@ ParityDb parsable peerstore/S performant +PMF pnpm PoC pointer/SM @@ -208,6 +209,7 @@ semver serializable serializer/SM serverless +Skellam skippable Sqlx statediff diff --git a/Cargo.lock b/Cargo.lock index 8abdad99387f..6723e255a96f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -74,6 +74,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "all_asserts" +version = "2.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "514ce16346f9fc96702fd52f2ae7e383b185516ee6f556efd7c3176be8fe7bea" + [[package]] name = "alloc-no-stdlib" version = "2.0.4" @@ -3244,6 +3250,7 @@ name = "forest-filecoin" version = "0.32.4" dependencies = [ "ahash", + "all_asserts", "anes 0.2.1", "anyhow", "argon2", @@ -3345,6 +3352,7 @@ dependencies = [ "keccak-hash", "kubert-prometheus-process", "lazy-regex", + "libm", "libp2p", "libp2p-swarm-test", "libsecp256k1", @@ -3375,6 +3383,7 @@ dependencies = [ "pretty_assertions", "proc-macro2", "prometheus-client", + "puruspe", "quick-protobuf", "quick-protobuf-codec", "quickcheck", @@ -5369,6 +5378,16 @@ dependencies = [ "tracing", ] +[[package]] +name = "lambert_w" +version = "1.2.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5f0846ee4f0299ca4c5b9ca06ff55cf88b3430a763bf591474cc734479c9b24" +dependencies = [ + "num-complex", + "num-traits", +] + [[package]] name = "lazy-regex" version = "3.6.0" @@ -7492,6 +7511,16 @@ dependencies = [ "syn 2.0.117", ] +[[package]] +name = "puruspe" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d62b4ad8b456f2ac8a171567bd475cae8cecd53ff5a46fce4f261eef17648" +dependencies = [ + "lambert_w", + "num-complex", +] + [[package]] name = "quick-protobuf" version = "0.8.1" diff --git a/Cargo.toml 
b/Cargo.toml index 6515960b4583..c86a56340bdd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -125,6 +125,7 @@ jsonwebtoken = { version = "10", features = ["aws_lc_rs"] } keccak-hash = "0.12" kubert-prometheus-process = "0.2" lazy-regex = "3" +libm = "0.2" libp2p = { workspace = true, features = [ 'autonat', 'gossipsub', @@ -173,6 +174,7 @@ pin-project-lite = "0.2" positioned-io = "0.3" pretty_assertions = "1" prometheus-client = "0.23" +puruspe = "0.4" quick-protobuf = "0.8" quick-protobuf-codec = "0.3" rand = "0.8" @@ -241,6 +243,7 @@ tracing-loki = { version = "0.2", default-features = false, features = ["compat- termios = "0.3" [dev-dependencies] +all_asserts = "2" ariadne = "0.6" assert_cmd = "2" bimap = "0.6" diff --git a/src/chain/ec_finality/calculator/mod.rs b/src/chain/ec_finality/calculator/mod.rs new file mode 100644 index 000000000000..2f21a6e2f60a --- /dev/null +++ b/src/chain/ec_finality/calculator/mod.rs @@ -0,0 +1,298 @@ +// Copyright 2019-2026 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +//! Implements the `FRC-0089` EC finality calculator. +//! +//! The calculator computes an upper bound on the probability that a confirmed +//! tipset could be reorganized out of the canonical chain by an adversarial +//! fork, using observed chain data (block counts per epoch). Under healthy +//! network conditions (~5 blocks/epoch), the 2^-30 finality guarantee +//! (roughly one-in-a-billion chance of reorg) is typically achieved within +//! ~30 epochs (~15 minutes), compared to the static 900-epoch (~7.5 hour) +//! EC finality assumption which is based on worst-case network conditions. +//! +//! Reference: https://github.com/filecoin-project/FIPs/blob/master/FRCs/frc-0089.md +//! 
Python reference: https://github.com/consensus-shipyard/ec-finality-calculator + +mod skellam; +#[cfg(test)] +mod tests; + +use anyhow::Context as _; + +// `BISECT_LOW` and `BISECT_HIGH` define the search range for the bisect algorithm +// that finds the epoch depth at which the finality guarantee is met. A low +// bound of 3 avoids evaluating trivially shallow depths; a high bound of +// 200 accommodates degraded chains that take longer to finalize. +#[allow(dead_code)] +pub const BISECT_LOW: i64 = 3; +#[allow(dead_code)] +pub const BISECT_HIGH: i64 = 200; + +// the Filecoin mainnet expected block production rate. +#[allow(dead_code)] +pub const DEFAULT_BLOCKS_PER_EPOCH: f64 = 5.0; + +// the standard Filecoin security assumption for adversarial mining power. +#[allow(dead_code)] +pub const DEFAULT_BYZANTINE_FRACTION: f64 = 0.3; + +// the target reorg probability as a power of 2. 2^-30 (~one-in-a-billion) is the standard Filecoin finality guarantee. +#[allow(dead_code)] +pub const DEFAULT_SAFETY_EXPONENT: i64 = -30; + +/// Computes the upper-bound probability that a confirmed +/// tipset could be reorganized out of the canonical chain. This is a port +/// of the Python reference implementation from `FRC-0089`(`finality_calc_validator.py`). 
+#[allow(dead_code)] +pub fn calc_validator_prob( +    chain: &[i64], +    finality: i64, +    blocks_per_epoch: f64, +    byzantine_fraction: f64, +    current_epoch: i64, +    target_epoch: i64, +) -> anyhow::Result<f64> { +    if current_epoch <= target_epoch || target_epoch < 0 || current_epoch >= chain.len() as i64 { +        return Ok(1.0); +    } + +    const NEGLIGIBLE_THRESHOLD: f64 = 1e-25; + +    let mut max_k_l = 400; +    let mut max_k_b = ((current_epoch - target_epoch) * (blocks_per_epoch as i64)) as usize; +    let mut max_k_m = 400; +    let max_im = 100; + +    let rate_malicious_blocks = blocks_per_epoch * byzantine_fraction; +    let rate_honest_blocks = blocks_per_epoch - rate_malicious_blocks; + +    // Compute L: adversarial lead distribution at target epoch +    let mut pr_l = vec![0.; max_k_l + 1]; + +    let mut pr_l_k_prev = 0.0; +    for (k, pr_l_k) in pr_l.iter_mut().enumerate() { +        let mut sum_expected_adversarial_blocks_i = 0.0; +        let mut sum_chain_blocks_i = 0; + +        for chain_i in chain +            .get(((current_epoch - finality).max(0) as usize)..(target_epoch as usize)) +            .context("unexpected slice indexing error 1")? +            .iter() +            .rev() +        { +            sum_expected_adversarial_blocks_i += rate_malicious_blocks; +            sum_chain_blocks_i += chain_i; +            let prl_i = poisson_prob( +                sum_expected_adversarial_blocks_i, +                (k as i64 + sum_chain_blocks_i) as f64, +            ); +            *pr_l_k = prl_i.max(*pr_l_k); +        } +        if k > 1 && *pr_l_k < NEGLIGIBLE_THRESHOLD && *pr_l_k < pr_l_k_prev { +            max_k_l = k; +            pr_l.truncate(k + 1); +            break; +        } +        pr_l_k_prev = *pr_l_k; +    } + +    *pr_l +        .get_mut(0) +        .context("unexpected slice indexing error 2")? += 1.
- pr_l.iter().sum::<f64>(); + +    // Compute B: adversarial blocks during settlement period +    let mut pr_b = vec![0.; max_k_b + 1]; +    let mut pr_b_k_prev = 0.0; +    for (k, pr_b_k) in pr_b.iter_mut().enumerate() { +        *pr_b_k = poisson_prob( +            ((current_epoch - target_epoch) as f64) * rate_malicious_blocks, +            k as f64, +        ); + +        if k > 1 && *pr_b_k < NEGLIGIBLE_THRESHOLD && *pr_b_k < pr_b_k_prev { +            max_k_b = k; +            pr_b.truncate(k + 1); +            break; +        } +        pr_b_k_prev = *pr_b_k; +    } + +    // Compute M: adversarial mining advantage in the future (Skellam distribution) +    let pr_hgt_0 = 1.0 - poisson_prob(rate_honest_blocks, 0.0); + +    let mut exp_z = 0.0; +    for k in 0..((4. * blocks_per_epoch) as usize) { +        let pmf = poisson_prob(rate_malicious_blocks, k as f64); +        exp_z += ((rate_honest_blocks + k as f64) / (2.0_f64.powf(k as f64))) * pmf; +    } + +    let rate_public_chain = pr_hgt_0 * exp_z; + +    let mut pr_m = vec![0.; max_k_m + 1]; +    let mut pr_m_k_prev = 0.0; +    for (k, pr_m_k) in pr_m.iter_mut().enumerate() { +        for i in (1..=max_im).rev() { +            let prob_m_i = skellam::skellam_pmf( +                k as f64, +                f64::from(i) * rate_malicious_blocks, +                f64::from(i) * rate_public_chain, +            ); +            if prob_m_i < NEGLIGIBLE_THRESHOLD && prob_m_i < *pr_m_k { +                break; +            } +            *pr_m_k = prob_m_i.max(*pr_m_k); +        } + +        if k > 1 && *pr_m_k < NEGLIGIBLE_THRESHOLD && *pr_m_k < pr_m_k_prev { +            max_k_m = k; +            pr_m.truncate(k + 1); +            break; +        } +        pr_m_k_prev = *pr_m_k; +    } + +    *pr_m +        .get_mut(0) +        .context("unexpected slice indexing error 3")? += 1. - pr_m.iter().sum::<f64>(); + +    // Compute reorg probability upper bound via convolution +    let cumsum_l = cumsum(&pr_l); +    let cumsum_b = cumsum(&pr_b); +    let cumsum_m = cumsum(&pr_m); + +    let k = chain +        .get((target_epoch as usize)..(current_epoch as usize)) +        .context("unexpected slice indexing error 4")?
+ .iter() + .sum(); + + let mut sum_l_ge_k = *cumsum_l.last().context("cumsum_l should not be empty")?; + if k > 0 { + sum_l_ge_k -= *cumsum_l + .get(max_k_l.min(k as usize - 1)) + .context("unexpected slice indexing error 5")?; + } + + let mut double_sum = 0.0; + + for l in 0..k { + let mut sum_b_ge_k_min_l = *cumsum_b.last().context("cumsum_b should not be empty")?; + if k - l - 1 > 0 { + sum_b_ge_k_min_l -= *cumsum_b + .get(max_k_b.min((k - l - 1) as usize)) + .context("unexpected slice indexing error 6")?; + } + let pr_l_i = pr_l + .get(max_k_l.min(l as usize)) + .context("unexpected slice indexing error 7")?; + double_sum += *pr_l_i * sum_b_ge_k_min_l; + + for b in 0..(k - l) { + let mut sum_m_ge_k_min_l_min_b = + *cumsum_m.last().context("cumsum_m should not be empty")?; + if k - l - b - 1 > 0 { + sum_m_ge_k_min_l_min_b -= *cumsum_m + .get(max_k_m.min((k - l - b - 1) as usize)) + .context("unexpected slice indexing error 8")?; + } + double_sum += *pr_l_i + * *pr_b + .get(max_k_b.min(b as usize)) + .context("unexpected slice indexing error 9")? + * sum_m_ge_k_min_l_min_b + } + } + + let pr_error = sum_l_ge_k + double_sum; + Ok(pr_error.min(1.)) +} + +/// Performs a bisect search to find the shallowest depth at +/// which the reorg probability drops below the given guarantee. Returns -1 if +/// the guarantee is not met within the search range. 
+#[allow(dead_code)] +pub fn find_threshold_depth( +    chain: &[i64], +    finality: i64, +    blocks_per_epoch: f64, +    byzantine_fraction: f64, +    guarantee: f64, +) -> anyhow::Result<i64> { +    let current_epoch = chain.len() as i64 - 1; +    let (mut low, mut high) = (BISECT_LOW, BISECT_HIGH.min(current_epoch)); + +    if low >= high { +        return Ok(-1); +    } + +    let prob_low = calc_validator_prob( +        chain, +        finality, +        blocks_per_epoch, +        byzantine_fraction, +        current_epoch, +        current_epoch - low, +    )?; +    if prob_low < guarantee { +        return Ok(low); +    } + +    let prob_high = calc_validator_prob( +        chain, +        finality, +        blocks_per_epoch, +        byzantine_fraction, +        current_epoch, +        current_epoch - high, +    )?; +    if prob_high > guarantee { +        return Ok(-1); +    } + +    while low < high { +        let mid = (low + high) / 2; +        let prob = calc_validator_prob( +            chain, +            finality, +            blocks_per_epoch, +            byzantine_fraction, +            current_epoch, +            current_epoch - mid, +        )?; +        if prob < guarantee { +            high = mid +        } else { +            low = mid + 1; +        } +    } +    Ok(low) +} + +fn poisson_prob(lambda: f64, x: f64) -> f64 { +    poisson_log_prob(lambda, x).exp() +} + +fn poisson_log_prob(lambda: f64, x: f64) -> f64 { +    if x < 0. || x.floor() != x { +        return f64::NEG_INFINITY; +    } +    if lambda == 0. { +        if x == 0. { +            return 0.; // P(X=0 | lambda=0) = 1, log(1) = 0 +        } +        return f64::NEG_INFINITY; +    } +    let lg = libm::lgamma(x.floor() + 1.); +    x * lambda.ln() - lambda - lg +} + +fn cumsum(arr: &[f64]) -> Vec<f64> { +    let mut result = Vec::with_capacity(arr.len()); +    let mut s = 0.0; +    for v in arr { +        s += v; +        result.push(s); +    } +    result +} diff --git a/src/chain/ec_finality/calculator/skellam.rs b/src/chain/ec_finality/calculator/skellam.rs new file mode 100644 index 000000000000..6153fbd36853 --- /dev/null +++ b/src/chain/ec_finality/calculator/skellam.rs @@ -0,0 +1,138 @@ +// Copyright 2019-2026 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +/// Calculate the probability mass function (PMF) of a Skellam distribution.
+/// Ported from the FRC-0089 EC finality calculator reference implementation. +/// +/// The Skellam distribution is the probability distribution of the difference +/// of two independent Poisson random variables. +/// +/// # Arguments +/// +/// * `k` - The difference of two Poisson random variables. +/// * `mu1` - The expected value of the first Poisson distribution. +/// * `mu2` - The expected value of the second Poisson distribution. +/// +/// # Returns +/// +/// * A `f64` representing the PMF of the Skellam distribution at `k`. +/// +pub(super) fn skellam_pmf(k: f64, mu1: f64, mu2: f64) -> f64 { +    if mu1.is_nan() || mu1 <= 0.0 || mu2.is_nan() || mu2 <= 0.0 { +        return f64::NAN; +    } +    let bessel_i = puruspe::bessel::In(k.abs() as u32, 2.0 * (mu1 * mu2).sqrt()); +    (-mu1 - mu2).exp() * (mu1 / mu2).powf(k / 2.0) * bessel_i +} + +#[cfg(test)] +mod tests { +    use super::*; +    use all_asserts::*; +    use rstest::rstest; + +    // Ported from the FRC-0089 reference implementation test suite. +    // It validates SkellamPMF against the direct +    // Poisson convolution sum. The convolution is an independent computation path +    // (no Bessel functions) and is reliable for moderate parameters.
+    #[rstest] +    #[case(0, 1.5, 3.5)] +    #[case(1, 1.5, 3.5)] +    #[case(-1, 1.5, 3.5)] +    #[case(-3, 1.5, 3.5)] +    #[case(5, 1.5, 3.5)] +    #[case(0, 0.3, 0.7)] +    #[case(1, 0.3, 0.7)] +    #[case(-2, 0.3, 0.7)] +    #[case(0, 5.0, 5.0)] +    #[case(3, 5.0, 5.0)] +    #[case(-3, 5.0, 5.0)] +    #[case(0, 15.0, 35.0)] +    #[case(5, 15.0, 35.0)] +    #[case(-10, 15.0, 35.0)] +    #[case(-20, 15.0, 35.0)] +    #[case(0, 75.0, 175.0)] +    #[case(10, 75.0, 175.0)] +    #[case(-50, 75.0, 175.0)] +    #[case(0, 15.0, 8.0)] +    #[case(7, 15.0, 8.0)] +    fn test_skellam_pmf_against_convolution(#[case] k: i64, #[case] mu1: f64, #[case] mu2: f64) { +        let got = skellam_pmf(k as f64, mu1, mu2); +        let want = poisson_convolution_pmf(k, mu1, mu2); +        let relative_error = (got - want).abs() / want.abs(); +        assert_lt!( +            relative_error, +            1e-10, +            "Skellam PMF does not match convolution for k={k}, mu1={mu1}, mu2={mu2}, got={got}, want={want}, relative_error={relative_error}" +        ); +    } + +    // Ported from the FRC-0089 reference implementation test suite. +    // It verifies that the PMF values sum to ~1 over a +    // sufficient range. This is a structural property of any valid PMF.
+ #[rstest] + #[case(1.5, 3.5, -30, 30)] + #[case(15.0, 35.0, -60, 20)] + #[case(5.0, 5.0, -20, 20)] + #[case(15.0, 8.0, -20, 40)] + fn test_skellam_pmf_sums_to_one( + #[case] mu1: f64, + #[case] mu2: f64, + #[case] k_min: i64, + #[case] k_max: i64, + ) { + let mut total = 0.0; + for k in k_min..=k_max { + total += skellam_pmf(k as f64, mu1, mu2); + } + assert_lt!( + (total - 1.0).abs(), + 1e-6, + "sum over [{k_min}, {k_max}] = {total}, want ~1.0" + ); + } + + #[rstest] + #[case(0, -1.0, 1.0)] + #[case(0, 1.0, -1.0)] + #[case(0, 0.0, 1.0)] + #[case(0, f64::NAN, 1.0)] + fn test_skellam_pmf_invalid_inputs(#[case] k: i64, #[case] mu1: f64, #[case] mu2: f64) { + assert!( + skellam_pmf(k as f64, mu1, mu2).is_nan(), + "Expected NaN for invalid inputs k={k}, mu1={mu1}, mu2={mu2}" + ); + } + + /// computes the Skellam PMF from its definition as a + /// convolution of two Poisson distributions: + /// + /// `P(K=k) = sum_{j=max(0,-k)}^{inf} Poisson(j+k, mu1) * Poisson(j, mu2)` + /// + /// This is numerically reliable for moderate parameters and serves as an + /// independent reference implementation (no Bessel functions involved). 
+ fn poisson_convolution_pmf(k: i64, mu1: f64, mu2: f64) -> f64 { + let mut j_start = 0; + if k < 0 { + j_start = -k; + } + + let mut total = 0.0; + for j in j_start..(j_start + 2000) { + let log_p1 = poisson_log_pmf((j + k) as f64, mu1); + let log_p2 = poisson_log_pmf(j as f64, mu2); + let term = (log_p1 + log_p2).exp(); + total += term; + if j > j_start + 10 && term < total * 1e-16 { + break; + } + } + total + } + + fn poisson_log_pmf(k: f64, lambda: f64) -> f64 { + assert!(k >= 0., "k should not be negative"); + let lg = libm::lgamma(k + 1.); + k * libm::log(lambda) - lambda - lg + } +} diff --git a/src/chain/ec_finality/calculator/tests.rs b/src/chain/ec_finality/calculator/tests.rs new file mode 100644 index 000000000000..2d394699e19e --- /dev/null +++ b/src/chain/ec_finality/calculator/tests.rs @@ -0,0 +1,265 @@ +// Copyright 2019-2026 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +#![allow(clippy::excessive_precision)] + +use super::*; +use all_asserts::*; +use rstest::rstest; + +// The finality parameter used by the Python reference (and Filecoin mainnet). 
+const TEST_FINALITY: i64 = 900; + +// Test vectors generated by the Python reference implementation from FRC-0089: +// https://github.com/consensus-shipyard/ec-finality-calculator (finality_calc_validator.py) +// +// Parameters: blocks_per_epoch=5.0, byzantine_fraction=0.3 +// Chain: 905 epochs generated with numpy.random.default_rng(0).poisson(4.5, 905) +// Python: scipy 1.15.2, numpy 2.2.3 +// current_epoch = 904, target_epoch = current_epoch - depth +const PYTHON_REFERENCE_CHAIN: [i64; 905] = [ + 2, 3, 4, 3, 4, 7, 5, 6, 5, 5, 4, 2, 3, 3, 10, 7, 3, 8, 6, 3, 2, 3, 5, 3, 7, 6, 5, 3, 4, 6, 6, + 8, 6, 3, 2, 6, 5, 2, 4, 4, 4, 6, 5, 7, 8, 6, 3, 0, 10, 8, 3, 7, 4, 6, 4, 6, 5, 2, 5, 5, 7, 6, + 2, 1, 3, 5, 3, 5, 10, 4, 0, 5, 11, 6, 8, 6, 4, 8, 3, 4, 3, 2, 5, 6, 6, 5, 3, 9, 5, 2, 9, 3, 6, + 5, 4, 6, 2, 3, 4, 7, 5, 8, 2, 6, 0, 3, 5, 6, 6, 4, 3, 6, 5, 2, 3, 4, 6, 1, 5, 3, 5, 7, 2, 4, + 11, 3, 4, 8, 5, 3, 6, 6, 7, 5, 1, 2, 1, 4, 4, 5, 6, 4, 2, 6, 5, 5, 1, 2, 5, 5, 0, 4, 4, 7, 4, + 10, 6, 4, 9, 5, 5, 1, 0, 3, 7, 1, 6, 4, 3, 5, 7, 6, 10, 3, 5, 4, 1, 6, 2, 2, 2, 5, 4, 7, 4, 2, + 5, 6, 3, 8, 4, 6, 6, 5, 3, 3, 3, 2, 5, 5, 6, 6, 4, 7, 4, 1, 3, 6, 10, 3, 3, 4, 6, 3, 6, 5, 4, + 3, 7, 6, 2, 4, 2, 3, 1, 9, 5, 1, 5, 6, 4, 3, 8, 3, 6, 3, 2, 2, 1, 2, 3, 6, 2, 4, 2, 4, 5, 5, 4, + 4, 7, 8, 8, 8, 8, 6, 2, 3, 3, 4, 4, 3, 3, 1, 4, 5, 6, 3, 4, 7, 4, 1, 2, 2, 10, 2, 2, 3, 3, 5, + 4, 5, 3, 5, 1, 8, 4, 2, 6, 4, 9, 4, 7, 2, 2, 4, 4, 3, 3, 4, 7, 6, 4, 2, 8, 1, 4, 3, 4, 7, 4, 0, + 6, 7, 4, 4, 6, 3, 5, 7, 4, 8, 2, 2, 6, 4, 5, 3, 3, 3, 1, 4, 2, 4, 3, 5, 2, 3, 0, 6, 4, 7, 3, 6, + 3, 4, 4, 6, 3, 3, 2, 7, 4, 4, 5, 3, 4, 5, 3, 4, 4, 3, 4, 1, 5, 4, 4, 5, 4, 2, 4, 5, 3, 6, 5, 6, + 3, 4, 4, 4, 5, 4, 4, 5, 4, 4, 2, 5, 2, 4, 2, 1, 6, 6, 5, 5, 4, 9, 3, 2, 6, 4, 2, 4, 7, 7, 5, 5, + 7, 8, 2, 5, 4, 5, 1, 4, 5, 2, 5, 6, 5, 4, 4, 8, 5, 5, 6, 6, 0, 2, 4, 5, 5, 3, 5, 4, 8, 2, 4, 6, + 7, 3, 5, 5, 7, 1, 2, 5, 3, 10, 5, 10, 1, 10, 3, 5, 5, 2, 6, 2, 5, 4, 2, 1, 5, 9, 2, 4, 4, 2, 2, + 5, 5, 6, 4, 1, 6, 5, 5, 2, 6, 1, 9, 4, 
7, 3, 8, 5, 4, 5, 6, 8, 5, 4, 3, 3, 2, 3, 3, 4, 4, 7, 7, + 3, 4, 4, 4, 6, 3, 3, 4, 5, 4, 1, 3, 8, 5, 4, 5, 7, 5, 8, 2, 7, 9, 5, 3, 7, 5, 6, 6, 5, 6, 8, 4, + 6, 3, 5, 4, 6, 2, 2, 6, 5, 4, 6, 3, 3, 4, 5, 2, 3, 3, 6, 6, 4, 5, 4, 3, 8, 4, 8, 3, 5, 3, 6, 4, + 6, 1, 3, 3, 4, 8, 5, 7, 4, 5, 5, 1, 3, 6, 5, 3, 6, 3, 5, 5, 6, 5, 6, 5, 7, 6, 4, 7, 6, 5, 3, 3, + 2, 4, 8, 4, 5, 1, 4, 8, 1, 2, 2, 2, 4, 11, 1, 3, 3, 2, 1, 7, 7, 3, 4, 5, 2, 5, 6, 3, 6, 3, 9, + 3, 0, 4, 2, 5, 4, 3, 2, 7, 4, 2, 10, 7, 4, 3, 5, 8, 5, 5, 2, 3, 3, 8, 6, 5, 6, 6, 6, 9, 3, 3, + 2, 6, 5, 4, 4, 4, 2, 5, 2, 8, 4, 3, 2, 2, 3, 3, 7, 5, 0, 7, 3, 5, 3, 3, 3, 6, 3, 3, 1, 3, 5, 7, + 5, 4, 5, 2, 4, 3, 7, 9, 2, 4, 2, 7, 6, 5, 3, 2, 6, 3, 6, 3, 5, 6, 3, 3, 3, 3, 3, 7, 5, 3, 4, 4, + 9, 5, 7, 9, 4, 9, 2, 4, 3, 1, 4, 6, 1, 3, 5, 5, 6, 4, 4, 2, 7, 7, 4, 5, 3, 1, 4, 5, 2, 4, 5, 2, + 7, 2, 11, 5, 4, 8, 6, 4, 3, 3, 6, 5, 4, 3, 4, 7, 2, 2, 2, 4, 3, 5, 4, 5, 3, 6, 5, 5, 2, 6, 1, + 11, 3, 3, 5, 5, 6, 2, 5, 3, 4, 5, 5, 7, 7, 7, 9, 3, 4, 6, 3, 3, 2, 6, 6, 1, 3, 1, 5, 7, 5, 7, + 8, 4, 5, 2, 6, 6, 5, 7, 5, 5, 6, 4, 2, 7, 6, 5, 5, 9, 4, 3, 3, 1, 1, 4, 5, 5, 6, 7, 2, 4, 6, 3, + 5, 5, 5, 4, 2, 4, 3, 3, 5, 2, 4, 4, 5, 6, 3, 6, 4, 5, 4, 5, 2, 8, 6, 5, 6, 7, 6, 2, 4, 9, 1, 3, + 5, 4, 7, 2, 5, 4, 7, 9, 2, 3, 2, 2, 7, 4, 1, 2, 6, 5, 10, 2, 4, 3, +]; + +// depth -> reorg probability from the Python reference. 
+#[rstest] +#[case(5, 1.58182730260265891863e-03)] +#[case(10, 1.67515743138728720072e-04)] +#[case(15, 2.80696481196116546684e-06)] +#[case(20, 6.84359796410981096872e-08)] +#[case(25, 1.46218662028857760238e-09)] +#[case(30, 4.62723254179747158594e-12)] +#[case(40, 3.53912692038794048900e-17)] +#[case(50, 1.37790735432279053542e-20)] +#[case(75, 2.40782990048672131651e-24)] +#[case(100, 3.21616912956779552478e-24)] +#[case(905, 1.)] +#[case(906, 1.)] +#[case(0, 1.)] +#[case(-1, 1.)] +fn test_calc_validator_prob_python_reference(#[case] depth: i64, #[case] want: f64) { + let chain = PYTHON_REFERENCE_CHAIN.as_slice(); + let current_epoch = chain.len() as i64 - 1; + let target_epoch = current_epoch - depth; + let got = calc_validator_prob( + chain, + TEST_FINALITY, + DEFAULT_BLOCKS_PER_EPOCH, + DEFAULT_BYZANTINE_FRACTION, + current_epoch, + target_epoch, + ) + .unwrap(); + let relative_error = (got - want).abs() / want.abs(); + assert_le!( + relative_error, + 1e-12, + "depth {depth}: got {got}, want {want}, relative error {relative_error}" + ); +} + +#[test] +fn test_calc_validator_prob_healthy_chain() { + // A perfectly healthy chain with 5 blocks per epoch should achieve + // 2^-30 finality well within 30 epochs + let chain = vec![5; 905]; + + let current_epoch = chain.len() as i64 - 1; + let guarantee = 2_f64.powi(-30); + + let prob30 = calc_validator_prob( + &chain, + TEST_FINALITY, + DEFAULT_BLOCKS_PER_EPOCH, + DEFAULT_BYZANTINE_FRACTION, + current_epoch, + current_epoch - 30, + ) + .unwrap(); + assert_lt!( + prob30, + guarantee, + "healthy chain at depth 30 should be below 2^-30" + ); + + let prob5 = calc_validator_prob( + &chain, + TEST_FINALITY, + DEFAULT_BLOCKS_PER_EPOCH, + DEFAULT_BYZANTINE_FRACTION, + current_epoch, + current_epoch - 5, + ) + .unwrap(); + assert_gt!( + prob5, + prob30, + "shallower depth should have higher reorg probability" + ); +} + +#[test] +fn test_calc_validator_prob_degraded_chain() { + // A degraded chain with only 2 blocks per 
epoch should have much worse + // finality than a healthy chain at the same depth + let chain = vec![2; 905]; + let current_epoch = chain.len() as i64 - 1; + let guarantee = 2_f64.powi(-30); + + let prob30 = calc_validator_prob( + &chain, + TEST_FINALITY, + DEFAULT_BLOCKS_PER_EPOCH, + DEFAULT_BYZANTINE_FRACTION, + current_epoch, + current_epoch - 30, + ) + .unwrap(); + assert_ge!( + prob30, + guarantee, + "degraded chain at depth 30 should NOT achieve 2^-30" + ); +} + +#[test] +fn test_find_threshold_depth_healthy_chain() { + let chain = vec![5; 905]; + let guarantee = 2_f64.powi(-30); + + let depth = find_threshold_depth( + &chain, + TEST_FINALITY, + DEFAULT_BLOCKS_PER_EPOCH, + DEFAULT_BYZANTINE_FRACTION, + guarantee, + ) + .unwrap(); + assert_gt!(depth, 0, "healthy chain should find a threshold"); + assert_lt!( + depth, + 35, + "healthy chain should finalize well before depth 35" + ); +} + +#[test] +fn test_find_threshold_depth_degraded_chain() { + // All-2s chain is too degraded to achieve 2^-30 within the bisect + // search range (BisectHigh=200), so threshold is not found + let chain = vec![2; 905]; + let guarantee = 2_f64.powi(-30); + + let depth = find_threshold_depth( + &chain, + TEST_FINALITY, + DEFAULT_BLOCKS_PER_EPOCH, + DEFAULT_BYZANTINE_FRACTION, + guarantee, + ) + .unwrap(); + assert_eq!( + depth, -1, + "severely degraded chain should not find threshold within search range" + ); +} + +#[test] +fn test_find_threshold_depth_mildly_degraded_chain() { + // All-3s chain is degraded but should still find a threshold, + // just deeper than a healthy chain + let chain = vec![3; 905]; + let guarantee = 2_f64.powi(-30); + + let depth = find_threshold_depth( + &chain, + TEST_FINALITY, + DEFAULT_BLOCKS_PER_EPOCH, + DEFAULT_BYZANTINE_FRACTION, + guarantee, + ) + .unwrap(); + assert_gt!( + depth, + 35, + "mildly degraded chain should require more depth than healthy" + ); + assert_lt!( + depth, + BISECT_HIGH, + "mildly degraded chain should still find a threshold" + 
); +} + +#[rstest] +#[case(4)] +#[case(3)] +#[case(2)] +#[case(1)] +fn test_find_threshold_depth_too_short_chain(#[case] chain_len: usize) { + let chain = vec![3; chain_len]; + let guarantee = 2_f64.powi(-30); + let depth = find_threshold_depth( + &chain, + TEST_FINALITY, + DEFAULT_BLOCKS_PER_EPOCH, + DEFAULT_BYZANTINE_FRACTION, + guarantee, + ) + .unwrap(); + assert_eq!(depth, -1, "input chain is too short"); +} + +#[test] +fn test_find_threshold_depth_too_large_guarantee() { + let chain = vec![3; 200]; + let guarantee = 2_f64; + let depth = find_threshold_depth( + &chain, + TEST_FINALITY, + DEFAULT_BLOCKS_PER_EPOCH, + DEFAULT_BYZANTINE_FRACTION, + guarantee, + ) + .unwrap(); + assert_eq!(depth, BISECT_LOW, "guarantee is too large"); +} + +#[rstest] +#[case(0., -0.1, f64::NEG_INFINITY)] +#[case(0., -1., f64::NEG_INFINITY)] +#[case(0., 1., f64::NEG_INFINITY)] +#[case(0., 0.1, f64::NEG_INFINITY)] +#[case(0., 0., 0.)] +fn poisson_log_prob_tests(#[case] lambda: f64, #[case] x: f64, #[case] want: f64) { + let got = poisson_log_prob(lambda, x); + assert_eq!(got, want); +} diff --git a/src/chain/ec_finality/mod.rs b/src/chain/ec_finality/mod.rs new file mode 100644 index 000000000000..d4b92acfefdd --- /dev/null +++ b/src/chain/ec_finality/mod.rs @@ -0,0 +1,4 @@ +// Copyright 2019-2026 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +mod calculator; diff --git a/src/chain/mod.rs b/src/chain/mod.rs index b16679c41bec..a2e3d56cdda4 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -1,6 +1,7 @@ // Copyright 2019-2026 ChainSafe Systems // SPDX-License-Identifier: Apache-2.0, MIT +mod ec_finality; mod snapshot_format; pub mod store; #[cfg(test)]