diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/biggroup/biggroup.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/biggroup/biggroup.hpp
index 84c590b4a12d..0caccd17affe 100644
--- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/biggroup/biggroup.hpp
+++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/biggroup/biggroup.hpp
@@ -320,8 +320,14 @@ template class element {
     };
 
     /**
-     * We can chain repeated point additions together, where we only require 2 non-native field multiplications per
-     * point addition, instead of 3
+     * @brief Optimized chained addition for non-infinity points.
+     *
+     * @pre p1 and p2 must NOT be point at infinity. Use operator+ for general addition.
+     * @pre p1.x ≠ p2.x for all points in the chain (required for the incomplete addition formula used in this method).
+     *
+     * @details We can chain repeated point additions together, where we only require 2 non-native field multiplications
+     * per point addition, instead of 3
+     *
      * NOTE: These must remain public as they are used by nested structs like batch_lookup_table_plookup
      **/
     static chain_add_accumulator chain_add_start(const element& p1, const element& p2);
diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/biggroup/biggroup_nafs.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/biggroup/biggroup_nafs.hpp
index e9dd554dba0e..513a932621d0 100644
--- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/biggroup/biggroup_nafs.hpp
+++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/biggroup/biggroup_nafs.hpp
@@ -18,6 +18,8 @@ std::pair element::get_staggered_wnaf_fragment_val
     bool is_negative,
     bool wnaf_skew)
 {
+    BB_ASSERT_LT(stagger, 32ULL, "biggroup_nafs: stagger value ≥ 32");
+
     // If there is no stagger then there is no need to change anything
     if (stagger == 0) {
         return std::make_pair(0, wnaf_skew);
@@ -367,16 +369,16 @@ typename element::secp256k1_wnaf_pair element::compu
     bool khi_negative = false;
     secp256k1::fr::split_into_endomorphism_scalars(k.from_montgomery_form(), klo, khi);
 
-    // The low and high scalars must be less than 2^129 in absolute value. In some cases, the khi value
-    // is returned as negative, in which case we negate it and set a flag to indicate this. This is because
-    // we decompose the scalar as:
-    //    k = klo + ζ * khi (mod n)
-    //      = klo - λ * khi (mod n)
-    // where λ is the cube root of unity. If khi is negative, then -λ * khi is positive, and vice versa.
+    // The low and high scalars must be less than 2^129 in absolute value. In some cases, the klo or khi value
+    // is returned as negative, in which case we negate it and set a flag to indicate this.
     if (khi.uint256_t_no_montgomery_conversion().get_msb() >= 129) {
         khi_negative = true;
         khi = -khi;
     }
+    if (klo.uint256_t_no_montgomery_conversion().get_msb() >= 129) {
+        klo_negative = true;
+        klo = -klo;
+    }
 
     BB_ASSERT_LT(klo.uint256_t_no_montgomery_conversion().get_msb(), 129ULL, "biggroup_nafs: klo > 129 bits");
     BB_ASSERT_LT(khi.uint256_t_no_montgomery_conversion().get_msb(), 129ULL, "biggroup_nafs: khi > 129 bits");