diff --git a/barretenberg/cpp/scripts/test_civc_standalone_vks_havent_changed.sh b/barretenberg/cpp/scripts/test_civc_standalone_vks_havent_changed.sh index 4cb206606d67..485f010abdfb 100755 --- a/barretenberg/cpp/scripts/test_civc_standalone_vks_havent_changed.sh +++ b/barretenberg/cpp/scripts/test_civc_standalone_vks_havent_changed.sh @@ -13,7 +13,7 @@ cd .. # - Generate a hash for versioning: sha256sum bb-civc-inputs.tar.gz # - Upload the compressed results: aws s3 cp bb-civc-inputs.tar.gz s3://aztec-ci-artifacts/protocol/bb-civc-inputs-[hash(0:8)].tar.gz # Note: In case of the "Test suite failed to run ... Unexpected token 'with' " error, need to run: docker pull aztecprotocol/build:3.0 -pinned_short_hash="5aa55c9f" +pinned_short_hash="04f38201" pinned_civc_inputs_url="https://aztec-ci-artifacts.s3.us-east-2.amazonaws.com/protocol/bb-civc-inputs-${pinned_short_hash}.tar.gz" function compress_and_upload { diff --git a/barretenberg/cpp/src/barretenberg/circuit_checker/ultra_circuit_checker.cpp b/barretenberg/cpp/src/barretenberg/circuit_checker/ultra_circuit_checker.cpp index dca6917b51d9..752a37d04356 100644 --- a/barretenberg/cpp/src/barretenberg/circuit_checker/ultra_circuit_checker.cpp +++ b/barretenberg/cpp/src/barretenberg/circuit_checker/ultra_circuit_checker.cpp @@ -22,8 +22,9 @@ UltraCircuitBuilder_ UltraCircuitChecker::prepare_cir { // Create a copy of the input circuit UltraCircuitBuilder_ builder{ builder_in }; - - builder.finalize_circuit(/*ensure_nonzero=*/true); // Test the ensure_nonzero gates as well + if (!builder.circuit_finalized) { // avoid warnings about finalizing an already finalized circuit + builder.finalize_circuit(/*ensure_nonzero=*/true); // Test the ensure_nonzero gates as well + } return builder; } diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.cpp index 9280060ecf77..50baded5cd78 100644 --- 
a/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.cpp @@ -1271,10 +1271,13 @@ template field_t field_t::accumulate(const * @brief Splits the field element into (lo, hi), where: * - lo contains bits [0, lsb_index) * - hi contains bits [lsb_index, num_bits) + * @details Max bits is specified as an argument, and must be <= grumpkin::MAX_NO_WRAP_INTEGER_BIT_LENGTH (to ensure no + * modular wrap). + * */ template -std::pair, field_t> field_t::split_at(const size_t lsb_index, - const size_t num_bits) const +std::pair, field_t> field_t::no_wrap_split_at(const size_t lsb_index, + const size_t num_bits) const { ASSERT(lsb_index < num_bits); ASSERT(num_bits <= grumpkin::MAX_NO_WRAP_INTEGER_BIT_LENGTH); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.hpp index 3452000349ed..e26a2a82f509 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.hpp @@ -396,7 +396,7 @@ template class field_t { Builder* get_context() const { return context; } - std::pair, field_t> split_at( + std::pair, field_t> no_wrap_split_at( const size_t lsb_index, const size_t num_bits = grumpkin::MAX_NO_WRAP_INTEGER_BIT_LENGTH) const; bool_t is_zero() const; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.test.cpp index e551b5d7f657..f9be2d2fe3b4 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field.test.cpp @@ -930,7 +930,7 @@ template class stdlib_field : public testing::Test { // Lambda to check split_at functionality auto check_split_at = [&](const field_ct& a, size_t start, size_t num_bits) { const uint256_t a_native = 
a.get_value(); - auto split_data = a.split_at(start, num_bits); + auto split_data = a.no_wrap_split_at(start, num_bits); EXPECT_EQ(split_data.first.get_value(), a_native & ((uint256_t(1) << start) - 1)); EXPECT_EQ(split_data.second.get_value(), (a_native >> start) & ((uint256_t(1) << num_bits) - 1)); @@ -1424,7 +1424,7 @@ template class stdlib_field : public testing::Test { // Split preserves tags const size_t num_bits = uint256_t(a.get_value()).get_msb() + 1; - auto split_data = a.split_at(num_bits / 2, num_bits); + auto split_data = a.no_wrap_split_at(num_bits / 2, num_bits); EXPECT_EQ(split_data.first.get_origin_tag(), submitted_value_origin_tag); EXPECT_EQ(split_data.second.get_origin_tag(), submitted_value_origin_tag); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field_utils.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field_utils.cpp new file mode 100644 index 000000000000..6bc99a3ecbc1 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field_utils.cpp @@ -0,0 +1,109 @@ +// === AUDIT STATUS === +// internal: { status: not started, auditors: [], date: YYYY-MM-DD } +// external_1: { status: not started, auditors: [], date: YYYY-MM-DD } +// external_2: { status: not started, auditors: [], date: YYYY-MM-DD } +// ===================== + +#include "./field_utils.hpp" +#include "./field.hpp" +#include "barretenberg/stdlib/primitives/circuit_builders/circuit_builders.hpp" + +namespace bb::stdlib { + +template +void validate_split_in_field(const field_t& lo, + const field_t& hi, + const size_t lo_bits, + const uint256_t& field_modulus) +{ + const size_t hi_bits = static_cast(field_modulus.get_msb()) + 1 - lo_bits; + + // Split the field modulus at the same position + const uint256_t r_lo = field_modulus.slice(0, lo_bits); + const uint256_t r_hi = field_modulus.slice(lo_bits, field_modulus.get_msb() + 1); + + // Check if we need to borrow + bool need_borrow = uint256_t(lo.get_value()) > r_lo; + 
field_t borrow = + lo.is_constant() ? need_borrow : field_t::from_witness(lo.get_context(), need_borrow); + + // directly call `create_new_range_constraint` to avoid creating an arithmetic gate + if (!lo.is_constant()) { + // We need to manually propagate the origin tag + borrow.set_origin_tag(lo.get_origin_tag()); + lo.get_context()->create_new_range_constraint(borrow.get_witness_index(), 1, "borrow"); + } + + // Hi range check = r_hi - hi - borrow + // Lo range check = r_lo - lo + borrow * 2^lo_bits + field_t hi_diff = (-hi + r_hi) - borrow; + field_t lo_diff = (-lo + r_lo) + (borrow * (uint256_t(1) << lo_bits)); + + hi_diff.create_range_constraint(hi_bits); + lo_diff.create_range_constraint(lo_bits); +} + +template +std::pair, field_t> split_unique(const field_t& field, + const size_t lo_bits, + const bool skip_range_constraints) +{ + using native = typename field_t::native; + static constexpr size_t max_bits = native::modulus.get_msb() + 1; + ASSERT(lo_bits < max_bits); + + const uint256_t value(field.get_value()); + const uint256_t lo_val = value.slice(0, lo_bits); + const uint256_t hi_val = value.slice(lo_bits, max_bits); + + Builder* ctx = field.get_context(); + + // If `field` is constant, return constants based on the native hi/lo values + if (field.is_constant()) { + return { field_t(lo_val), field_t(hi_val) }; + } + + // Create hi/lo witnesses + auto lo = field_t::from_witness(ctx, lo_val); + auto hi = field_t::from_witness(ctx, hi_val); + + // Component 1: Reconstruction constraint lo + hi * 2^lo_bits - field == 0 + const uint256_t shift = uint256_t(1) << lo_bits; + auto zero = field_t::from_witness_index(ctx, ctx->zero_idx); + field_t::evaluate_linear_identity(lo, hi * shift, -field, zero); + + // Set the origin tag for the limbs + lo.set_origin_tag(field.get_origin_tag()); + hi.set_origin_tag(field.get_origin_tag()); + + // Component 2: Field validation against bn254 scalar field modulus + validate_split_in_field(lo, hi, lo_bits, native::modulus); + + 
// Component 3: Range constraints (unless skipped) + if (!skip_range_constraints) { + lo.create_range_constraint(lo_bits); + // For bn254 scalar field, hi_bits = 254 - lo_bits + const size_t hi_bits = 254 - lo_bits; + hi.create_range_constraint(hi_bits); + } + + return { lo, hi }; +} + +// Explicit instantiations for split_unique +template std::pair, field_t> split_unique( + const field_t& field, const size_t lo_bits, const bool skip_range_constraints); +template std::pair, field_t> split_unique( + const field_t& field, const size_t lo_bits, const bool skip_range_constraints); + +// Explicit instantiations for validate_split_in_field +template void validate_split_in_field(const field_t& lo, + const field_t& hi, + const size_t lo_bits, + const uint256_t& field_modulus); +template void validate_split_in_field(const field_t& lo, + const field_t& hi, + const size_t lo_bits, + const uint256_t& field_modulus); + +} // namespace bb::stdlib diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field_utils.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field_utils.hpp new file mode 100644 index 000000000000..9b54b750545e --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field_utils.hpp @@ -0,0 +1,52 @@ +// === AUDIT STATUS === +// internal: { status: not started, auditors: [], date: YYYY-MM-DD } +// external_1: { status: not started, auditors: [], date: YYYY-MM-DD } +// external_2: { status: not started, auditors: [], date: YYYY-MM-DD } +// ===================== + +#pragma once + +#include "./field.hpp" +#include + +namespace bb::stdlib { + +template class field_t; + +/** + * @brief Split a bn254 scalar field element into unique lo and hi limbs + * + * @details Splits `field` into a low and high limb at the given bit index with: + * 1. Reconstruction constraint: lo + hi * 2^lo_bits = field + * 2. Modulus check: lo + hi * 2^lo_bits < bn254::ScalarField::modulus + * 3. 
Range constraints: lo in [0, 2^lo_bits), hi in [0, 2^(254-lo_bits)) (unless skip_range_constraints = true) + * + * @note The combination of (2) and (3) establishes the uniqueness of the decomposition. + * + * @param field The bn254 scalar field element to split + * @param lo_bits Number of bits for the low limb + * @param skip_range_constraints If true, skip range constraints (use when they're implicit, e.g., in lookups) + * @return std::pair, field_t> The (lo, hi) pair + */ +template +std::pair, field_t> split_unique(const field_t& field, + const size_t lo_bits, + const bool skip_range_constraints = false); + +/** + * @brief Validates that lo + hi * 2^lo_bits < field_modulus + * @details Can be used in conjunction with range constraints on lo and hi to establish a unique decomposition of a + * field element. + * + * @param lo The low limb + * @param hi The high limb + * @param lo_bits The bit position at which the split occurred + * @param field_modulus The field modulus to validate against + */ +template +void validate_split_in_field(const field_t& lo, + const field_t& hi, + const size_t lo_bits, + const uint256_t& field_modulus); + +} // namespace bb::stdlib diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp index 460a7d519de2..de7a3055e4ae 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.test.cpp @@ -53,6 +53,18 @@ template class CycleGroupTest : public ::testing::Test { using CircuitTypes = ::testing::Types; TYPED_TEST_SUITE(CycleGroupTest, CircuitTypes); +// Utility function for gate count checking and circuit verification +template void check_circuit_and_gates(Builder& builder, uint32_t expected_gates) +{ + if (!builder.circuit_finalized) { + builder.finalize_circuit(/*ensure_nonzero=*/false); + } + uint32_t actual_gates 
= static_cast(builder.get_num_finalized_gates()); + EXPECT_EQ(actual_gates, expected_gates) + << "Gate count changed! Expected: " << expected_gates << ", Actual: " << actual_gates; + EXPECT_TRUE(CircuitChecker::check(builder)); +} + STANDARD_TESTING_TAGS /** * @brief Check basic tag interactions @@ -97,7 +109,7 @@ TYPED_TEST(CycleGroupTest, TestInfConstantWintnessRegression) cycle_group_ct a = cycle_group_ct::from_constant_witness(&builder, lhs); (void)a; EXPECT_FALSE(builder.failed()); - EXPECT_TRUE(CircuitChecker::check(builder)); + check_circuit_and_gates(builder, 1); } /** @@ -113,7 +125,7 @@ TYPED_TEST(CycleGroupTest, TestInfWintnessRegression) cycle_group_ct a = cycle_group_ct::from_witness(&builder, lhs); (void)a; EXPECT_FALSE(builder.failed()); - EXPECT_TRUE(CircuitChecker::check(builder)); + check_circuit_and_gates(builder, 7); } /** @@ -152,7 +164,7 @@ TYPED_TEST(CycleGroupTest, TestOperatorNegRegression) cycle_group_ct c = a.unconditional_add(b); (void)c; EXPECT_FALSE(builder.failed()); - EXPECT_TRUE(CircuitChecker::check(builder)); + check_circuit_and_gates(builder, 16); } /** @@ -175,7 +187,7 @@ TYPED_TEST(CycleGroupTest, TestConstantWitnessMixupRegression) auto w27 = w10 - w11; // and here (void)w26; (void)w27; - EXPECT_NO_THROW(CircuitChecker::check(builder)); // It won't be a throw anyway + check_circuit_and_gates(builder, 42); } /** @@ -191,7 +203,7 @@ TYPED_TEST(CycleGroupTest, TestConditionalAssignRegression) auto c1 = cycle_group_ct::conditional_assign(bool_ct(witness_ct(&builder, false)), c0, c0); auto w3 = c1.dbl(); (void)w3; - EXPECT_NO_THROW(CircuitChecker::check(builder)); // It won't be a throw anyway + check_circuit_and_gates(builder, 2); } /** @@ -211,7 +223,7 @@ TYPED_TEST(CycleGroupTest, TestConditionalAssignSuperMixupRegression) EXPECT_TRUE(w2.is_point_at_infinity().is_constant()); auto w3 = w2.dbl(); (void)w3; - EXPECT_NO_THROW(CircuitChecker::check(builder)); // It won't be a throw anyway + check_circuit_and_gates(builder, 6); } /** 
@@ -227,7 +239,7 @@ TYPED_TEST(CycleGroupTest, TestValidateOnCurveSucceed) cycle_group_ct a = cycle_group_ct::from_witness(&builder, lhs); a.validate_is_on_curve(); EXPECT_FALSE(builder.failed()); - EXPECT_TRUE(CircuitChecker::check(builder)); + check_circuit_and_gates(builder, 12); } /** @@ -246,7 +258,7 @@ TYPED_TEST(CycleGroupTest, TestValidateOnCurveInfinitySucceed) cycle_group_ct a(x, y, /*_is_infinity=*/true); // marks this point as the point at infinity a.validate_is_on_curve(); EXPECT_FALSE(builder.failed()); - EXPECT_TRUE(CircuitChecker::check(builder)); + check_circuit_and_gates(builder, 1); } /** @@ -367,7 +379,7 @@ TYPED_TEST(CycleGroupTest, TestStandardForm) EXPECT_EQ(standard_f_x, 0); EXPECT_EQ(standard_f_y, 0); - EXPECT_TRUE(CircuitChecker::check(builder)); + check_circuit_and_gates(builder, 16); } TYPED_TEST(CycleGroupTest, TestDbl) { @@ -393,8 +405,7 @@ TYPED_TEST(CycleGroupTest, TestDbl) EXPECT_EQ(result, expected); EXPECT_EQ(d.get_value(), expected); - bool proof_result = CircuitChecker::check(builder); - EXPECT_EQ(proof_result, true); + check_circuit_and_gates(builder, 16); // Ensure the tags stay the same after doubling EXPECT_EQ(c.get_origin_tag(), submitted_value_origin_tag); @@ -426,8 +437,7 @@ TYPED_TEST(CycleGroupTest, TestUnconditionalAdd) add(TestFixture::generators[0], TestFixture::generators[1], true, false); add(TestFixture::generators[0], TestFixture::generators[1], true, true); - bool proof_result = CircuitChecker::check(builder); - EXPECT_EQ(proof_result, true); + check_circuit_and_gates(builder, 35); } TYPED_TEST(CycleGroupTest, TestConstrainedUnconditionalAddSucceed) @@ -446,8 +456,7 @@ TYPED_TEST(CycleGroupTest, TestConstrainedUnconditionalAddSucceed) AffineElement result = c.get_value(); EXPECT_EQ(result, expected); - bool proof_result = CircuitChecker::check(builder); - EXPECT_EQ(proof_result, true); + check_circuit_and_gates(builder, 17); } TYPED_TEST(CycleGroupTest, TestConstrainedUnconditionalAddFail) @@ -464,9 +473,8 @@ 
TYPED_TEST(CycleGroupTest, TestConstrainedUnconditionalAddFail) a.checked_unconditional_add(b); EXPECT_TRUE(builder.failed()); - - bool proof_result = CircuitChecker::check(builder); - EXPECT_EQ(proof_result, false); + // No gate count check for failing test + EXPECT_FALSE(CircuitChecker::check(builder)); } TYPED_TEST(CycleGroupTest, TestAdd) @@ -561,8 +569,7 @@ TYPED_TEST(CycleGroupTest, TestAdd) EXPECT_EQ(c.get_origin_tag(), first_two_merged_tag); } - bool proof_result = CircuitChecker::check(builder); - EXPECT_EQ(proof_result, true); + check_circuit_and_gates(builder, 268); } TYPED_TEST(CycleGroupTest, TestUnconditionalSubtract) @@ -591,8 +598,7 @@ TYPED_TEST(CycleGroupTest, TestUnconditionalSubtract) subtract(TestFixture::generators[0], TestFixture::generators[1], true, false); subtract(TestFixture::generators[0], TestFixture::generators[1], true, true); - bool proof_result = CircuitChecker::check(builder); - EXPECT_EQ(proof_result, true); + check_circuit_and_gates(builder, 35); } TYPED_TEST(CycleGroupTest, TestConstrainedUnconditionalSubtractSucceed) @@ -611,8 +617,7 @@ TYPED_TEST(CycleGroupTest, TestConstrainedUnconditionalSubtractSucceed) AffineElement result = c.get_value(); EXPECT_EQ(result, expected); - bool proof_result = CircuitChecker::check(builder); - EXPECT_EQ(proof_result, true); + check_circuit_and_gates(builder, 17); } TYPED_TEST(CycleGroupTest, TestConstrainedUnconditionalSubtractFail) @@ -629,9 +634,8 @@ TYPED_TEST(CycleGroupTest, TestConstrainedUnconditionalSubtractFail) a.checked_unconditional_subtract(b); EXPECT_TRUE(builder.failed()); - - bool proof_result = CircuitChecker::check(builder); - EXPECT_EQ(proof_result, false); + // No gate count check for failing test + EXPECT_FALSE(CircuitChecker::check(builder)); } TYPED_TEST(CycleGroupTest, TestSubtract) @@ -729,8 +733,7 @@ TYPED_TEST(CycleGroupTest, TestSubtract) EXPECT_EQ(c.get_origin_tag(), first_two_merged_tag); } - bool proof_result = CircuitChecker::check(builder); - 
EXPECT_EQ(proof_result, true); + check_circuit_and_gates(builder, 274); } /** @@ -797,8 +800,7 @@ TYPED_TEST(CycleGroupTest, TestBatchMulGeneralMSM) // The tag should the union of all tags EXPECT_EQ(result.get_origin_tag(), expected_tag); - bool check_result = CircuitChecker::check(builder); - EXPECT_EQ(check_result, true); + check_circuit_and_gates(builder, 4397); } TYPED_TEST(CycleGroupTest, TestBatchMulProducesInfinity) @@ -825,8 +827,7 @@ TYPED_TEST(CycleGroupTest, TestBatchMulProducesInfinity) EXPECT_EQ(result.get_origin_tag(), expected_tag); - bool check_result = CircuitChecker::check(builder); - EXPECT_EQ(check_result, true); + check_circuit_and_gates(builder, 4023); } TYPED_TEST(CycleGroupTest, TestBatchMulMultiplyByZero) @@ -848,8 +849,7 @@ TYPED_TEST(CycleGroupTest, TestBatchMulMultiplyByZero) EXPECT_TRUE(result.is_point_at_infinity().get_value()); EXPECT_EQ(result.get_origin_tag(), expected_tag); - bool check_result = CircuitChecker::check(builder); - EXPECT_EQ(check_result, true); + check_circuit_and_gates(builder, 3533); } TYPED_TEST(CycleGroupTest, TestBatchMulInputsAreInfinity) @@ -884,8 +884,7 @@ TYPED_TEST(CycleGroupTest, TestBatchMulInputsAreInfinity) EXPECT_TRUE(result.is_point_at_infinity().get_value()); EXPECT_EQ(result.get_origin_tag(), expected_tag); - bool check_result = CircuitChecker::check(builder); - EXPECT_EQ(check_result, true); + check_circuit_and_gates(builder, 3557); } TYPED_TEST(CycleGroupTest, TestBatchMulFixedBaseInLookupTable) @@ -923,8 +922,7 @@ TYPED_TEST(CycleGroupTest, TestBatchMulFixedBaseInLookupTable) EXPECT_EQ(result.get_value(), crypto::pedersen_commitment::commit_native(scalars_native)); EXPECT_EQ(result.get_origin_tag(), expected_tag); - bool check_result = CircuitChecker::check(builder); - EXPECT_EQ(check_result, true); + check_circuit_and_gates(builder, 2823); } TYPED_TEST(CycleGroupTest, TestBatchMulFixedBaseSomeInLookupTable) @@ -969,8 +967,7 @@ TYPED_TEST(CycleGroupTest, TestBatchMulFixedBaseSomeInLookupTable) 
EXPECT_EQ(result.get_value(), AffineElement(expected)); EXPECT_EQ(result.get_origin_tag(), expected_tag); - bool check_result = CircuitChecker::check(builder); - EXPECT_EQ(check_result, true); + check_circuit_and_gates(builder, 3399); } TYPED_TEST(CycleGroupTest, TestBatchMulFixedBaseZeroScalars) @@ -1000,8 +997,7 @@ TYPED_TEST(CycleGroupTest, TestBatchMulFixedBaseZeroScalars) EXPECT_EQ(result.is_point_at_infinity().get_value(), true); EXPECT_EQ(result.get_origin_tag(), expected_tag); - bool check_result = CircuitChecker::check(builder); - EXPECT_EQ(check_result, true); + check_circuit_and_gates(builder, 2838); } TYPED_TEST(CycleGroupTest, TestMul) @@ -1046,8 +1042,7 @@ TYPED_TEST(CycleGroupTest, TestMul) } } - bool proof_result = CircuitChecker::check(builder); - EXPECT_EQ(proof_result, true); + check_circuit_and_gates(builder, 6598); } TYPED_TEST(CycleGroupTest, TestOne) @@ -1088,7 +1083,7 @@ TYPED_TEST(CycleGroupTest, TestConversionFromBigfield) if (construct_witnesses) { EXPECT_FALSE(big_elt.is_constant()); EXPECT_FALSE(scalar_from_big_elt.is_constant()); - EXPECT_TRUE(CircuitChecker::check(builder)); + check_circuit_and_gates(builder, 3499); } }; run_test(/*construct_witnesses=*/true); @@ -1130,7 +1125,7 @@ TYPED_TEST(CycleGroupTest, TestBatchMulIsConsistent) // TODO(https://github.com/AztecProtocol/barretenberg/issues/1020): Re-enable these. 
// EXPECT_FALSE(result1.is_constant()); // EXPECT_FALSE(result2.is_constant()); - EXPECT_TRUE(CircuitChecker::check(builder)); + check_circuit_and_gates(builder, 5289); } }; run_test(/*construct_witnesses=*/true); @@ -1206,6 +1201,6 @@ TYPED_TEST(CycleGroupTest, TestFixedBaseBatchMul) EXPECT_EQ(result.get_value(), expected); - EXPECT_TRUE(CircuitChecker::check(builder)); + check_circuit_and_gates(builder, 2909); } #pragma GCC diagnostic pop diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_scalar.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_scalar.cpp index b3bd179f8d06..43df8fc41bde 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_scalar.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_scalar.cpp @@ -8,6 +8,7 @@ #include "./cycle_group.hpp" #include "barretenberg/common/assert.hpp" #include "barretenberg/stdlib/primitives/circuit_builders/circuit_builders.hpp" +#include "barretenberg/stdlib/primitives/field/field_utils.hpp" namespace bb::stdlib { @@ -17,27 +18,6 @@ cycle_scalar::cycle_scalar(const field_t& _lo, const field_t& _hi) , hi(_hi) {} -template cycle_scalar::cycle_scalar(const field_t& in) -{ - const uint256_t value(in.get_value()); - const auto [lo_v, hi_v] = decompose_into_lo_hi(value); - constexpr uint256_t shift = uint256_t(1) << LO_BITS; - if (in.is_constant()) { - lo = lo_v; - hi = hi_v; - } else { - lo = witness_t(in.get_context(), lo_v); - hi = witness_t(in.get_context(), hi_v); - (lo + hi * shift).assert_equal(in); - // TODO(https://github.com/AztecProtocol/barretenberg/issues/1022): ensure lo and hi are in bb::fr modulus not - // bb::fq modulus otherwise we could have two representations for in - validate_scalar_is_in_field(); - } - // We need to manually propagate the origin tag - lo.set_origin_tag(in.get_origin_tag()); - hi.set_origin_tag(in.get_origin_tag()); -} - template cycle_scalar::cycle_scalar(const ScalarField& in) { const uint256_t 
value(in); @@ -73,44 +53,34 @@ cycle_scalar cycle_scalar::from_u256_witness(Builder* context, const size_t num_bits = 256; const uint256_t lo_v = bitstring.slice(0, LO_BITS); const uint256_t hi_v = bitstring.slice(LO_BITS, num_bits); - field_t lo = witness_t(context, lo_v); - field_t hi = witness_t(context, hi_v); - lo.set_free_witness_tag(); - hi.set_free_witness_tag(); - cycle_scalar result{ lo, hi, num_bits, true, false }; - return result; + auto lo = field_t::from_witness(context, lo_v); + auto hi = field_t::from_witness(context, hi_v); + return cycle_scalar{ + lo, hi, num_bits, /*skip_primality_test=*/true, /*use_bn254_scalar_field_for_primality_test=*/false + }; } /** - * @brief Use when we want to multiply a group element by a string of bits of known size. - * N.B. using this constructor method will make our scalar multiplication methods not perform primality tests. + * @brief Construct a cycle scalar (grumpkin scalar field element) from a bn254 scalar field element + * @details This method ensures that the input is constrained to be less than the bn254 scalar field modulus. 
* * @tparam Builder - * @param context - * @param value - * @param num_bits - * @return cycle_scalar + * @param in a field_t representing a bn254 scalar field element + * @return cycle_scalar a cycle_scalar representing the same value in the grumpkin scalar field */ -template -cycle_scalar cycle_scalar::create_from_bn254_scalar(const field_t& in, const bool skip_primality_test) +template cycle_scalar cycle_scalar::create_from_bn254_scalar(const field_t& in) { - const uint256_t value_u256(in.get_value()); - const auto [lo_v, hi_v] = decompose_into_lo_hi(value_u256); - if (in.is_constant()) { - cycle_scalar result{ field_t(lo_v), field_t(hi_v), NUM_BITS, false, true }; - return result; - } - field_t lo = witness_t(in.get_context(), lo_v); - field_t hi = witness_t(in.get_context(), hi_v); - lo.add_two(hi * (uint256_t(1) << LO_BITS), -in).assert_equal(0); - - // We need to manually propagate the origin tag - lo.set_origin_tag(in.get_origin_tag()); - hi.set_origin_tag(in.get_origin_tag()); - - cycle_scalar result{ lo, hi, NUM_BITS, skip_primality_test, true }; - return result; + // Use split_unique with skip_range_constraints=true since the range constraints are implicit + // in the lookup arguments used in scalar multiplication and thus do not need to be applied here. + auto [lo, hi] = split_unique(in, LO_BITS, /*skip_range_constraints=*/true); + // AUDITTODO: we skip the primality test in the constructor here since it's done in split_unique. Eventually + // the skip_primality_test logic will be removed entirely and constraints will always be applied immediately on + // construction. + return cycle_scalar{ + lo, hi, NUM_BITS, /*skip_primality_test=*/true, /*use_bn254_scalar_field_for_primality_test=*/true + }; } + /** * @brief Construct a new cycle scalar from a bigfield _value, over the same ScalarField Field. 
If _value is a witness, * we add constraints to ensure the conversion is correct by reconstructing a bigfield from the limbs of the @@ -181,7 +151,7 @@ template cycle_scalar::cycle_scalar(BigScalarField& const uint64_t hi_bits = hi_max.get_msb() + 1; lo.create_range_constraint(BigScalarField::NUM_LIMB_BITS); hi.create_range_constraint(static_cast(hi_bits)); - limb0.assert_equal(lo + hi * BigScalarField::shift_1); + limb0.assert_equal(lo + (hi * BigScalarField::shift_1)); limb1 += hi; limb1_max += hi_max; @@ -213,7 +183,7 @@ template cycle_scalar::cycle_scalar(BigScalarField& // We need to propagate the origin tag to the chunks of limb1 limb_1_lo.set_origin_tag(limb1.get_origin_tag()); limb_1_hi.set_origin_tag(limb1.get_origin_tag()); - limb1.assert_equal(limb_1_hi * limb_1_hi_multiplicand + limb_1_lo); + limb1.assert_equal((limb_1_hi * limb_1_hi_multiplicand) + limb_1_lo); // Step 4: apply range constraints to validate both slices represent the expected contributions to *this.lo and // *this,hi @@ -243,50 +213,20 @@ template bool cycle_scalar::is_constant() const } /** - * @brief Checks that a cycle_scalar value is smaller than a prime field modulus when evaluated over the INTEGERS - * N.B. The prime we check can be either the SNARK curve group order or the circuit's embedded curve group order - * (i.e. BN254 or Grumpkin) - * For a canonical scalar mul, we check against the embedded curve (i.e. the curve - * cycle_group implements). - * HOWEVER: for Pedersen hashes and Pedersen commitments, the hashed/committed data will be - * native circuit field elements i.e. for a BN254 snark, cycle_group = Grumpkin and we will be committing/hashing - * BN254::ScalarField values *NOT* Grumpkin::ScalarFIeld values. - * TLDR: whether the input scalar has to be < BN254::ScalarField or < Grumpkin::ScalarField is context-dependent. 
+ * @brief Validates that the scalar (lo + hi * 2^LO_BITS) is less than the appropriate field modulus + * @details Checks against either bn254 scalar field or grumpkin scalar field based on internal flags. + * If _skip_primality_test is true, no validation is performed. + * @note: Implies (lo + hi * 2^LO_BITS) < field_modulus as integers when combined with appropriate range constraints on + * lo and hi. * * @tparam Builder */ template void cycle_scalar::validate_scalar_is_in_field() const { - using FF = typename field_t::native; - constexpr bool IS_ULTRA = Builder::CIRCUIT_TYPE == CircuitType::ULTRA; - - // AUDITTODO: Investigate using field_t::split_at here per Sergei's suggestion - if (!is_constant() && !skip_primality_test()) { - // Check that scalar.hi * 2^LO_BITS + scalar.lo < cycle_group_modulus when evaluated over the integers - const uint256_t cycle_group_modulus = - use_bn254_scalar_field_for_primality_test() ? FF::modulus : ScalarField::modulus; - const auto [r_lo, r_hi] = decompose_into_lo_hi(cycle_group_modulus); - - bool need_borrow = uint256_t(lo.get_value()) > r_lo; - field_t borrow = lo.is_constant() ? 
need_borrow : field_t::from_witness(get_context(), need_borrow); - - // directly call `create_new_range_constraint` to avoid creating an arithmetic gate - if (!lo.is_constant()) { - // We need to manually propagate the origin tag - borrow.set_origin_tag(lo.get_origin_tag()); - if constexpr (IS_ULTRA) { - get_context()->create_new_range_constraint(borrow.get_witness_index(), 1, "borrow"); - } else { - borrow.assert_equal(borrow * borrow); - } - } - // Hi range check = r_hi - y_hi - borrow - // Lo range check = r_lo - y_lo + borrow * 2^{126} - field_t hi_diff = (-hi + r_hi) - borrow; - field_t lo_diff = (-lo + r_lo) + (borrow * (uint256_t(1) << LO_BITS)); - - hi_diff.create_range_constraint(HI_BITS); - lo_diff.create_range_constraint(LO_BITS); + if (!_skip_primality_test) { + const uint256_t& field_modulus = + _use_bn254_scalar_field_for_primality_test ? field_t::native::modulus : ScalarField::modulus; + validate_split_in_field(lo, hi, LO_BITS, field_modulus); } } diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_scalar.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_scalar.hpp index f0e08619f601..45992d7dff6d 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_scalar.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_scalar.hpp @@ -38,7 +38,7 @@ template class cycle_scalar { using ScalarField = typename Curve::ScalarField; using BigScalarField = stdlib::bigfield; - static constexpr size_t NUM_BITS = ScalarField::modulus.get_msb() + 1; + static constexpr size_t NUM_BITS = ScalarField::modulus.get_msb() + 1; // equivalent for both bn254 and grumpkin static constexpr size_t LO_BITS = field_t::native::Params::MAX_BITS_PER_ENDOMORPHISM_SCALAR; static constexpr size_t HI_BITS = NUM_BITS - LO_BITS; @@ -63,7 +63,6 @@ template class cycle_scalar { return { value.slice(0, LO_BITS), value.slice(LO_BITS, NUM_BITS) }; } - public: cycle_scalar(const field_t& _lo, const field_t& _hi, 
const size_t bits, @@ -74,12 +73,16 @@ template class cycle_scalar { , _num_bits(bits) , _skip_primality_test(skip_primality_test) , _use_bn254_scalar_field_for_primality_test(use_bn254_scalar_field_for_primality_test) {}; + + public: + // AUDITTODO: this is used only in the fuzzer. cycle_scalar(const ScalarField& _in = 0); cycle_scalar(const field_t& _lo, const field_t& _hi); - cycle_scalar(const field_t& _in); + // AUDITTODO: this is used only in the fuzzer. It's not inherently problematic, but perhaps the fuzzer should use a + // production entrypoint. static cycle_scalar from_witness(Builder* context, const ScalarField& value); static cycle_scalar from_u256_witness(Builder* context, const uint256_t& bitstring); - static cycle_scalar create_from_bn254_scalar(const field_t& _in, bool skip_primality_test = false); + static cycle_scalar create_from_bn254_scalar(const field_t& _in); [[nodiscard]] bool is_constant() const; ScalarField get_value() const; Builder* get_context() const { return lo.get_context() != nullptr ? 
lo.get_context() : hi.get_context(); } @@ -89,9 +92,14 @@ template class cycle_scalar { { return _use_bn254_scalar_field_for_primality_test; } - void validate_scalar_is_in_field() const; + /** + * @brief Validates that the scalar (lo + hi * 2^LO_BITS) is less than the appropriate field modulus + * @details Checks against either bn254 scalar field or grumpkin scalar field based on internal flags + */ + void validate_scalar_is_in_field() const; explicit cycle_scalar(BigScalarField&); + /** * @brief Get the origin tag of the cycle_scalar (a merge of the lo and hi tags) * diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/logic/logic.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/logic/logic.cpp index 0b21c8c18119..28d4392c9913 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/logic/logic.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/logic/logic.cpp @@ -96,8 +96,8 @@ field_t logic::create_logic_constraint( right = right >> 32; } - field_pt a_slice_other = a.split_at(num_bits).first; - field_pt b_slice_other = b.split_at(num_bits).first; + field_pt a_slice_other = a.no_wrap_split_at(num_bits).first; + field_pt b_slice_other = b.no_wrap_split_at(num_bits).first; a_slice_other.assert_equal(a_accumulator, "stdlib logic: failed to reconstruct left operand"); b_slice_other.assert_equal(b_accumulator, "stdlib logic: failed to reconstruct right operand"); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/transcript/transcript.hpp b/barretenberg/cpp/src/barretenberg/stdlib/transcript/transcript.hpp index 01e837830bcc..091114dee82d 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/transcript/transcript.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/transcript/transcript.hpp @@ -10,6 +10,7 @@ #include "barretenberg/crypto/poseidon2/poseidon2.hpp" #include "barretenberg/stdlib/hash/poseidon2/poseidon2.hpp" #include "barretenberg/stdlib/primitives/field/field_conversion.hpp" +#include 
"barretenberg/stdlib/primitives/field/field_utils.hpp" #include "barretenberg/stdlib/primitives/group/cycle_group.hpp" #include "barretenberg/transcript/transcript.hpp" namespace bb::stdlib::recursion::honk { @@ -26,20 +27,18 @@ template struct StdlibTranscriptParams { } /** * @brief Split a challenge field element into two half-width challenges - * @details `lo` is 128 bits and `hi` is 126 bits. - * This should provide significantly more than our security parameter bound: 100 bits + * @details `lo` is 128 bits and `hi` is 126 bits which should provide significantly more than our security + * parameter bound: 100 bits. The decomposition is constrained to be unique. * * @param challenge * @return std::array */ static inline std::array split_challenge(const DataType& challenge) { - // use existing field-splitting code in cycle_scalar - using cycle_scalar = typename stdlib::cycle_group::cycle_scalar; - const cycle_scalar scalar = cycle_scalar(challenge); - scalar.lo.create_range_constraint(cycle_scalar::LO_BITS); - scalar.hi.create_range_constraint(cycle_scalar::HI_BITS); - return std::array{ scalar.lo, scalar.hi }; + const size_t lo_bits = DataType::native::Params::MAX_BITS_PER_ENDOMORPHISM_SCALAR; + // Constuct a unique lo/hi decomposition of the challenge (hi_bits will be 254 - 128 = 126) + const auto [lo, hi] = split_unique(challenge, lo_bits); + return std::array{ lo, hi }; } template static inline T convert_challenge(const DataType& challenge) {