diff --git a/barretenberg/acir_tests/internal_test_programs/double_verify_honk_proof/src/main.nr b/barretenberg/acir_tests/internal_test_programs/double_verify_honk_proof/src/main.nr index 5425ccfb13dc..b56ac0065c47 100644 --- a/barretenberg/acir_tests/internal_test_programs/double_verify_honk_proof/src/main.nr +++ b/barretenberg/acir_tests/internal_test_programs/double_verify_honk_proof/src/main.nr @@ -1,5 +1,5 @@ // This circuit aggregates two Honk proof from `assert_statement`. -use bb_proof_verification::{UltraHonkVerificationKey, UltraHonkProof, verify_ultrahonk_proof}; +use bb_proof_verification::{UltraHonkVerificationKey, UltraHonkProof, verify_honk_proof_non_zk}; fn main( verification_key: UltraHonkVerificationKey, @@ -11,13 +11,13 @@ fn main( // The second proof, currently set to be identical proof_b: UltraHonkProof, ) { - verify_ultrahonk_proof( + verify_honk_proof_non_zk( verification_key, proof, public_inputs, key_hash, ); - verify_ultrahonk_proof( + verify_honk_proof_non_zk( verification_key, proof_b, public_inputs, diff --git a/barretenberg/acir_tests/internal_test_programs/double_verify_honk_zk_proof/src/main.nr b/barretenberg/acir_tests/internal_test_programs/double_verify_honk_zk_proof/src/main.nr index a3a2e9d3883d..961a4279b166 100644 --- a/barretenberg/acir_tests/internal_test_programs/double_verify_honk_zk_proof/src/main.nr +++ b/barretenberg/acir_tests/internal_test_programs/double_verify_honk_zk_proof/src/main.nr @@ -1,5 +1,5 @@ // This circuit aggregates two ZK-Honk proofs from `assert_statement`. 
-use bb_proof_verification::{UltraHonkVerificationKey, UltraHonkZKProof, verify_ultrahonkzk_proof}; +use bb_proof_verification::{UltraHonkVerificationKey, UltraHonkZKProof, verify_honk_proof}; fn main( verification_key: UltraHonkVerificationKey, @@ -11,13 +11,13 @@ fn main( // The second proof, currently set to be identical proof_b: UltraHonkZKProof, ) { - verify_ultrahonkzk_proof( + verify_honk_proof( verification_key, proof, public_inputs, key_hash, ); - verify_ultrahonkzk_proof( + verify_honk_proof( verification_key, proof_b, public_inputs, diff --git a/barretenberg/acir_tests/internal_test_programs/verify_honk_proof/src/main.nr b/barretenberg/acir_tests/internal_test_programs/verify_honk_proof/src/main.nr index a4eaee45ef47..18d7d8b456fd 100644 --- a/barretenberg/acir_tests/internal_test_programs/verify_honk_proof/src/main.nr +++ b/barretenberg/acir_tests/internal_test_programs/verify_honk_proof/src/main.nr @@ -1,5 +1,5 @@ // This circuit aggregates a single Honk proof from `assert_statement`. -use bb_proof_verification::{UltraHonkVerificationKey, UltraHonkProof, verify_ultrahonk_proof}; +use bb_proof_verification::{UltraHonkVerificationKey, UltraHonkProof, verify_honk_proof_non_zk}; fn main( verification_key: UltraHonkVerificationKey, @@ -11,7 +11,7 @@ fn main( // I believe we want to eventually make it public too though. key_hash: Field, ) { - verify_ultrahonk_proof( + verify_honk_proof_non_zk( verification_key, proof, public_inputs, diff --git a/barretenberg/acir_tests/internal_test_programs/verify_honk_zk_proof/src/main.nr b/barretenberg/acir_tests/internal_test_programs/verify_honk_zk_proof/src/main.nr index 848c8c8189ea..4d7fd5f8c7ea 100644 --- a/barretenberg/acir_tests/internal_test_programs/verify_honk_zk_proof/src/main.nr +++ b/barretenberg/acir_tests/internal_test_programs/verify_honk_zk_proof/src/main.nr @@ -1,5 +1,5 @@ // This circuit aggregates a single ZK-Honk proof from `assert_statement`. 
-use bb_proof_verification::{UltraHonkVerificationKey, UltraHonkZKProof, verify_ultrahonkzk_proof}; +use bb_proof_verification::{UltraHonkVerificationKey, UltraHonkZKProof, verify_honk_proof}; fn main( verification_key: UltraHonkVerificationKey, @@ -10,7 +10,7 @@ fn main( // This is currently not public. It is fine given that the vk is a part of the circuit definition. key_hash: Field, ) { - verify_ultrahonkzk_proof( + verify_honk_proof( verification_key, proof, public_inputs, diff --git a/barretenberg/cpp/scripts/bench_hardware_concurrency.sh b/barretenberg/cpp/scripts/bench_hardware_concurrency.sh index 941a89888acc..49eb75879812 100755 --- a/barretenberg/cpp/scripts/bench_hardware_concurrency.sh +++ b/barretenberg/cpp/scripts/bench_hardware_concurrency.sh @@ -197,8 +197,8 @@ for test_case in test_cases: # Categorize metrics based on name if "Chonk" in metric_name or "Chonk" in metric_name: components["Main"][metric_name][cpu] = time_ms - elif "ProtogalaxyProver" in metric_name: - components["ProtogalaxyProver"][metric_name][cpu] = time_ms + elif "HypernovaProver" in metric_name or "HypernovaFoldingProver" in metric_name or "HypernovaFoldingVerifier" in metric_name: + components["HypernovaProver"][metric_name][cpu] = time_ms elif "OinkProver" in metric_name: components["OinkProver"][metric_name][cpu] = time_ms elif "Decider" in metric_name: @@ -224,7 +224,7 @@ for test_case in test_cases: # Generate tables for each component sections = [ ("Main Components", "Main"), - ("ProtogalaxyProver Components", "ProtogalaxyProver"), + ("HypernovaProver Components", "HypernovaProver"), ("OinkProver", "OinkProver"), ("Decider", "Decider"), ("Goblin", "Goblin"), @@ -259,7 +259,7 @@ for test_case in test_cases: count = int(count_match.group(1)) if count_match else None # Clean up metric name - clean_name = metric_name.replace('ProtogalaxyProver::', '').replace('OinkProver::', '') + clean_name = metric_name.replace('HypernovaFoldingProver::', 
'').replace('HypernovaFoldingVerifier::', '').replace('HypernovaProver::', '').replace('OinkProver::', '') row = generate_table_row(clean_name, times, available_cpus, count) print("| " + row + " |") diff --git a/barretenberg/cpp/scripts/test_chonk_standalone_vks_havent_changed.sh b/barretenberg/cpp/scripts/test_chonk_standalone_vks_havent_changed.sh index 64056e243cfe..b1e56a18d363 100755 --- a/barretenberg/cpp/scripts/test_chonk_standalone_vks_havent_changed.sh +++ b/barretenberg/cpp/scripts/test_chonk_standalone_vks_havent_changed.sh @@ -13,7 +13,7 @@ cd .. # - Generate a hash for versioning: sha256sum bb-chonk-inputs.tar.gz # - Upload the compressed results: aws s3 cp bb-chonk-inputs.tar.gz s3://aztec-ci-artifacts/protocol/bb-chonk-inputs-[hash(0:8)].tar.gz # Note: In case of the "Test suite failed to run ... Unexpected token 'with' " error, need to run: docker pull aztecprotocol/build:3.0 -pinned_short_hash="7222b532" +pinned_short_hash="8fa51383" pinned_chonk_inputs_url="https://aztec-ci-artifacts.s3.us-east-2.amazonaws.com/protocol/bb-chonk-inputs-${pinned_short_hash}.tar.gz" function compress_and_upload { @@ -77,7 +77,7 @@ function check_circuit_vks { local exit_code=0 if [[ "${2:-}" == "--update_inputs" ]]; then - output=$($bb check --update_inputs --scheme chonk --ivc_inputs_path "$flow_folder/ivc-inputs.msgpack" 2>&1) || exit_code=$? + output=$($bb check --vk_policy=rewrite --scheme chonk --ivc_inputs_path "$flow_folder/ivc-inputs.msgpack" 2>&1) || exit_code=$? else output=$($bb check --scheme chonk --ivc_inputs_path "$flow_folder/ivc-inputs.msgpack" 2>&1) || exit_code=$? 
fi diff --git a/barretenberg/cpp/src/barretenberg/api/api.hpp b/barretenberg/cpp/src/barretenberg/api/api.hpp index 7360b832790f..755152ffc757 100644 --- a/barretenberg/cpp/src/barretenberg/api/api.hpp +++ b/barretenberg/cpp/src/barretenberg/api/api.hpp @@ -23,7 +23,6 @@ class API { bool include_gates_per_opcode{ false }; // should we include gates_per_opcode in the gates command output bool slow_low_memory{ false }; // use file backed memory for polynomials std::string storage_budget; // storage budget for file backed memory (e.g. "500m", "2g") - bool update_inputs{ false }; // update inputs when check fails std::string vk_policy{ "default" }; // policy for handling VKs during IVC accumulation bool optimized_solidity_verifier{ false }; // should we use the optimized sol verifier? (temp) diff --git a/barretenberg/cpp/src/barretenberg/api/api_chonk.cpp b/barretenberg/cpp/src/barretenberg/api/api_chonk.cpp index 310cb0b9afcc..04f3e6a700e7 100644 --- a/barretenberg/cpp/src/barretenberg/api/api_chonk.cpp +++ b/barretenberg/cpp/src/barretenberg/api/api_chonk.cpp @@ -151,6 +151,7 @@ bool ChonkAPI::check_precomputed_vks(const Flags& flags, const std::filesystem:: bbapi::BBApiRequest request; std::vector raw_steps = PrivateExecutionStepRaw::load_and_decompress(input_path); + bbapi::VkPolicy vk_policy = bbapi::parse_vk_policy(flags.vk_policy); bool check_failed = false; for (auto& step : raw_steps) { if (step.vk.empty()) { @@ -163,7 +164,7 @@ bool ChonkAPI::check_precomputed_vks(const Flags& flags, const std::filesystem:: if (!response.valid) { info("VK mismatch detected for function ", step.function_name); - if (!flags.update_inputs) { + if (vk_policy != bbapi::VkPolicy::REWRITE) { info("Computed VK differs from precomputed VK in ivc-inputs.msgpack"); return false; } diff --git a/barretenberg/cpp/src/barretenberg/api/api_chonk.test.cpp b/barretenberg/cpp/src/barretenberg/api/api_chonk.test.cpp index fa951c636fbd..d0522125c95a 100644 --- 
a/barretenberg/cpp/src/barretenberg/api/api_chonk.test.cpp +++ b/barretenberg/cpp/src/barretenberg/api/api_chonk.test.cpp @@ -320,8 +320,8 @@ TEST_F(ChonkAPITests, CheckPrecomputedVksMismatch) bool result = api.check_precomputed_vks(ChonkAPI::Flags{}, input_path); EXPECT_FALSE(result); - // Check with --update_input should still fail but update the VK in the input. - result = api.check_precomputed_vks(ChonkAPI::Flags{ .update_inputs = true }, input_path); + // Check with --vk_policy=rewrite should still fail but update the VK in the input. + result = api.check_precomputed_vks(ChonkAPI::Flags{ .vk_policy = "rewrite" }, input_path); EXPECT_FALSE(result); // Check again and it should succeed with the updated VK. diff --git a/barretenberg/cpp/src/barretenberg/bb/cli.cpp b/barretenberg/cpp/src/barretenberg/bb/cli.cpp index 7ccd8facee42..ce53889be52a 100644 --- a/barretenberg/cpp/src/barretenberg/bb/cli.cpp +++ b/barretenberg/cpp/src/barretenberg/bb/cli.cpp @@ -280,18 +280,15 @@ int parse_and_run_cli_command(int argc, char* argv[]) "back to RAM (requires --slow_low_memory)."); }; - const auto add_update_inputs_flag = [&](CLI::App* subcommand) { - return subcommand->add_flag("--update_inputs", flags.update_inputs, "Update inputs if vk check fails."); - }; - const auto add_vk_policy_option = [&](CLI::App* subcommand) { return subcommand ->add_option("--vk_policy", flags.vk_policy, - "Policy for handling verification keys during IVC accumulation. 'default' uses the provided " - "VK as-is, 'check' verifies the provided VK matches the computed VK (throws error on " - "mismatch), 'recompute' always ignores the provided VK and treats it as nullptr.") - ->check(CLI::IsMember({ "default", "check", "recompute" }).name("is_member")); + "Policy for handling verification keys. 
'default' uses the provided VK as-is, 'check' " + "verifies the provided VK matches the computed VK (throws error on mismatch), 'recompute' " + "always ignores the provided VK and treats it as nullptr, 'rewrite' checks the VK and " + "rewrites the input file with the correct VK if there's a mismatch (for check command).") + ->check(CLI::IsMember({ "default", "check", "recompute", "rewrite" }).name("is_member")); }; const auto add_optimized_solidity_verifier_flag = [&](CLI::App* subcommand) { @@ -342,7 +339,7 @@ int parse_and_run_cli_command(int argc, char* argv[]) add_bytecode_path_option(check); add_witness_path_option(check); add_ivc_inputs_path_options(check); - add_update_inputs_flag(check); + add_vk_policy_option(check); /*************************************************************************************************************** * Subcommand: gates diff --git a/barretenberg/cpp/src/barretenberg/bbapi/bbapi_shared.hpp b/barretenberg/cpp/src/barretenberg/bbapi/bbapi_shared.hpp index 442c575d50f3..5b2aa6e982f6 100644 --- a/barretenberg/cpp/src/barretenberg/bbapi/bbapi_shared.hpp +++ b/barretenberg/cpp/src/barretenberg/bbapi/bbapi_shared.hpp @@ -21,9 +21,10 @@ namespace bb::bbapi { * @brief Policy for handling verification keys during IVC accumulation */ enum class VkPolicy { - DEFAULT, // Use the provided VK as-is (default behavior) - CHECK, // Verify the provided VK matches the computed VK, throw error if mismatch - RECOMPUTE // Always ignore the provided VK and treat it as nullptr + DEFAULT, // Use the provided VK as-is (default behavior) + CHECK, // Verify the provided VK matches the computed VK, throw error if mismatch + RECOMPUTE, // Always ignore the provided VK and treat it as nullptr + REWRITE // Check the VK and rewrite the input file with correct VK if mismatch (for check command) }; /** @@ -137,6 +138,9 @@ inline VkPolicy parse_vk_policy(const std::string& policy) if (policy == "recompute") { return VkPolicy::RECOMPUTE; } + if (policy == "rewrite") 
{ + return VkPolicy::REWRITE; + } return VkPolicy::DEFAULT; // default } diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_format.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_format.cpp index c29ad54cf708..c90961ea31c0 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_format.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/acir_format.cpp @@ -274,7 +274,7 @@ void build_constraints(Builder& builder, AcirProgram& program, const ProgramMeta bool has_chonk_recursion_constraints = !constraint_system.chonk_recursion_constraints.empty(); if constexpr (IsMegaBuilder) { - // We shouldn't have both honk recursion constraints and pg recursion constraints. + // We shouldn't have both honk recursion constraints and HN recursion constraints. BB_ASSERT_EQ(!has_honk_recursion_constraints || !has_hn_recursion_constraints, true, "Invalid circuit: both honk and ivc recursion constraints present."); @@ -312,7 +312,7 @@ void build_constraints(Builder& builder, AcirProgram& program, const ProgramMeta // we return a vinfo for the case of Chonk + AVM BB_ASSERT_EQ(has_hn_recursion_constraints, false, - "Invalid circuit: pg recursion constraints are present with UltraBuilder."); + "Invalid circuit: HN recursion constraints are present with UltraBuilder."); BB_ASSERT_EQ(!(has_chonk_recursion_constraints && has_honk_recursion_constraints), true, "Invalid circuit: both honk and chonk recursion constraints are present."); diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/blake2s_constraint.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/blake2s_constraint.cpp index 601404eb41fe..1fcdbb950b52 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/blake2s_constraint.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/blake2s_constraint.cpp @@ -18,11 +18,9 @@ template void create_blake2s_constraints(Builder& builder, co using byte_array_ct = stdlib::byte_array; using field_ct = stdlib::field_t; - // 
Create byte array struct - byte_array_ct arr(&builder); + // Build input byte array by appending constrained byte_arrays + byte_array_ct arr = byte_array_ct::constant_padding(&builder, 0); // Start with empty array - // Get the witness assignment for each witness index - // Write the witness assignment to the byte_array for (const auto& witness_index_num_bits : constraint.inputs) { auto witness_index = witness_index_num_bits.blackbox_input; auto num_bits = witness_index_num_bits.num_bits; @@ -31,8 +29,11 @@ template void create_blake2s_constraints(Builder& builder, co auto num_bytes = round_to_nearest_byte(num_bits); field_ct element = to_field_ct(witness_index, builder); + + // byte_array_ct(field, num_bytes) constructor adds range constraints for each byte byte_array_ct element_bytes(element, num_bytes); + // Safe write: both arr and element_bytes are constrained arr.write(element_bytes); } diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/blake3_constraint.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/blake3_constraint.cpp index a7272a7d7351..0d826808580d 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/blake3_constraint.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/blake3_constraint.cpp @@ -17,11 +17,9 @@ template void create_blake3_constraints(Builder& builder, con using byte_array_ct = bb::stdlib::byte_array; using field_ct = bb::stdlib::field_t; - // Create byte array struct - byte_array_ct arr(&builder); + // Build input byte array by appending constrained byte_arrays + byte_array_ct arr = byte_array_ct::constant_padding(&builder, 0); // Start with empty array - // Get the witness assignment for each witness index - // Write the witness assignment to the byte_array for (const auto& witness_index_num_bits : constraint.inputs) { auto witness_index = witness_index_num_bits.blackbox_input; auto num_bits = witness_index_num_bits.num_bits; @@ -30,8 +28,11 @@ template void create_blake3_constraints(Builder& 
builder, con auto num_bytes = round_to_nearest_byte(num_bits); BB_ASSERT_LTE(num_bytes, 1024U, "barretenberg does not support blake3 inputs with more than 1024 bytes"); field_ct element = to_field_ct(witness_index, builder); + + // byte_array_ct(field, num_bytes) constructor adds range constraints for each byte byte_array_ct element_bytes(element, num_bytes); + // Safe write: both arr and element_bytes are constrained arr.write(element_bytes); } diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_constraints.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_constraints.cpp index d53e4aa01d3f..38df7d574f11 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_constraints.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_constraints.cpp @@ -25,10 +25,9 @@ using namespace bb; * coordinates. * 3. Conditionally select the public key, the signature, and the hash of the message when the predicate is witness * false. This ensures that the circuit is satisfied when the predicate is false. We set: - * - The first byte of r and s to 1 (NOTE: This only works when the order of the curve divided by two is bigger - * than \f$2^{241}\f$). + * - r = s = H(m) = 1 (the hash is set to 1 to avoid failures in the byte_array constructor) * - The public key to 2 times the generator of the curve (this is to avoid problems with lookup tables in - * secp265r1). + * secp265r1) * 4. Verify the signature against the public key and the hash of the message. We return a bool_t bearing witness to * whether the signature verification was successfull or not. * 5. Enforce that the result of the signature verification matches the expected result. 
@@ -60,7 +59,7 @@ void create_ecdsa_verify_constraints(typename Curve::Builder& builder, std::vector pub_x_fields = fields_from_witnesses(builder, input.pub_x_indices); std::vector pub_y_fields = fields_from_witnesses(builder, input.pub_y_indices); field_ct result_field = field_ct::from_witness_index(&builder, input.result); - field_ct predicate_field = to_field_ct(input.predicate, builder); + bool_ct predicate(to_field_ct(input.predicate, builder)); // Constructor enforces predicate = 0 or 1 if (!has_valid_witness_assignments) { // Fill builder variables in case of empty witness assignment @@ -68,40 +67,47 @@ void create_ecdsa_verify_constraints(typename Curve::Builder& builder, builder, hashed_message_fields, r_fields, s_fields, pub_x_fields, pub_y_fields, result_field); } - // Step 1. + // Step 1: Conditionally assign field values when predicate is false + if (!predicate.is_constant()) { + // Set r = s = H(m) = 1 when the predicate is false + for (size_t idx = 0; idx < 32; idx++) { + r_fields[idx] = field_ct::conditional_assign(predicate, r_fields[idx], field_ct(idx == 0 ? 1 : 0)); + s_fields[idx] = field_ct::conditional_assign(predicate, s_fields[idx], field_ct(idx == 0 ? 1 : 0)); + hashed_message_fields[idx] = + field_ct::conditional_assign(predicate, hashed_message_fields[idx], field_ct(idx == 0 ? 
1 : 0)); + } + + // Set public key to 2*generator when predicate is false + // Compute as native type to get byte representation + typename Curve::AffineElementNative default_point_native(Curve::g1::one + Curve::g1::one); + std::array default_x_bytes; + std::array default_y_bytes; + Curve::fq::serialize_to_buffer(default_point_native.x, default_x_bytes.data()); + Curve::fq::serialize_to_buffer(default_point_native.y, default_y_bytes.data()); + + for (size_t i = 0; i < 32; ++i) { + pub_x_fields[i] = field_ct::conditional_assign(predicate, pub_x_fields[i], field_ct(default_x_bytes[i])); + pub_y_fields[i] = field_ct::conditional_assign(predicate, pub_y_fields[i], field_ct(default_y_bytes[i])); + } + } else { + BB_ASSERT(input.predicate.value, "Creating ECDSA constraints with a constant predicate equal to false."); + } + + // Step 2: Convert conditionally-assigned fields to byte arrays (adds range constraints on the correct values) byte_array_ct hashed_message = fields_to_bytes(builder, hashed_message_fields); byte_array_ct pub_x_bytes = fields_to_bytes(builder, pub_x_fields); byte_array_ct pub_y_bytes = fields_to_bytes(builder, pub_y_fields); byte_array_ct r = fields_to_bytes(builder, r_fields); byte_array_ct s = fields_to_bytes(builder, s_fields); - bool_ct result = static_cast(result_field); // Constructor enforces result = 0 or 1 - bool_ct predicate = static_cast(predicate_field); // Constructor enforces predicate = 0 or 1 + bool_ct result(result_field); // Constructor enforces result = 0 or 1 - // Step 2. + // Step 3: Construct public key from byte arrays Fq pub_x(pub_x_bytes); Fq pub_y(pub_y_bytes); // This constructor sets the infinity flag of public_key to false. This is OK because the point at infinity is not a // point on the curve and we check tha public_key is on the curve. G1 public_key(pub_x, pub_y); - // Step 3. 
- // There is one remaining edge case that happens with negligible probability, see here: - // https://github.com/AztecProtocol/barretenberg/issues/1570 - if (!input.predicate.is_constant) { - r[0] = field_ct::conditional_assign(predicate, r[0], field_ct(1)); // 0 < r < n - s[0] = field_ct::conditional_assign(predicate, s[0], field_ct(1)); // 0 < s < n/2 - - // P is on the curve - typename Curve::AffineElement default_point(Curve::g1::one + Curve::g1::one); - // BIGGROUP_AUDITTODO: mutable accessor needed for conditional_assign(). Could add a conditional_assign method - // to biggroup or could just perform these operations on the underlying fields prior to constructing the - // biggroup element. - public_key.x() = Fq::conditional_assign(predicate, public_key.x(), default_point.x()); - public_key.y() = Fq::conditional_assign(predicate, public_key.y(), default_point.y()); - } else { - BB_ASSERT(input.predicate.value, "Creating ECDSA constraints with a constant predicate equal to false."); - } - // Step 4. bool_ct signature_result = stdlib::ecdsa_verify_signature(hashed_message, public_key, { r, s }); diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_constraints.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_constraints.hpp index c5047794c83b..a93bb1002e2d 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_constraints.hpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_constraints.hpp @@ -34,15 +34,8 @@ using namespace bb; * predicate is witness false, then the constraint is disabled, i.e it must not fail and can return whatever. When * `predicate` is set to witness false, we override some values to ensure that all the circuit constraints are * satisfied: - * - We set the first byte of each component of the signature to 1 (NOTE: This only works when the order of the - * curve divided by two is bigger than \f$2^{241}\f$). 
+ * - We set r = s = H(m) = 1 (the hash is set to 1 to avoid failures in the byte_array constructor) * - We set the public key to be 2 times the generator of the curve. - * - * @note There is a small chance that when the predicate is witness false, the circuit still fails. This is due to ECDSA - * verification checking that \f$u_1 * G + u_2 * P\f$ is not the point at infinity. When the predicate is witness false, - * we set \f$P = 2G\f$, so the result of the scalar multiplication is the point at infinity when \f$u_1 + 2 u_2 = H(m) - * * s^{-1} + 2 * r * s^{-1} = 0 \mod n\f$, which means \f$H(m) + 2 * r = 0 \mod n\f$. Given that \f$r\f$ and \f$H(m)\f$ - * are both random 256-bit numbers, the probability of this happening is negligible. */ struct EcdsaConstraint { bb::CurveType type; diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_constraints.test.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_constraints.test.cpp index 89aa56581f96..5d5125255fcd 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_constraints.test.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/ecdsa_constraints.test.cpp @@ -28,21 +28,23 @@ template class EcdsaTestingFunctions { public: enum class Target : uint8_t { None, - R, // Invalidate R component of signature - ZeroS, // Set S=0 (tests ECDSA validation) - HighS, // Set S=high (tests malleability protection) - P, // Invalidate public key - Result // Invalid signature with claimed valid result + HashIsNotAByteArray, // Set one element of the hash > 255 + ZeroR, // Set R=0 (tests ECDSA validation) + ZeroS, // Set S=0 (tests ECDSA validation) + HighS, // Set S=high (tests malleability protection) + P, // Invalidate public key + Result // Invalid signature with claimed valid result }; static std::vector get_all() { return { Target::None, Target::HashIsNotAByteArray, Target::ZeroR, 
Target::ZeroS, Target::HighS, Target::P, + Target::Result }; } static std::vector get_labels() { - return { "None", "R", "Zero S", "High S", "Public key", "Result" }; + return { "None", "Hash is not a byte array", "Zero R", "Zero S", "High S", "Public key", "Result" }; } }; @@ -63,7 +65,11 @@ template class EcdsaTestingFunctions { } switch (invalid_witness_target) { - case InvalidWitness::Target::R: + case InvalidWitness::Target::HashIsNotAByteArray: + // Set first byte of hash to 256 (invalid byte) + witness_values[ecdsa_constraints.hashed_message[0]] = bb::fr(256); + break; + case InvalidWitness::Target::ZeroR: // Set r = 0 (invalid ECDSA signature component) for (size_t idx = 0; idx < 32; idx++) { witness_values[ecdsa_constraints.signature[idx]] = bb::fr(0); @@ -179,13 +185,13 @@ TYPED_TEST(EcdsaConstraintsTest, GenerateVKFromConstraints) TYPED_TEST(EcdsaConstraintsTest, ConstantTrue) { BB_DISABLE_ASSERTS(); - TestFixture::test_constant_true(TestFixture::InvalidWitnessTarget::R); + TestFixture::test_constant_true(TestFixture::InvalidWitnessTarget::Result); } TYPED_TEST(EcdsaConstraintsTest, WitnessTrue) { BB_DISABLE_ASSERTS(); - TestFixture::test_witness_true(TestFixture::InvalidWitnessTarget::R); + TestFixture::test_witness_true(TestFixture::InvalidWitnessTarget::Result); } TYPED_TEST(EcdsaConstraintsTest, WitnessFalse) @@ -198,7 +204,6 @@ TYPED_TEST(EcdsaConstraintsTest, WitnessFalseSlow) { // This test is equal to WitnessFalse but also checks that each configuration would have failed if the // predicate were witness true. It can be useful for debugging. 
- GTEST_SKIP(); BB_DISABLE_ASSERTS(); TestFixture::test_witness_false_slow(); } diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/utils.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/utils.hpp index ab2f6de86e37..7267c381e676 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/utils.hpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/utils.hpp @@ -46,7 +46,7 @@ static std::vector> fields_from_witnesses(Builder& builder, std */ template byte_array fields_to_bytes(Builder& builder, std::vector>& fields) { - byte_array result(&builder); + byte_array result = byte_array::constant_padding(&builder, /*length*/ 0); for (auto& field : fields) { // Construct byte array of length 1 from the field element // The constructor enforces that `field` fits in one byte diff --git a/barretenberg/cpp/src/barretenberg/hypernova/hypernova_prover.cpp b/barretenberg/cpp/src/barretenberg/hypernova/hypernova_prover.cpp index 937a40bac6fe..34729cc70075 100644 --- a/barretenberg/cpp/src/barretenberg/hypernova/hypernova_prover.cpp +++ b/barretenberg/cpp/src/barretenberg/hypernova/hypernova_prover.cpp @@ -44,7 +44,7 @@ HypernovaFoldingProver::Accumulator HypernovaFoldingProver::sumcheck_output_to_a const std::shared_ptr& instance, const std::shared_ptr& honk_vk) { - BB_BENCH(); + BB_BENCH_NAME("HypernovaFoldingProver::sumcheck_output_to_accumulator"); // Generate challenges to batch shifted and unshifted polynomials/commitments/evaluation auto [unshifted_challenges, shifted_challenges] = get_batching_challenges(); @@ -91,7 +91,7 @@ Polynomial HypernovaFoldingProver::batch_polynomials const size_t& full_batched_size, const std::vector& challenges) { - BB_BENCH(); + BB_BENCH_NAME("HypernovaFoldingProver::batch_polynomials"); BB_ASSERT_EQ(full_batched_size, polynomials_to_batch[0].virtual_size(), "The virtual size of the first polynomial is different from the full batched size."); @@ -116,7 +116,7 @@ HypernovaFoldingProver::Accumulator 
HypernovaFoldingProver::instance_to_accumula const std::shared_ptr& instance, const std::shared_ptr& honk_vk) { - BB_BENCH(); + BB_BENCH_NAME("HypernovaFoldingProver::instance_to_accumulator"); vinfo("HypernovaFoldingProver: converting instance to accumulator..."); diff --git a/barretenberg/cpp/src/barretenberg/hypernova/hypernova_verifier.cpp b/barretenberg/cpp/src/barretenberg/hypernova/hypernova_verifier.cpp index 23ad1c54f472..19774bd2b083 100644 --- a/barretenberg/cpp/src/barretenberg/hypernova/hypernova_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/hypernova/hypernova_verifier.cpp @@ -49,7 +49,7 @@ HypernovaFoldingVerifier::Accumulator HypernovaFoldingVerifier:: HypernovaFoldingVerifier::MegaSumcheckOutput& sumcheck_output, const std::shared_ptr& instance) { - BB_BENCH(); + BB_BENCH_NAME("HypernovaFoldingVerifier::sumcheck_output_to_accumulator"); // Generate challenges to batch shifted and unshifted polynomials/commitments/evaluation auto [unshifted_challenges, shifted_challenges] = get_batching_challenges(); @@ -82,7 +82,7 @@ template SumcheckOutput HypernovaFoldingVerifier::sumcheck_on_incoming_instance( const std::shared_ptr& instance, const Proof& proof) { - BB_BENCH(); + BB_BENCH_NAME("HypernovaFoldingVerifier::sumcheck_on_incoming_instance"); vinfo("HypernovaFoldingVerifier: verifying Oink proof..."); // Complete the incoming verifier instance @@ -90,11 +90,6 @@ SumcheckOutput HypernovaFoldingVerifier::sumcheck_on_incoming_in transcript->load_proof(proof); verifier.verify(); - if constexpr (IsRecursiveFlavor) { - instance->target_sum = FF::from_witness_index(instance->builder, instance->builder->zero_idx()); - } else { - instance->target_sum = FF::zero(); - } instance->gate_challenges = transcript->template get_powers_of_challenge( "HypernovaFoldingProver:gate_challenge", Flavor::VIRTUAL_LOG_N); @@ -102,7 +97,7 @@ SumcheckOutput HypernovaFoldingVerifier::sumcheck_on_incoming_in vinfo("HypernovaFoldingVerifier: verifying Sumcheck to turn instance 
into an accumulator..."); std::vector padding_indicator_array(Flavor::VIRTUAL_LOG_N, 1); - SumcheckVerifier sumcheck(transcript, instance->alpha, Flavor::VIRTUAL_LOG_N, instance->target_sum); + SumcheckVerifier sumcheck(transcript, instance->alpha, Flavor::VIRTUAL_LOG_N); SumcheckOutput sumcheck_output = sumcheck.verify(instance->relation_parameters, instance->gate_challenges, padding_indicator_array); @@ -114,7 +109,7 @@ std::pair::Accumulator> Hypernov instance_to_accumulator(const std::shared_ptr& instance, const Proof& proof) { - BB_BENCH(); + BB_BENCH_NAME("HypernovaFoldingVerifier::instance_to_accumulator"); auto sumcheck_output = sumcheck_on_incoming_instance(instance, proof); @@ -135,7 +130,7 @@ std::tuple::Accumulator> H Flavor>::verify_folding_proof(const std::shared_ptr& instance, const HypernovaFoldingVerifier::Proof& proof) { - BB_BENCH(); + BB_BENCH_NAME("HypernovaFoldingVerifier::verify_folding_proof"); vinfo("HypernovaFoldingVerifier: verifying folding proof..."); diff --git a/barretenberg/cpp/src/barretenberg/solidity_helpers/circuits/blake_circuit.hpp b/barretenberg/cpp/src/barretenberg/solidity_helpers/circuits/blake_circuit.hpp index cf9f993dc62c..cbab86290d22 100644 --- a/barretenberg/cpp/src/barretenberg/solidity_helpers/circuits/blake_circuit.hpp +++ b/barretenberg/cpp/src/barretenberg/solidity_helpers/circuits/blake_circuit.hpp @@ -16,9 +16,13 @@ class BlakeCircuit { { Builder builder; - byte_array_ct input_buffer(&builder); + // Build byte array from field elements with proper constraints using write() pattern + byte_array_ct input_buffer(&builder, std::vector()); for (size_t i = 0; i < NUM_PUBLIC_INPUTS; ++i) { - input_buffer.write(byte_array_ct(field_ct(public_witness_ct(&builder, public_inputs[i])))); + field_ct field_element = public_witness_ct(&builder, public_inputs[i]); + // byte_array_ct(field_t) constructor adds range constraints for each byte + byte_array_ct field_bytes(field_element); + input_buffer.write(field_bytes); } 
bb::stdlib::Blake2s::hash(input_buffer); diff --git a/barretenberg/cpp/src/barretenberg/solidity_helpers/circuits/ecdsa_circuit.hpp b/barretenberg/cpp/src/barretenberg/solidity_helpers/circuits/ecdsa_circuit.hpp index d8dc9714fe14..3aacc3fbbcc3 100644 --- a/barretenberg/cpp/src/barretenberg/solidity_helpers/circuits/ecdsa_circuit.hpp +++ b/barretenberg/cpp/src/barretenberg/solidity_helpers/circuits/ecdsa_circuit.hpp @@ -29,10 +29,13 @@ class EcdsaCircuit { Builder builder; // IN CIRCUIT - // Create an input buffer the same size as our inputs - typename curve::byte_array_ct input_buffer(&builder, NUM_PUBLIC_INPUTS); + // Create an input buffer from public inputs (treating each as a single byte) + typename curve::byte_array_ct input_buffer(&builder, std::vector()); for (size_t i = 0; i < NUM_PUBLIC_INPUTS; ++i) { - input_buffer[i] = public_witness_ct(&builder, public_inputs[i]); + field_ct byte_value = public_witness_ct(&builder, public_inputs[i]); + // Constrain to be a single byte and create byte_array + typename curve::byte_array_ct single_byte(byte_value, 1); + input_buffer.write(single_byte); } // This is the message that we would like to confirm diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/blake2s/blake2s.cpp b/barretenberg/cpp/src/barretenberg/stdlib/hash/blake2s/blake2s.cpp index c6c0172a4812..2f12f924cda7 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/hash/blake2s/blake2s.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/blake2s/blake2s.cpp @@ -117,8 +117,15 @@ template void Blake2s::blake2s(blake2s_state& S, byt // Set last block. 
S.f[0] = field_t(uint256_t((uint32_t)-1)); - byte_array_ct final(in.get_context()); - final.write(in.slice(offset)).write(byte_array_ct(in.get_context(), BLAKE2S_BLOCKBYTES - size)); + // Build final block: remaining input + constant padding + Builder* ctx = in.get_context(); + auto remaining = in.slice(offset); + + // Combine remaining bytes and constant padding (no constraints needed for constants) + byte_array_ct final = remaining; // Copy constrained remaining bytes + byte_array_ct padding = byte_array_ct::constant_padding(ctx, BLAKE2S_BLOCKBYTES - size); + final.write(padding); + increment_counter(S, static_cast(size)); compress(S, final); } @@ -133,10 +140,13 @@ template byte_array Blake2s::hash(const byt blake2s(S, input); - byte_array_ct result(input.get_context()); - for (auto h : S.h) { + // Build result from state values + byte_array_ct result = byte_array_ct::constant_padding(input.get_context(), 0); + for (const auto& h : S.h) { + // byte_array_ct(field, num_bytes) constructor adds range constraints for each byte byte_array_ct v(h, 4); - result.write(v.reverse()); + auto reversed = v.reverse(); + result.write(reversed); } return result; } diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/blake2s/blake2s.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/hash/blake2s/blake2s.test.cpp index 1c9dcff2d096..9bc25b8aad25 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/hash/blake2s/blake2s.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/blake2s/blake2s.test.cpp @@ -76,28 +76,27 @@ TEST(stdlib_blake2s, test_witness_and_constant) // create a byte array that is a circuit witness std::string witness_str = "abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz"; std::vector witness_str_vec(witness_str.begin(), witness_str.end()); - byte_array_ct witness_str_ct(&builder, witness_str_vec); - - // create a byte array that is a circuit constant - std::vector constant_vec = { '0', '1' }; - std::vector constant_vec_field_ct; - for 
(auto b : constant_vec) { - constant_vec_field_ct.emplace_back(field_ct(bb::fr(b))); - } - byte_array_ct constant_vec_ct(&builder, constant_vec_field_ct); // create a byte array that is part circuit witness and part circuit constant - byte_array_ct input_arr(&builder); - input_arr.write(witness_str_ct).write(constant_vec_ct); + // start with the witness part, then append constant padding + byte_array_ct input_arr(&builder, witness_str_vec); + input_arr.write(byte_array_ct::constant_padding(&builder, 1, '0')) + .write(byte_array_ct::constant_padding(&builder, 1, '1')); - // hash the combined byte array - byte_array_ct output = stdlib::Blake2s::hash(input_arr); + // for expected value calculation + std::vector constant_vec = { '0', '1' }; // create expected input vector by concatenating witness and constant parts std::vector input_v; input_v.insert(input_v.end(), witness_str_vec.begin(), witness_str_vec.end()); input_v.insert(input_v.end(), constant_vec.begin(), constant_vec.end()); + // Verify the circuit input matches the expected input + EXPECT_EQ(input_arr.get_value(), input_v); + + // hash the combined byte array + byte_array_ct output = stdlib::Blake2s::hash(input_arr); + // compute expected hash auto expected = crypto::blake2s(input_v); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/blake3s/blake3s.cpp b/barretenberg/cpp/src/barretenberg/stdlib/hash/blake3s/blake3s.cpp index fa88ea260876..b396324ad10c 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/hash/blake3s/blake3s.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/blake3s/blake3s.cpp @@ -90,11 +90,15 @@ void Blake3s::compress_xof(const field_t cv[8], */ for (size_t i = 0; i < (BLAKE3_STATE_SIZE >> 1); i++) { const auto lookup_1 = plookup_read::get_lookup_accumulators(BLAKE_XOR, state[i], state[i + 8], true); + // byte_array(field, num_bytes) constructor adds range constraints for each byte byte_array out_bytes_1(lookup_1[ColumnIdx::C3][0], 4); + // Safe write: both out and 
out_bytes_1 are constrained out.write_at(out_bytes_1.reverse(), i * 4); const auto lookup_2 = plookup_read::get_lookup_accumulators(BLAKE_XOR, state[i + 8], cv[i], true); + // byte_array(field, num_bytes) constructor adds range constraints for each byte byte_array out_bytes_2(lookup_2[ColumnIdx::C3][0], 4); + // Safe write: both out and out_bytes_2 are constrained out.write_at(out_bytes_2.reverse(), (i + 8) * 4); } } @@ -105,17 +109,14 @@ Blake3s::output_t Blake3s::make_output(const field_t uint8_t block_len, uint8_t flags) { - output_t ret; + byte_array_ct block_copy = block; + // Initialize output_t with all fields + output_t ret{ .input_cv = {}, .block = block_copy, .block_len = block_len, .flags = flags }; + for (size_t i = 0; i < (BLAKE3_OUT_LEN >> 2); ++i) { ret.input_cv[i] = input_cv[i]; } - ret.block = byte_array_ct(block.get_context(), BLAKE3_BLOCK_LEN); - for (size_t i = 0; i < BLAKE3_BLOCK_LEN; i++) { - ret.block[i] = block[i]; - } - ret.block_len = block_len; - ret.flags = flags; return ret; } @@ -129,10 +130,8 @@ template void Blake3s::hasher_init(blake3_hasher* se self->key[i] = field_ct(uint256_t(IV[i])); self->cv[i] = field_ct(uint256_t(IV[i])); } - self->buf = byte_array_ct(self->context, BLAKE3_BLOCK_LEN); - for (size_t i = 0; i < BLAKE3_BLOCK_LEN; i++) { - self->buf[i] = field_t(self->context, 0); - } + // Create zero-filled constant buffer (no constraints needed) + self->buf = byte_array_ct::constant_padding(self->context, BLAKE3_BLOCK_LEN); self->buf_len = 0; self->blocks_compressed = 0; self->flags = 0; @@ -160,9 +159,9 @@ void Blake3s::hasher_update(blake3_hasher* self, const byte_array input_len) { take = input_len; } - for (size_t i = 0; i < take; i++) { - self->buf[self->buf_len + i] = input[i + start_counter]; - } + // Copy bytes from input to buf (input is constrained) + byte_array input_slice = input.slice(start_counter, take); + self->buf.write_at(input_slice, self->buf_len); self->buf_len = static_cast(self->buf_len + (uint8_t)take); 
input_len -= take; @@ -173,11 +172,11 @@ template void Blake3s::hasher_finalize(const blake3_ uint8_t block_flags = self->flags | maybe_start_flag(self) | CHUNK_END; output_t output = make_output(self->cv, self->buf, self->buf_len, block_flags); - byte_array_ct wide_buf(out.get_context(), BLAKE3_BLOCK_LEN); + // Create zero-filled constant buffer for compress_xof output (no constraints needed) + byte_array_ct wide_buf = byte_array_ct::constant_padding(out.get_context(), BLAKE3_BLOCK_LEN); compress_xof(output.input_cv, output.block, output.block_len, output.flags | ROOT, wide_buf); - for (size_t i = 0; i < BLAKE3_OUT_LEN; i++) { - out[i] = wide_buf[i]; - } + // Extract the output bytes by slicing (propagates constraint status) + out = wide_buf.slice(0, BLAKE3_OUT_LEN); } template byte_array Blake3s::hash(const byte_array& input) @@ -185,11 +184,19 @@ template byte_array Blake3s::hash(const byt BB_ASSERT(input.size() <= BLAKE3_CHUNK_LEN, "Barretenberg does not support blake3s with input lengths greater than 1024 bytes."); - blake3_hasher hasher = {}; - hasher.context = input.get_context(); + // Create zero-filled constant buffer for hasher (will be properly initialized by hasher_init) + Builder* ctx = input.get_context(); + byte_array_ct buf = byte_array_ct::constant_padding(ctx, BLAKE3_BLOCK_LEN); + + blake3_hasher hasher{ + .key = {}, .cv = {}, .buf = buf, .buf_len = 0, .blocks_compressed = 0, .flags = 0, .context = ctx + }; + hasher_init(&hasher); hasher_update(&hasher, input, input.size()); - byte_array_ct result(input.get_context(), BLAKE3_OUT_LEN); + + // Create output buffer (constants) + byte_array_ct result = byte_array_ct::constant_padding(ctx, BLAKE3_OUT_LEN); hasher_finalize(&hasher, result); return result; } diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/sha256/sha256.cpp b/barretenberg/cpp/src/barretenberg/stdlib/hash/sha256/sha256.cpp index 679c604be97b..258ce9f98d1f 100644 --- 
a/barretenberg/cpp/src/barretenberg/stdlib/hash/sha256/sha256.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/sha256/sha256.cpp @@ -359,7 +359,8 @@ template byte_array SHA256::hash(const byte rolling_hash = sha256_block(rolling_hash, hash_input); } - std::vector output; + // Build result by writing constrained byte_arrays + byte_array_ct result = byte_array_ct::constant_padding(ctx, 0); // Each element of rolling_hash is a 4-byte field_t, decompose rolling hash into bytes. for (const auto& word : rolling_hash) { // This constructor constrains @@ -367,12 +368,9 @@ template byte_array SHA256::hash(const byte // - the element reconstructed from bytes is equal to the given input. // - each entry to be a byte byte_array_ct word_byte_decomposition(word, 4); - for (size_t i = 0; i < 4; i++) { - output.push_back(word_byte_decomposition[i]); - } + result.write(word_byte_decomposition); } - // - return byte_array(ctx, output); + return result; } template class SHA256; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/decider_recursive_verifier.cpp b/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/decider_recursive_verifier.cpp index 04415f9aa235..db4491aaa8af 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/decider_recursive_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/decider_recursive_verifier.cpp @@ -47,7 +47,7 @@ DeciderRecursiveVerifier_::PairingPoints DeciderRecursiveVerifier_ padding_indicator_array(Flavor::VIRTUAL_LOG_N, 1); - Sumcheck sumcheck(transcript, accumulator->alpha, Flavor::VIRTUAL_LOG_N, accumulator->target_sum); + Sumcheck sumcheck(transcript, accumulator->alpha, Flavor::VIRTUAL_LOG_N); SumcheckOutput output = sumcheck.verify(accumulator->relation_parameters, accumulator->gate_challenges, padding_indicator_array); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/recursive_verifier_instance.hpp 
b/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/recursive_verifier_instance.hpp index cba79d644a3a..c91a2a97c7cc 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/recursive_verifier_instance.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/honk_verifier/recursive_verifier_instance.hpp @@ -44,7 +44,6 @@ template class RecursiveVerifierInstance_ { SubrelationSeparator alpha; RelationParameters relation_parameters; std::vector gate_challenges; - FF target_sum{ 0 }; WitnessCommitments witness_commitments; CommitmentLabels commitment_labels; @@ -84,7 +83,6 @@ template class RecursiveVerifierInstance_ { comm = Commitment::from_witness(builder, other_comms[comm_idx]); comm_idx++; } - target_sum = FF::from_witness(builder, verification_key->target_sum); size_t challenge_idx = 0; gate_challenges = std::vector(verification_key->gate_challenges.size()); for (auto& challenge : gate_challenges) { @@ -129,7 +127,6 @@ template class RecursiveVerifierInstance_ { zip_view(witness_commitments.get_all(), verifier_inst.witness_commitments.get_all())) { inst_comm = comm.get_value(); } - verifier_inst.target_sum = target_sum.get_value(); verifier_inst.gate_challenges = std::vector(gate_challenges.size()); for (auto [challenge, inst_challenge] : zip_view(gate_challenges, verifier_inst.gate_challenges)) { @@ -174,7 +171,6 @@ template class RecursiveVerifierInstance_ { this->relation_parameters.gamma); transcript.add_to_independent_hash_buffer(domain_separator + "verifier_inst_public_input_delta", this->relation_parameters.public_input_delta); - transcript.add_to_independent_hash_buffer(domain_separator + "verifier_inst_target_sum", this->target_sum); transcript.add_to_independent_hash_buffer(domain_separator + "verifier_inst_gate_challenges", this->gate_challenges); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/bigfield.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/bigfield.hpp index 3f54f4fcd121..922ea0a56dfa 
100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/bigfield.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/bigfield.hpp @@ -353,35 +353,6 @@ template class bigfield { bb::fr(negative_prime_modulus_mod_binary_basis.slice(NUM_LIMB_BITS * 3, NUM_LIMB_BITS * 4).lo), }; - /** - * @brief Convert the bigfield element to a byte array. Concatenates byte arrays of the high (2L bits) and low (2L - * bits) parts of the bigfield element. - * - * @details Assumes that 2L is divisible by 8, i.e. (NUM_LIMB_BITS * 2) % 8 == 0. Also we check that the bigfield - * element is in the target field. - * - * @return byte_array - */ - byte_array to_byte_array() const - { - byte_array result(get_context()); - // Prevents aliases - assert_is_in_field(); - field_t lo = binary_basis_limbs[0].element + (binary_basis_limbs[1].element * shift_1); - field_t hi = binary_basis_limbs[2].element + (binary_basis_limbs[3].element * shift_1); - // n.b. this only works if NUM_LIMB_BITS * 2 is divisible by 8 - // - // We are packing two bigfield limbs each into the field elements `lo` and `hi`. - // Thus, each of `lo` and `hi` will contain (NUM_LIMB_BITS * 2) bits. We then convert - // `lo` and `hi` to `byte_array` each containing ((NUM_LIMB_BITS * 2) / 8) bytes. - // Therefore, it is necessary for (NUM_LIMB_BITS * 2) to be divisible by 8 for correctly - // converting `lo` and `hi` to `byte_array`s. - BB_ASSERT_EQ((NUM_LIMB_BITS * 2 / 8) * 8, NUM_LIMB_BITS * 2); - result.write(byte_array(hi, 32 - (NUM_LIMB_BITS / 4))); - result.write(byte_array(lo, (NUM_LIMB_BITS / 4))); - return result; - } - // Gets the integer (uint512_t) value of the bigfield element by combining the binary basis limbs. 
uint512_t get_value() const; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/bigfield.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/bigfield.test.cpp index 5ff30485f8c1..25a4a78d9103 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/bigfield.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/bigfield/bigfield.test.cpp @@ -1469,26 +1469,6 @@ template class stdlib_bigfield : public testing::Test { EXPECT_EQ(result, true); } - static void test_to_byte_array() - { - auto builder = Builder(); - size_t num_repetitions = 10; - for (size_t i = 0; i < num_repetitions; ++i) { - auto [a_native, a_ct] = get_random_witness(&builder, true); // fq_native, fq_ct - byte_array_ct a_bytes_ct = a_ct.to_byte_array(); - - std::vector actual_bytes = a_bytes_ct.bytes(); - EXPECT_EQ(actual_bytes.size(), 32); - - for (size_t j = 0; j < actual_bytes.size(); ++j) { - const uint256_t expected = (uint256_t(a_native) >> (8 * j)).slice(0, 8); - EXPECT_EQ(actual_bytes[32 - 1 - j].get_value(), expected); - } - } - bool result = CircuitChecker::check(builder); - EXPECT_EQ(result, true); - } - // This check tests if elements are reduced to fit quotient into range proof static void test_quotient_completeness() { @@ -2447,10 +2427,6 @@ TYPED_TEST(stdlib_bigfield, byte_array_constructors) { TestFixture::test_byte_array_constructors(); } -TYPED_TEST(stdlib_bigfield, to_byte_array) -{ - TestFixture::test_to_byte_array(); -} TYPED_TEST(stdlib_bigfield, quotient_completeness_regression) { TestFixture::test_quotient_completeness(); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/biggroup/biggroup.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/biggroup/biggroup.hpp index 52965cb440b0..32e2b90f68dc 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/biggroup/biggroup.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/biggroup/biggroup.hpp @@ -187,19 +187,6 @@ template 
class element { element& operator=(const element& other); element& operator=(element&& other) noexcept; - /** - * @brief Serialize the element to a byte array in form: (yhi || ylo || xhi || xlo). - * - * @return byte_array - */ - byte_array to_byte_array() const - { - byte_array result(get_context()); - result.write(_y.to_byte_array()); - result.write(_x.to_byte_array()); - return result; - } - element checked_unconditional_add(const element& other) const; element checked_unconditional_subtract(const element& other) const; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/bool/bool.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/bool/bool.cpp index 3a491a79a59a..13a38d320fe2 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/bool/bool.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/bool/bool.cpp @@ -7,6 +7,7 @@ #include "bool.hpp" #include "../circuit_builders/circuit_builders.hpp" #include "barretenberg/common/assert.hpp" +#include "barretenberg/stdlib/primitives/field/field.hpp" #include "barretenberg/transcript/origin_tag.hpp" using namespace bb; @@ -121,15 +122,7 @@ template bool_t& bool_t::operator=(const bo /** * @brief Assigns a `bool_t` to a `bool_t` object. */ -template bool_t& bool_t::operator=(const bool_t& other) -{ - context = other.context; - witness_index = other.witness_index; - witness_bool = other.witness_bool; - witness_inverted = other.witness_inverted; - tag = other.tag; - return *this; -} +template bool_t& bool_t::operator=(const bool_t& other) = default; /** * @brief Assigns a `bool_t` to a `bool_t` object. @@ -165,15 +158,16 @@ template bool_t& bool_t::operator=(const wi */ template bool_t bool_t::operator&(const bool_t& other) const { - bool_t result(context ? 
context : other.context); + Builder* ctx = validate_context(context, other.context); + bool_t result(ctx); bool left = witness_inverted ^ witness_bool; bool right = other.witness_inverted ^ other.witness_bool; result.witness_bool = left && right; - BB_ASSERT(result.context || (is_constant() && other.is_constant())); + BB_ASSERT(ctx || (is_constant() && other.is_constant())); if (!is_constant() && !other.is_constant()) { bb::fr value = result.witness_bool ? bb::fr::one() : bb::fr::zero(); - result.witness_index = context->add_variable(value); + result.witness_index = ctx->add_variable(value); /** * A bool can be represented by a witness value `w` and an 'inverted' flag `i` @@ -212,8 +206,7 @@ template bool_t bool_t::operator&(const boo fr q_o{ -1 }; fr q_c{ i_a * i_b }; - context->create_poly_gate( - { witness_index, other.witness_index, result.witness_index, q_m, q_l, q_r, q_o, q_c }); + ctx->create_poly_gate({ witness_index, other.witness_index, result.witness_index, q_m, q_l, q_r, q_o, q_c }); } else if (!is_constant() && other.is_constant()) { BB_ASSERT(!other.witness_inverted); // If rhs is a constant true, the output is determined by the lhs. Otherwise the output is a constant @@ -236,14 +229,16 @@ template bool_t bool_t::operator&(const boo */ template bool_t bool_t::operator|(const bool_t& other) const { - bool_t result(context ? context : other.context); + Builder* ctx = validate_context(context, other.context); + + bool_t result(ctx); - BB_ASSERT(result.context || (is_constant() && other.is_constant())); + BB_ASSERT(ctx || (is_constant() && other.is_constant())); result.witness_bool = (witness_bool ^ witness_inverted) | (other.witness_bool ^ other.witness_inverted); bb::fr value = result.witness_bool ? 
bb::fr::one() : bb::fr::zero(); if (!is_constant() && !other.is_constant()) { - result.witness_index = context->add_variable(value); + result.witness_index = ctx->add_variable(value); // Let // a := lhs = *this; // b := rhs = other; @@ -264,8 +259,7 @@ template bool_t bool_t::operator|(const boo // Let r := a | b; // Constrain // q_m * w_a * w_b + q_l * w_a + q_r * w_b + q_o * r + q_c = 0 - context->create_poly_gate( - { witness_index, other.witness_index, result.witness_index, q_m, q_l, q_r, q_o, q_c }); + ctx->create_poly_gate({ witness_index, other.witness_index, result.witness_index, q_m, q_l, q_r, q_o, q_c }); } else if (!is_constant() && other.is_constant()) { BB_ASSERT_EQ(other.witness_inverted, false); @@ -288,15 +282,16 @@ template bool_t bool_t::operator|(const boo */ template bool_t bool_t::operator^(const bool_t& other) const { - bool_t result(context == nullptr ? other.context : context); + Builder* ctx = validate_context(context, other.context); + bool_t result(ctx); - BB_ASSERT(result.context || (is_constant() && other.is_constant())); + BB_ASSERT(ctx || (is_constant() && other.is_constant())); result.witness_bool = (witness_bool ^ witness_inverted) ^ (other.witness_bool ^ other.witness_inverted); bb::fr value = result.witness_bool ? 
bb::fr::one() : bb::fr::zero(); if (!is_constant() && !other.is_constant()) { - result.witness_index = context->add_variable(value); + result.witness_index = ctx->add_variable(value); // Let // a := lhs = *this; // b := rhs = other; @@ -319,8 +314,7 @@ template bool_t bool_t::operator^(const boo // Let r := a ^ b; // Constrain // q_m * w_a * w_b + q_l * w_a + q_r * w_b + q_o * r + q_c = 0 - context->create_poly_gate( - { witness_index, other.witness_index, result.witness_index, q_m, q_l, q_r, q_o, q_c }); + ctx->create_poly_gate({ witness_index, other.witness_index, result.witness_index, q_m, q_l, q_r, q_o, q_c }); } else if (!is_constant() && other.is_constant()) { // witness ^ 1 = !witness BB_ASSERT_EQ(other.witness_inverted, false); @@ -355,8 +349,9 @@ template bool_t bool_t::operator!() const */ template bool_t bool_t::operator==(const bool_t& other) const { - BB_ASSERT(context || other.context || (is_constant() && other.is_constant())); - bool_t result(context ? context : other.context); + Builder* ctx = validate_context(context, other.context); + bool_t result(ctx); + BB_ASSERT(ctx || (is_constant() && other.is_constant())); result.witness_bool = (witness_bool ^ witness_inverted) == (other.witness_bool ^ other.witness_inverted); if (!is_constant() && !other.is_constant()) { @@ -382,8 +377,7 @@ template bool_t bool_t::operator==(const bo bb::fr q_o{ bb::fr::neg_one() }; bb::fr q_c{ 1 - lhs_inverted - rhs_inverted + 2 * rhs_inverted * lhs_inverted }; - context->create_poly_gate( - { witness_index, other.witness_index, result.witness_index, q_m, q_r, q_l, q_o, q_c }); + ctx->create_poly_gate({ witness_index, other.witness_index, result.witness_index, q_m, q_l, q_r, q_o, q_c }); } else if (!is_constant() && (other.is_constant())) { // Compare *this with a constant other. If other == true, then we're checking *this == true. 
In this case we @@ -423,7 +417,7 @@ template bool_t bool_t::operator||(const bo template void bool_t::assert_equal(const bool_t& rhs, std::string const& msg) const { const bool_t lhs = *this; - Builder* ctx = lhs.get_context() ? lhs.get_context() : rhs.get_context(); + Builder* ctx = validate_context(rhs.get_context(), lhs.get_context()); (void)OriginTag(get_origin_tag(), rhs.get_origin_tag()); if (lhs.is_constant() && rhs.is_constant()) { BB_ASSERT_EQ(lhs.get_value(), rhs.get_value()); @@ -528,7 +522,7 @@ template bool_t bool_t::normalize() const bb::fr q_o = bb::fr::neg_one(); bb::fr q_m = bb::fr::zero(); bb::fr q_r = bb::fr::zero(); - context->create_poly_gate({ witness_index, witness_index, new_witness, q_m, q_l, q_r, q_o, q_c }); + context->create_poly_gate({ witness_index, context->zero_idx(), new_witness, q_m, q_l, q_r, q_o, q_c }); witness_index = new_witness; witness_bool = value; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/byte_array/byte_array.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/byte_array/byte_array.cpp index d32c13b8a7a1..f342d1189fa5 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/byte_array/byte_array.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/byte_array/byte_array.cpp @@ -13,17 +13,6 @@ using namespace bb; namespace bb::stdlib { -template -byte_array::byte_array(Builder* parent_context) - : context(parent_context) -{} - -template -byte_array::byte_array(Builder* parent_context, const size_t n) - : context(parent_context) - , values(std::vector>(n)) -{} - /** * @brief Create a byte array out of a vector of uint8_t bytes. * @@ -55,6 +44,23 @@ byte_array::byte_array(Builder* parent_context, const std::string& inpu : byte_array(parent_context, std::vector(input.begin(), input.end())) {} +/** + * @brief Create a byte_array from constant values without adding range constraints. 
+ * @details This is safe for constant data (like padding) because constants cannot be manipulated by the prover. + * Use this for padding, initialization, or other constant data to avoid unnecessary constraints. + */ +template +byte_array byte_array::from_constants(Builder* parent_context, std::vector const& input) +{ + bytes_t const_values; + const_values.reserve(input.size()); + for (const auto& byte : input) { + // Create constant field elements - no witness, no constraints + const_values.push_back(field_t(parent_context, byte)); + } + return byte_array(parent_context, const_values); +} + /** * @brief Create a byte_array of length `num_bytes` out of a field element. * @@ -184,6 +190,8 @@ byte_array::byte_array(const field_t& input, const field_t overlap = -diff_lo_hi + 1; // Ensure that (r - 1).hi - reconstructed_hi/shift - overlap is positive. + // SAFETY: reconstructed_hi is always a multiple of 2^128 by construction + // (bytes 0-15 all have scaling factors ≥ 2^128) const field_t diff_hi = (-reconstructed_hi / shift).add_two(s_hi, -overlap); diff_hi.create_range_constraint(128, "byte_array: y_hi doesn't fit in 128 bits."); } @@ -200,7 +208,7 @@ byte_array::byte_array(Builder* parent_context, bytes_t const& input) template byte_array::byte_array(Builder* parent_context, bytes_t&& input) : context(parent_context) - , values(input) + , values(std::move(input)) {} template @@ -211,7 +219,7 @@ byte_array::byte_array(const byte_array& other) } template -byte_array::byte_array(byte_array&& other) +byte_array::byte_array(byte_array&& other) noexcept : context(other.context) , values(std::move(other.values)) {} @@ -224,7 +232,7 @@ template byte_array& byte_array::operator=( return *this; } -template byte_array& byte_array::operator=(byte_array&& other) +template byte_array& byte_array::operator=(byte_array&& other) noexcept { context = other.context; values = std::move(other.values); @@ -234,12 +242,10 @@ template byte_array& byte_array::operator=( /** * @brief 
Convert a byte array into a field element. * - * @details The transformation is injective when the size of the byte array is < 32, which covers all the use cases. **/ template byte_array::operator field_t() const { const size_t bytes = values.size(); - BB_ASSERT(bytes < 32); static constexpr uint256_t one(1); std::vector> scaled_values; @@ -250,6 +256,7 @@ template byte_array::operator field_t() con return field_t::accumulate(scaled_values); } + /** * @brief Appends the contents of another `byte_array` (`other`) to the end of this one. */ @@ -260,8 +267,7 @@ template byte_array& byte_array::write(byte } /** - * @brief Overwrites this byte_array starting at index with the contents of other. Asserts that the write does not - * exceed the current size. + * @brief Overwrites this byte_array starting at index with the contents of other. */ template byte_array& byte_array::write_at(byte_array const& other, size_t index) { @@ -277,7 +283,7 @@ template byte_array& byte_array::write_at(b */ template byte_array byte_array::slice(size_t offset) const { - BB_ASSERT_DEBUG(offset < values.size()); + BB_ASSERT_LTE(offset, values.size()); return byte_array(context, bytes_t(values.begin() + static_cast(offset), values.end())); } @@ -287,9 +293,9 @@ template byte_array byte_array::slice(size_ **/ template byte_array byte_array::slice(size_t offset, size_t length) const { - BB_ASSERT_DEBUG(offset < values.size()); + BB_ASSERT_LTE(offset, values.size()); // it's <= cause vector constructor doesn't include end point - BB_ASSERT_DEBUG(length <= values.size() - offset); + BB_ASSERT_LTE(length, values.size() - offset); auto start = values.begin() + static_cast(offset); auto end = values.begin() + static_cast((offset + length)); return byte_array(context, bytes_t(start, end)); @@ -300,6 +306,9 @@ template byte_array byte_array::slice(size_ **/ template byte_array byte_array::reverse() const { + if (values.empty()) { + return *this; + } bytes_t bytes(values.size()); size_t offset = 
bytes.size() - 1; for (size_t i = 0; i < bytes.size(); i += 1, offset -= 1) { @@ -322,16 +331,6 @@ template std::vector byte_array::get_value( return bytes; } -/** - * @brief Given a `byte_array`, compute a vector containing the values of its entries and convert it to a string. - * @note Used only in tests. - */ -template std::string byte_array::get_string() const -{ - auto v = get_value(); - return std::string(v.begin(), v.end()); -} - template class byte_array; template class byte_array; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/byte_array/byte_array.fuzzer.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/byte_array/byte_array.fuzzer.hpp index 47a90e721536..568b3f3ef5c8 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/byte_array/byte_array.fuzzer.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/byte_array/byte_array.fuzzer.hpp @@ -348,18 +348,7 @@ template class ByteArrayFuzzBase { byte_array_t byte_array{ nullptr, std::vector{} }; - static std::vector get_value(const byte_array_t& byte_array) - { - /* Based on the PRNG, alternate between retrieving an std::vector - * and a string. - * These should be functionally equivalent. 
- */ - if (static_cast(VarianceRNG.next() % 2)) { - return byte_array.get_value(); - } else { - return from_to>(byte_array.get_string()); - } - } + static std::vector get_value(const byte_array_t& byte_array) { return byte_array.get_value(); } static const std::vector& bool_to_vector(const bool& b) { static const std::vector false_{ 0 }; @@ -462,7 +451,7 @@ template class ByteArrayFuzzBase { { const auto& ref = this->reference_value; - switch (VarianceRNG.next() % 8) { + switch (VarianceRNG.next() % 5) { case 0: #ifdef SHOW_INFORMATION std::cout << "byte_array_t(e);" << std::cout; @@ -470,30 +459,13 @@ template class ByteArrayFuzzBase { /* Construct via byte_array */ return ExecutionHandler(ref, byte_array_t(this->byte_array)); case 1: -#ifdef SHOW_INFORMATION - std::cout << "e.get_string();" << std::cout; -#endif - /* Construct via std::string */ - return ExecutionHandler(ref, byte_array_t(builder, this->byte_array.get_string())); - case 2: #ifdef SHOW_INFORMATION std::cout << "e.get_value();" << std::cout; #endif /* Construct via std::vector */ return ExecutionHandler(ref, byte_array_t(builder, this->byte_array.get_value())); - case 3: -#ifdef SHOW_INFORMATION - std::cout << "e.bytes();" << std::cout; -#endif - /* Construct via bytes_t */ - return ExecutionHandler(ref, byte_array_t(builder, this->byte_array.bytes())); - case 4: -#ifdef SHOW_INFORMATION - std::cout << "std::move(e.bytes());" << std::cout; -#endif - /* Construct via bytes_t move constructor */ - return ExecutionHandler(ref, byte_array_t(builder, std::move(this->byte_array.bytes()))); - case 5: { + // case 2 and 3: Removed - tested private bytes_t constructors (redundant with cases 0-1) + case 2: { const auto field = to_field_t(); if (field == std::nullopt) { @@ -523,7 +495,7 @@ template class ByteArrayFuzzBase { return ExecutionHandler(new_ref, byte_array_t(*field, num_bytes)); } } - case 6: { + case 3: { /* Create a byte_array with gibberish. 
* * The purpose of this is to ascertain that no gibberish @@ -541,7 +513,7 @@ template class ByteArrayFuzzBase { return ExecutionHandler(ref, ba); } break; - case 7: { + case 4: { static_assert(suint_t::MAX_BIT_NUM > 0); const auto field = to_field_t( /* One bit must be reserved */ diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/byte_array/byte_array.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/byte_array/byte_array.hpp index cf36ae76ef98..417069ac477a 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/byte_array/byte_array.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/byte_array/byte_array.hpp @@ -32,39 +32,47 @@ template class byte_array { Builder* context; bytes_t values; - public: - byte_array(Builder* parent_context = nullptr); - byte_array(Builder* parent_context, size_t const n); - byte_array(Builder* parent_context, std::string const& input); - byte_array(Builder* parent_context, std::vector const& input); + // Internal constructors - do NOT add constraints + // Only for use by member functions (slice, reverse, from_constants) byte_array(Builder* parent_context, bytes_t const& input); byte_array(Builder* parent_context, bytes_t&& input); - byte_array(const field_t& input, - const size_t num_bytes = 32, - std::optional test_val = std::nullopt); - byte_array(const byte_array& other); - byte_array(byte_array&& other); + // Create byte_array from constant values without adding range constraints + // Safe for padding and other constant data - constants can't be manipulated by the prover + static byte_array from_constants(Builder* parent_context, std::vector const& input); - byte_array& operator=(const byte_array& other); - byte_array& operator=(byte_array&& other); + public: + explicit byte_array(Builder* parent_context, std::string const& input); + // Explicit to prevent implicit conversion from size_t to std::vector + explicit byte_array(Builder* parent_context, std::vector const& input); + // 
Explicit to prevent implicit conversions from size_t/int to field_t + explicit byte_array(const field_t& input, + const size_t num_bytes = 32, + std::optional test_val = std::nullopt); + + // Convenience method for creating constant padding (common use case) + static byte_array constant_padding(Builder* parent_context, size_t num_bytes, uint8_t value = 0) + { + return from_constants(parent_context, std::vector(num_bytes, value)); + } + // Copy and move operations + byte_array(const byte_array& other); + byte_array(byte_array&& other) noexcept; + byte_array& operator=(const byte_array& other); + byte_array& operator=(byte_array&& other) noexcept; explicit operator field_t() const; field_t operator[](const size_t index) const - { - assert(values.size() > 0); - return values[index]; - } - - field_t& operator[](const size_t index) { BB_ASSERT_LT(index, values.size()); - return values[index]; } + // Append another byte_array to this one byte_array& write(byte_array const& other); + + // Overwrite bytes starting at index with contents of other byte_array& write_at(byte_array const& other, size_t index); byte_array slice(size_t offset) const; @@ -79,7 +87,6 @@ template class byte_array { // Out-of-circuit methods std::vector get_value() const; - std::string get_string() const; // OriginTag-specific methods void set_origin_tag(bb::OriginTag tag) diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/byte_array/byte_array.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/byte_array/byte_array.test.cpp index 7299c28c8e8e..617e51b39be8 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/byte_array/byte_array.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/byte_array/byte_array.test.cpp @@ -67,15 +67,6 @@ template class ByteArrayTest : public ::testing::Test { EXPECT_EQ(reversed_arr.bytes()[2].get_origin_tag(), clear_tag); } - void test_from_string_constructor() - { - Builder builder; - - std::string a = "ascii"; - 
byte_array_ct arr(&builder, a); - EXPECT_EQ(arr.get_string(), a); - } - void test_into_bytes_decomposition_less_than_32_bytes() { for (size_t num_bytes = 1; num_bytes < 32; num_bytes++) { @@ -211,10 +202,14 @@ template class ByteArrayTest : public ::testing::Test { field_ct b = witness_ct(&builder, slice_to_n_bytes(b_expected, 31)); b.set_origin_tag(challenge_origin_tag); - byte_array_ct arr(&builder); + // byte_array_ct(field, num_bytes) constructor adds range constraints for each byte + byte_array_ct a_bytes(a, 31); + byte_array_ct b_bytes(b, 31); - arr.write(byte_array_ct(a, 31)); - arr.write(byte_array_ct(b, 31)); + // Build byte_array by writing constrained byte_arrays + byte_array_ct arr(&builder, std::vector()); + arr.write(a_bytes); + arr.write(b_bytes); EXPECT_EQ(arr.size(), 62UL); @@ -236,15 +231,16 @@ template class ByteArrayTest : public ::testing::Test { for (size_t arr_length = 1; arr_length < 32; arr_length++) { Builder builder; - byte_array_ct test_array(&builder, arr_length); + // Generate random bytes std::vector native_bytes(arr_length); for (size_t idx = 0; idx < arr_length; idx++) { - uint8_t byte = engine.get_random_uint8(); - native_bytes[idx] = byte; - test_array[idx] = witness_ct(&builder, byte); + native_bytes[idx] = engine.get_random_uint8(); } + // Create byte_array from vector (this creates witnesses for each byte) + byte_array_ct test_array(&builder, native_bytes); + // Convert to field_t using the byte_array conversion field_ct represented_field_elt = static_cast(test_array); @@ -284,11 +280,6 @@ TYPED_TEST(ByteArrayTest, Reverse) TestFixture::test_reverse(); } -TYPED_TEST(ByteArrayTest, ConstructFromString) -{ - TestFixture::test_from_string_constructor(); -} - TYPED_TEST(ByteArrayTest, ByteDecompositionUnique) { TestFixture::test_into_bytes_decomposition_less_than_32_bytes(); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field_utils.cpp 
b/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field_utils.cpp index 9e599db8ad1b..fb0cc5fbb2a5 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field_utils.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field_utils.cpp @@ -11,10 +11,10 @@ namespace bb::stdlib { template -void validate_split_in_field(const field_t& lo, - const field_t& hi, - const size_t lo_bits, - const uint256_t& field_modulus) +void validate_split_in_field_unsafe(const field_t& lo, + const field_t& hi, + const size_t lo_bits, + const uint256_t& field_modulus) { const size_t hi_bits = static_cast(field_modulus.get_msb()) + 1 - lo_bits; @@ -79,7 +79,9 @@ std::pair, field_t> split_unique(const field_t, field_t, field_t> split_unique( const field_t& field, const size_t lo_bits, const bool skip_range_constraints); -// Explicit instantiations for validate_split_in_field -template void validate_split_in_field(const field_t& lo, - const field_t& hi, - const size_t lo_bits, - const uint256_t& field_modulus); -template void validate_split_in_field(const field_t& lo, - const field_t& hi, - const size_t lo_bits, - const uint256_t& field_modulus); +// Explicit instantiations for validate_split_in_field_unsafe +template void validate_split_in_field_unsafe(const field_t& lo, + const field_t& hi, + const size_t lo_bits, + const uint256_t& field_modulus); +template void validate_split_in_field_unsafe(const field_t& lo, + const field_t& hi, + const size_t lo_bits, + const uint256_t& field_modulus); // Explicit instantiations for mark_witness_as_used template void mark_witness_as_used(const field_t& field); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field_utils.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field_utils.hpp index fc73c7cea021..e68c1ef01148 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field_utils.hpp +++ 
b/barretenberg/cpp/src/barretenberg/stdlib/primitives/field/field_utils.hpp @@ -34,9 +34,14 @@ std::pair, field_t> split_unique(const field_t, field_t> split_unique(const field_t -void validate_split_in_field(const field_t& lo, - const field_t& hi, - const size_t lo_bits, - const uint256_t& field_modulus); +void validate_split_in_field_unsafe(const field_t& lo, + const field_t& hi, + const size_t lo_bits, + const uint256_t& field_modulus); /** * @brief Mark a field_t witness as used (for UltraBuilder only). diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp index 0bb09c40dc8f..91d8134c17e0 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.cpp @@ -41,9 +41,9 @@ template cycle_group::cycle_group(Builder* _context) * @param is_infinity */ template -cycle_group::cycle_group(field_t _x, field_t _y, bool_t is_infinity, bool assert_on_curve) - : _x(_x) - , _y(_y) +cycle_group::cycle_group(const field_t& x, const field_t& y, bool_t is_infinity, bool assert_on_curve) + : _x(x) + , _y(y) , _is_infinity(is_infinity) , _is_standard(is_infinity.is_constant()) { @@ -59,9 +59,15 @@ cycle_group::cycle_group(field_t _x, field_t _y, bool_t is_infinity, bo *this = constant_infinity(this->context); } - // We don't support points with only one constant coordinate since valid use-cases are limited and it complicates - // the logic - BB_ASSERT(_x.is_constant() == _y.is_constant(), "cycle_group: Inconsistent constancy of coordinates"); + // For the simplicity of methods in this class, we ensure that the coordinates of a point always have the same + // constancy. If they don't, we convert the non-constant coordinate to a fixed witness. 
+ if (_x.is_constant() != _y.is_constant()) { + if (_x.is_constant()) { + _x.convert_constant_to_fixed_witness(context); + } else { + _y.convert_constant_to_fixed_witness(context); + } + } // Elements are always expected to be on the curve but may or may not be constrained as such. BB_ASSERT(get_value().on_curve(), "cycle_group: Point is not on curve"); @@ -83,9 +89,9 @@ cycle_group::cycle_group(field_t _x, field_t _y, bool_t is_infinity, bo * @param is_infinity */ template -cycle_group::cycle_group(const bb::fr& _x, const bb::fr& _y, bool is_infinity) - : _x(is_infinity ? 0 : _x) - , _y(is_infinity ? 0 : _y) +cycle_group::cycle_group(const bb::fr& x, const bb::fr& y, bool is_infinity) + : _x(is_infinity ? 0 : x) + , _y(is_infinity ? 0 : y) , _is_infinity(is_infinity) , _is_standard(true) , context(nullptr) @@ -942,8 +948,8 @@ typename cycle_group::batch_mul_internal_output cycle_group::_ std::array table_id = table::get_lookup_table_ids_for_point(point); multitable_ids.push_back(table_id[0]); multitable_ids.push_back(table_id[1]); - scalar_limbs.push_back(scalar.lo); - scalar_limbs.push_back(scalar.hi); + scalar_limbs.push_back(scalar.lo()); + scalar_limbs.push_back(scalar.hi()); } // Look up the multiples of each slice of each lo/hi scalar limb in the corresponding plookup table. @@ -1223,13 +1229,6 @@ cycle_group cycle_group::conditional_assign(const bool_t& pred _is_standard_res = predicate.get_value() ? lhs._is_standard : rhs._is_standard; } - // AUDITTODO: Talk to Sasha. Comment seems to be unrelated and its not clear why the logic is needed. - // Rare case when we bump into two constants, s.t. 
lhs = -rhs - if (x_res.is_constant() && !y_res.is_constant()) { - auto ctx = predicate.get_context(); - x_res = field_t::from_witness_index(ctx, ctx->put_constant_variable(x_res.get_value())); - } - cycle_group result(x_res, y_res, _is_infinity_res, /*assert_on_curve=*/false); result._is_standard = _is_standard_res; return result; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp index 6b2a07be2b48..8f8f631e4bd8 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_group.hpp @@ -67,8 +67,8 @@ template class cycle_group { public: cycle_group(Builder* _context = nullptr); - cycle_group(field_t _x, field_t _y, bool_t _is_infinity, bool assert_on_curve); - cycle_group(const bb::fr& _x, const bb::fr& _y, bool _is_infinity); + cycle_group(const field_t& x, const field_t& y, bool_t is_infinity, bool assert_on_curve); + cycle_group(const bb::fr& x, const bb::fr& y, bool is_infinity); cycle_group(const AffineElement& _in); static cycle_group one(Builder* _context); static cycle_group constant_infinity(Builder* _context = nullptr); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_scalar.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_scalar.cpp index 1fc034d5e4f8..a925141bea00 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_scalar.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_scalar.cpp @@ -12,15 +12,42 @@ namespace bb::stdlib { +/** + * @brief Private constructor that skips field validation (for internal use only) + * @details This constructor is used internally in contexts where validation has already been performed externally + * or where it is not required at all (e.g., 256-bit bitstrings). 
+ * + * @tparam Builder + * @param lo Low LO_BITS of the scalar + * @param hi High HI_BITS of the scalar + * @param flag SkipValidation::FLAG explicitly indicates that validation should be skipped + */ template -cycle_scalar::cycle_scalar(const field_t& _lo, const field_t& _hi, bool skip_validation) - : lo(_lo) - , hi(_hi) +cycle_scalar::cycle_scalar(const field_t& lo, const field_t& hi, [[maybe_unused]] SkipValidation flag) + : _lo(lo) + , _hi(hi) +{} + +/** + * @brief Construct a cycle_scalar from lo and hi field elements + * @details Standard public constructor. Validates that (lo + hi * 2^LO_BITS) is less than the Grumpkin scalar field + * modulus. Use this constructor when creating cycle_scalars from arbitrary field elements that may not have been + * previously validated. + * + * @warning The validation performed by this constructor is only sound if the resulting cycle_scalar is used in a + * scalar multiplication operation (batch_mul), which provides the necessary range constraints on lo and hi. See + * validate_scalar_is_in_field() documentation for details. 
+ * + * @tparam Builder + * @param lo Low LO_BITS of the scalar + * @param hi High HI_BITS of the scalar + */ +template +cycle_scalar::cycle_scalar(const field_t& lo, const field_t& hi) + : _lo(lo) + , _hi(hi) { - // Unless explicitly skipped, validate the scalar is in the Grumpkin scalar field - if (!skip_validation) { - validate_scalar_is_in_field(); - } + validate_scalar_is_in_field(); } /** @@ -33,8 +60,8 @@ template cycle_scalar::cycle_scalar(const ScalarFiel { const uint256_t value(in); const auto [lo_v, hi_v] = decompose_into_lo_hi_u256(value); - lo = lo_v; - hi = hi_v; + _lo = lo_v; + _hi = hi_v; } /** @@ -81,7 +108,7 @@ cycle_scalar cycle_scalar::from_u256_witness(Builder* context, const uint256_t hi_v = bitstring.slice(LO_BITS, num_bits); auto lo = field_t::from_witness(context, typename field_t::native(lo_v)); auto hi = field_t::from_witness(context, typename field_t::native(hi_v)); - cycle_scalar result{ lo, hi, /*skip_validation=*/true }; + cycle_scalar result{ lo, hi, SkipValidation::FLAG }; result._num_bits = num_bits; return result; } @@ -102,7 +129,7 @@ template cycle_scalar cycle_scalar::create_ // Note: split_unique validates the value is less than bn254::fr::modulus auto [lo, hi] = split_unique(in, LO_BITS, /*skip_range_constraints=*/true); // Note: we skip validation here since it is redundant with `split_unique` - return cycle_scalar{ lo, hi, /*skip_validation=*/true }; + return cycle_scalar{ lo, hi, SkipValidation::FLAG }; } /** @@ -147,10 +174,10 @@ template cycle_scalar::cycle_scalar(BigScalarField& const uint256_t value((scalar.get_value() % uint512_t(ScalarField::modulus)).lo); const auto [value_lo, value_hi] = decompose_into_lo_hi_u256(value); - lo = value_lo; - hi = value_hi; - lo.set_origin_tag(scalar.get_origin_tag()); - hi.set_origin_tag(scalar.get_origin_tag()); + _lo = value_lo; + _hi = value_hi; + _lo.set_origin_tag(scalar.get_origin_tag()); + _hi.set_origin_tag(scalar.get_origin_tag()); return; } @@ -187,7 +214,7 @@ template 
cycle_scalar::cycle_scalar(BigScalarField& BB_ASSERT_GT(NUM_LIMB_BITS * 2, LO_BITS); BB_ASSERT_LT(NUM_LIMB_BITS, LO_BITS); - // Step 3: limb1 contributes to both *this.lo and *this.hi. Compute the values of the two limb1 slices + // Step 3: limb1 contributes to both *this._lo and *this._hi. Compute the values of the two limb1 slices const size_t lo_bits_in_limb_1 = LO_BITS - NUM_LIMB_BITS; const auto limb1_max_bits = static_cast(limb1_max.get_msb() + 1); auto [limb1_lo, limb1_hi] = limb1.no_wrap_split_at(lo_bits_in_limb_1, limb1_max_bits); @@ -196,41 +223,50 @@ template cycle_scalar::cycle_scalar(BigScalarField& limb1_lo.set_origin_tag(limb1.get_origin_tag()); limb1_hi.set_origin_tag(limb1.get_origin_tag()); - // Step 4: Construct *this.lo out of limb0 and limb1_lo - lo = limb0 + (limb1_lo * BigScalarField::shift_1); + // Step 4: Construct *this._lo out of limb0 and limb1_lo + _lo = limb0 + (limb1_lo * BigScalarField::shift_1); - // Step 5: Construct *this.hi out of limb1_hi, limb2 and limb3 + // Step 5: Construct *this._hi out of limb1_hi, limb2 and limb3 const uint256_t limb_2_shift = uint256_t(1) << ((2 * NUM_LIMB_BITS) - LO_BITS); const uint256_t limb_3_shift = uint256_t(1) << ((3 * NUM_LIMB_BITS) - LO_BITS); - hi = limb1_hi.add_two(limb2 * limb_2_shift, limb3 * limb_3_shift); + _hi = limb1_hi.add_two(limb2 * limb_2_shift, limb3 * limb_3_shift); // Manually propagate the origin tag of the scalar to the lo/hi limbs - lo.set_origin_tag(scalar.get_origin_tag()); - hi.set_origin_tag(scalar.get_origin_tag()); + _lo.set_origin_tag(scalar.get_origin_tag()); + _hi.set_origin_tag(scalar.get_origin_tag()); validate_scalar_is_in_field(); }; template bool cycle_scalar::is_constant() const { - return (lo.is_constant() && hi.is_constant()); + return (_lo.is_constant() && _hi.is_constant()); } /** * @brief Validates that the scalar (lo + hi * 2^LO_BITS) is less than the Grumpkin scalar field modulus - * @details Delegates to `validate_split_in_field` + * @details Delegates to 
`validate_split_in_field_unsafe`, which uses a borrow-subtraction algorithm to check the + * inequality. + * + * @warning This validation assumes range constraints on the lo and hi limbs. Specifically: + * - lo < 2^LO_BITS (128 bits) + * - hi < 2^HI_BITS (126 bits) + * + * By design, these range constraints are not applied by this function. Instead, they are implicitly enforced when + * the cycle_scalar is used in scalar multiplication via batch_mul. * * @tparam Builder */ template void cycle_scalar::validate_scalar_is_in_field() const { - validate_split_in_field(lo, hi, LO_BITS, ScalarField::modulus); + // Using _unsafe variant: range constraints are deferred to batch_mul's decompose_into_default_range + validate_split_in_field_unsafe(_lo, _hi, LO_BITS, ScalarField::modulus); } template typename cycle_scalar::ScalarField cycle_scalar::get_value() const { - uint256_t lo_v(lo.get_value()); - uint256_t hi_v(hi.get_value()); + uint256_t lo_v(_lo.get_value()); + uint256_t hi_v(_hi.get_value()); return ScalarField(lo_v + (hi_v << LO_BITS)); } diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_scalar.hpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_scalar.hpp index 6198ed387241..23920b106f7f 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_scalar.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_scalar.hpp @@ -26,6 +26,10 @@ template class cycle_group; * @note The reason for not using `bigfield` to represent cycle scalars is that `bigfield` is inefficient in this * context. All required range checks for `cycle_scalar` can be obtained for free from the `batch_mul` algorithm, making * the range checks performed by `bigfield` largely redundant. + * + * @warning: The field validation performed by cycle_scalar constructors assumes that the lo/hi limbs will + * be range-constrained during scalar multiplication. 
The validation is ONLY sound when the cycle_scalar is used in a + * batch_mul operation (which applies range constraints as part of the MSM algorithm). */ template class cycle_scalar { public: @@ -38,10 +42,11 @@ template class cycle_scalar { static constexpr size_t LO_BITS = field_t::native::Params::MAX_BITS_PER_ENDOMORPHISM_SCALAR; static constexpr size_t HI_BITS = NUM_BITS - LO_BITS; - field_t lo; // LO_BITS of the scalar - field_t hi; // Remaining HI_BITS of the scalar + enum class SkipValidation { FLAG }; private: + field_t _lo; // LO_BITS of the scalar + field_t _hi; // Remaining HI_BITS of the scalar size_t _num_bits = NUM_BITS; /** @@ -55,58 +60,60 @@ template class cycle_scalar { return { value.slice(0, LO_BITS), value.slice(LO_BITS, NUM_BITS) }; } + cycle_scalar(const field_t& lo, const field_t& hi, SkipValidation flag); + + /** + * @brief Validates that the scalar (lo + hi * 2^LO_BITS) is less than the Grumpkin scalar field modulus + */ + void validate_scalar_is_in_field() const; + public: - // AUDITTODO: this is used only in the fuzzer. - cycle_scalar(const ScalarField& _in = 0); - cycle_scalar(const field_t& _lo, const field_t& _hi, bool skip_validation = false); - // AUDITTODO: this is used only in the fuzzer. Its not inherently problematic, but perhaps the fuzzer should use a - // production entrypoint. + cycle_scalar(const ScalarField& in = 0); + cycle_scalar(const field_t& lo, const field_t& hi); static cycle_scalar from_witness(Builder* context, const ScalarField& value); static cycle_scalar from_u256_witness(Builder* context, const uint256_t& bitstring); - static cycle_scalar create_from_bn254_scalar(const field_t& _in); + static cycle_scalar create_from_bn254_scalar(const field_t& in); explicit cycle_scalar(BigScalarField& scalar); [[nodiscard]] bool is_constant() const; ScalarField get_value() const; - Builder* get_context() const { return lo.get_context() != nullptr ? 
lo.get_context() : hi.get_context(); } + Builder* get_context() const { return _lo.get_context() != nullptr ? _lo.get_context() : _hi.get_context(); } [[nodiscard]] size_t num_bits() const { return _num_bits; } - /** - * @brief Validates that the scalar (lo + hi * 2^LO_BITS) is less than the Grumpkin scalar field modulus - */ - void validate_scalar_is_in_field() const; + const field_t& lo() const { return _lo; } + const field_t& hi() const { return _hi; } /** * @brief Get the origin tag of the cycle_scalar (a merge of the lo and hi tags) * * @return OriginTag */ - OriginTag get_origin_tag() const { return OriginTag(lo.get_origin_tag(), hi.get_origin_tag()); } + OriginTag get_origin_tag() const { return OriginTag(_lo.get_origin_tag(), _hi.get_origin_tag()); } /** * @brief Set the origin tag of lo and hi members of cycle scalar * * @param tag */ - void set_origin_tag(const OriginTag& tag) const + void set_origin_tag(const OriginTag& tag) { - lo.set_origin_tag(tag); - hi.set_origin_tag(tag); + _lo.set_origin_tag(tag); + _hi.set_origin_tag(tag); } /** * @brief Set the free witness flag for the cycle scalar's tags */ void set_free_witness_tag() { - lo.set_free_witness_tag(); - hi.set_free_witness_tag(); + _lo.set_free_witness_tag(); + _hi.set_free_witness_tag(); } /** * @brief Unset the free witness flag for the cycle scalar's tags */ void unset_free_witness_tag() { - lo.unset_free_witness_tag(); - hi.unset_free_witness_tag(); + _lo.unset_free_witness_tag(); + _hi.unset_free_witness_tag(); } }; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_scalar.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_scalar.test.cpp index ef1a70cddc9c..ba3020ba20c2 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_scalar.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/cycle_scalar.test.cpp @@ -48,8 +48,8 @@ TYPED_TEST(CycleScalarTest, TestFromWitness) EXPECT_EQ(scalar.num_bits(), 
cycle_scalar::NUM_BITS); // Check that lo and hi reconstruct to the original value - uint256_t lo_val = uint256_t(scalar.lo.get_value()); - uint256_t hi_val = uint256_t(scalar.hi.get_value()); + uint256_t lo_val = uint256_t(scalar.lo().get_value()); + uint256_t hi_val = uint256_t(scalar.hi().get_value()); uint256_t reconstructed = lo_val + (hi_val << cycle_scalar::LO_BITS); EXPECT_EQ(ScalarField(reconstructed), scalar_val); @@ -74,8 +74,8 @@ TYPED_TEST(CycleScalarTest, TestFromU256Witness) EXPECT_EQ(scalar.num_bits(), 256); // Check that lo and hi reconstruct to the original value - uint256_t lo_val = uint256_t(scalar.lo.get_value()); - uint256_t hi_val = uint256_t(scalar.hi.get_value()); + uint256_t lo_val = uint256_t(scalar.lo().get_value()); + uint256_t hi_val = uint256_t(scalar.hi().get_value()); uint256_t reconstructed = lo_val + (hi_val << cycle_scalar::LO_BITS); EXPECT_EQ(reconstructed, value); @@ -103,8 +103,8 @@ TYPED_TEST(CycleScalarTest, TestCreateFromBn254Scalar) EXPECT_FALSE(scalar.is_constant()); // Check that lo and hi reconstruct to the original value - uint256_t lo_val = uint256_t(scalar.lo.get_value()); - uint256_t hi_val = uint256_t(scalar.hi.get_value()); + uint256_t lo_val = uint256_t(scalar.lo().get_value()); + uint256_t hi_val = uint256_t(scalar.hi().get_value()); uint256_t reconstructed = lo_val + (hi_val << cycle_scalar::LO_BITS); EXPECT_EQ(NativeField(reconstructed), field_val.get_value()); @@ -133,8 +133,8 @@ TYPED_TEST(CycleScalarTest, TestBigScalarFieldConstructor) EXPECT_FALSE(scalar.is_constant()); // Verify lo/hi decomposition matches - uint256_t lo_val = uint256_t(scalar.lo.get_value()); - uint256_t hi_val = uint256_t(scalar.hi.get_value()); + uint256_t lo_val = uint256_t(scalar.lo().get_value()); + uint256_t hi_val = uint256_t(scalar.hi().get_value()); uint256_t reconstructed = lo_val + (hi_val << cycle_scalar::LO_BITS); EXPECT_EQ(ScalarField(reconstructed), value); @@ -153,8 +153,8 @@ TYPED_TEST(CycleScalarTest, 
TestBigScalarFieldConstructor) EXPECT_TRUE(scalar.is_constant()); // Verify lo/hi decomposition matches - uint256_t lo_val = uint256_t(scalar.lo.get_value()); - uint256_t hi_val = uint256_t(scalar.hi.get_value()); + uint256_t lo_val = uint256_t(scalar.lo().get_value()); + uint256_t hi_val = uint256_t(scalar.hi().get_value()); uint256_t reconstructed = lo_val + (hi_val << cycle_scalar::LO_BITS); EXPECT_EQ(ScalarField(reconstructed), value); @@ -219,12 +219,12 @@ TYPED_TEST(CycleScalarTest, TestScalarFieldValidationFailureBetweenModuli) // Verify the reconstructed value matches what we expect uint256_t reconstructed = - uint256_t(scalar.lo.get_value()) + (uint256_t(scalar.hi.get_value()) << cycle_scalar::LO_BITS); + uint256_t(scalar.lo().get_value()) + (uint256_t(scalar.hi().get_value()) << cycle_scalar::LO_BITS); EXPECT_EQ(reconstructed, value_between_moduli); - // Now directly call validate_split_in_field with BN254::fr modulus + // Now directly call validate_split_in_field_unsafe with BN254::fr modulus // This should create unsatisfied constraints because value > BN254::fr modulus - bb::stdlib::validate_split_in_field(lo, hi, cycle_scalar::LO_BITS, bn254_fr_modulus); + bb::stdlib::validate_split_in_field_unsafe(lo, hi, cycle_scalar::LO_BITS, bn254_fr_modulus); // The builder should have failed EXPECT_TRUE(builder.failed()); @@ -248,8 +248,8 @@ TYPED_TEST(CycleScalarTest, TestBigScalarFieldConstructorEdgeCases) cycle_scalar scalar(zero_scalar); EXPECT_EQ(scalar.get_value(), ScalarField(0)); - EXPECT_EQ(scalar.lo.get_value(), 0); - EXPECT_EQ(scalar.hi.get_value(), 0); + EXPECT_EQ(scalar.lo().get_value(), 0); + EXPECT_EQ(scalar.hi().get_value(), 0); check_circuit_and_gate_count(builder, 3523); } @@ -263,8 +263,8 @@ TYPED_TEST(CycleScalarTest, TestBigScalarFieldConstructorEdgeCases) cycle_scalar scalar(small_scalar); EXPECT_EQ(scalar.get_value(), ScalarField(small_value)); - EXPECT_EQ(scalar.lo.get_value(), small_value); - EXPECT_EQ(scalar.hi.get_value(), 0); + 
EXPECT_EQ(scalar.lo().get_value(), small_value); + EXPECT_EQ(scalar.hi().get_value(), 0); check_circuit_and_gate_count(builder, 3523); } diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/straus_scalar_slice.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/straus_scalar_slice.cpp index 2cf19556426a..cf0433918dd1 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/straus_scalar_slice.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/group/straus_scalar_slice.cpp @@ -84,8 +84,8 @@ straus_scalar_slices::straus_scalar_slices(Builder* context, constexpr size_t LO_BITS = cycle_scalar::LO_BITS; const size_t lo_bits = scalar.num_bits() > LO_BITS ? LO_BITS : scalar.num_bits(); const size_t hi_bits = scalar.num_bits() > LO_BITS ? scalar.num_bits() - LO_BITS : 0; - auto hi_slices = compute_scalar_slices(context, scalar.hi, hi_bits, table_bits); - auto lo_slices = compute_scalar_slices(context, scalar.lo, lo_bits, table_bits); + auto hi_slices = compute_scalar_slices(context, scalar.hi(), hi_bits, table_bits); + auto lo_slices = compute_scalar_slices(context, scalar.lo(), lo_bits, table_bits); std::copy(lo_slices.first.begin(), lo_slices.first.end(), std::back_inserter(slices)); std::copy(hi_slices.first.begin(), hi_slices.first.end(), std::back_inserter(slices)); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/safe_uint/safe_uint.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/safe_uint/safe_uint.test.cpp index a6d3839fc37d..d4cf6847365b 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/safe_uint/safe_uint.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/safe_uint/safe_uint.test.cpp @@ -759,26 +759,3 @@ TYPED_TEST(SafeUintTest, TestDivRemainderConstraint) bool result = CircuitChecker::check(builder); EXPECT_EQ(result, false); } - -TYPED_TEST(SafeUintTest, TestByteArrayConversion) -{ - STDLIB_TYPE_ALIASES - auto builder = Builder(); - - field_ct 
elt = witness_ct(&builder, 0x7f6f5f4f00010203); - elt.set_origin_tag(next_challenge_tag); - suint_ct safe(elt, 63); - // safe.value is a uint256_t, so we serialize to a 32-byte array - std::string expected = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x7f, 0x6f, 0x5f, 0x4f, 0x00, 0x01, 0x02, 0x03 }; - - byte_array_ct arr(&builder); - arr.write(static_cast(safe)); - EXPECT_EQ(arr.get_string(), expected); - // Conversion to byte_array preserves tags - for (const auto& single_byte : arr.bytes()) { - EXPECT_EQ(single_byte.get_origin_tag(), next_challenge_tag); - } - EXPECT_EQ(arr.get_origin_tag(), next_challenge_tag); -} diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/decider_prover.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/decider_prover.cpp index e3f53bfec9ee..1784755b7d7f 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/decider_prover.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/decider_prover.cpp @@ -12,22 +12,17 @@ namespace bb { /** - * Create DeciderProver_ from an accumulator. - * - * @param accumulator Relaxed instance (ϕ, ω, \vec{β}, e) whose proof we want to generate, produced by Protogalaxy - * folding prover - * - * @tparam a type of UltraFlavor - * */ + * Create DeciderProver_ from a prover instance. + */ template -DeciderProver_::DeciderProver_(const std::shared_ptr& prover_instance, - const std::shared_ptr& transcript) +DeciderProver_::DeciderProver_(std::shared_ptr prover_instance, + std::shared_ptr transcript) : prover_instance(std::move(prover_instance)) - , transcript(transcript) + , transcript(std::move(transcript)) {} /** - * @brief Run Sumcheck to establish that ∑_i pow(\vec{β*})f_i(ω) = e*. This results in u = (u_1,...,u_d) sumcheck round + * @brief Run Sumcheck to establish that ∑_i pow(\vec{β*})f_i(ω) = 0. 
This results in u = (u_1,...,u_d) sumcheck round * challenges and all evaluations at u being calculated. * */ diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/decider_prover.hpp b/barretenberg/cpp/src/barretenberg/ultra_honk/decider_prover.hpp index b943a0bc72c8..7f855bfc2303 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/decider_prover.hpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/decider_prover.hpp @@ -35,8 +35,7 @@ template class DeciderProver_ { using Proof = typename Flavor::Transcript::Proof; public: - explicit DeciderProver_(const std::shared_ptr&, - const std::shared_ptr& transcript = std::make_shared()); + explicit DeciderProver_(std::shared_ptr, std::shared_ptr transcript); BB_PROFILE void execute_relation_check_rounds(); BB_PROFILE void execute_pcs_rounds(); diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/decider_verifier.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/decider_verifier.cpp index d369242559ed..7a260b0cf54f 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/decider_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/decider_verifier.cpp @@ -57,7 +57,7 @@ template typename DeciderVerifier_::Output DeciderVeri } } - SumcheckVerifier sumcheck(transcript, accumulator->alpha, virtual_log_n, accumulator->target_sum); + SumcheckVerifier sumcheck(transcript, accumulator->alpha, virtual_log_n); // For MegaZKFlavor: receive commitments to Libra masking polynomials std::array libra_commitments = {}; if constexpr (Flavor::HasZK) { diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/prover_instance.hpp b/barretenberg/cpp/src/barretenberg/ultra_honk/prover_instance.hpp index 40cf58a1f265..072ca756f61f 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/prover_instance.hpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/prover_instance.hpp @@ -27,43 +27,7 @@ namespace bb { /** * @brief A ProverInstance is normally constructed from a finalized circuit and it contains all the 
information - * required by a Mega Honk prover to create a proof. A ProverInstance is also the result of running the - * Protogalaxy prover, in which case it becomes a relaxed counterpart with the folding parameters (target sum and gate - * challenges set to non-zero values). - * - * @details A ProverInstance is the equivalent of \f$\omega\f$ in the Protogalaxy paper. - * - * Our arithmetization works as follows. The Flavor defines \f$fM\f$ (Flavor::NUM_ALL_ENTITIES) and a series of - * relations - * \f$R_1, \dots, R_n\f$ (Flavor::Relations_). Each relation is made up by a series of subrelations: \f$R_i = - * (R_{i,1}, \dots, R_{i,r_i})\f$. - * - * Write \f$p_1, \dots, p_M\f$ for the prover polynomials and \f$p_{i,k}\f$ for the \f$k\f$-th coefficient of \f$p_i\f$. - * Write \f$\theta_1, \dots, \theta_6\f$ for the relation parameters. Let \f$n\f$ be the max degree of the prover - * polynomials. A pure ProverInstance is valid if for all \f$i, j, k\f$ we have \f$R_{i,j}(p_{1,k}, \dots, - * p_{M,k}, \theta_1, \dots, \theta_6) = 0\f$. - * - * Instead of checking each equality separately, we batch them using challenges that we call `alphas`. Thus, a - * ProverInstance is valid if for each \f$k = 0, \dots, n\f$. - * \f[ - * f_k(\omega) := \sum_{i, j} \alpha_{i,j} R_{i,j}(p_{1,k}, \dots, p_{M,k}, \theta_1, \dots, \theta_6) = 0 - * \f] - * - * Instead of checking each equality separately, we once again batch them using challenges. These challenges are the - * \f$pow_i(\beta)\f$ in the Protogalaxy paper, and are derived using the vector `gate_challenges` as the vector - * \f$\beta\f$. Write \f$gc\f$ for the vector `gate_challenges`. Then, a ProverInstance is valid if - * \f[ - * \sum_{k} pow_k(gc) f_k(\omega) = 0 - * \f] - * The equation is modified for a relaxed ProverInstance to - * \f[ - * \sum_{k} pow_k(gc) f_k(\omega) = ts - * \f] - * where we write \f$ts\f$ for the vector `target_sum`. 
- * - * Hence, the correspondence between the class below and the Protogalaxy paper is \f$\omega = (p_1, \dots, p_M, , - * \theta_1, \dots, \theta_6, \alpha_{1,1}, \dots, \alpha_{n,r_n})\f$, \f$\beta\f$ are the `gate_challenges`, and - * \f$e\f$ is `target_sum`. + * required by a Mega Honk prover to create a proof. */ template class ProverInstance_ { @@ -92,12 +56,10 @@ template class ProverInstance_ { SubrelationSeparator alpha; // single challenge from which powers are computed for batching subrelations bb::RelationParameters relation_parameters; std::vector gate_challenges; - FF target_sum{ 0 }; // Sumcheck target sum HonkProof ipa_proof; // utilized only for UltraRollupFlavor - bool is_relaxed_instance = false; // whether this instance is relaxed or not - bool is_complete = false; // whether this instance has been completely populated + bool is_complete = false; // whether this instance has been completely populated std::vector memory_read_records; std::vector memory_write_records; diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_verifier.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_verifier.cpp index 85294fac6bfc..8198905759de 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_verifier.cpp @@ -33,7 +33,6 @@ UltraVerifier_::UltraVerifierOutput UltraVerifier_::verify_proof // Determine the number of rounds in the sumcheck based on whether or not padding is employed const size_t log_n = Flavor::USE_PADDING ? 
Flavor::VIRTUAL_LOG_N : static_cast(verifier_instance->vk->log_circuit_size); - verifier_instance->target_sum = 0; verifier_instance->gate_challenges = transcript->template get_powers_of_challenge("Sumcheck:gate_challenge", log_n); diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/verifier_instance.hpp b/barretenberg/cpp/src/barretenberg/ultra_honk/verifier_instance.hpp index 5026911cffc7..0a9954eb6667 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/verifier_instance.hpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/verifier_instance.hpp @@ -12,15 +12,8 @@ namespace bb { /** * @brief The VerifierInstance encapsulates all the necessary information for a Mega Honk Verifier to verify a - * proof (sumcheck + Shplemini). In the context of folding, this is returned by the Protogalaxy verifier with non-zero - * target sum. - * - * @details This is \f$\phi\f$ in the Protogalaxy paper. It is the committed version of a ProverInstance_. With the - * notation used in ProverInstance_, a prover instance is \f$\omega = (p_1, \dots, p_M, \theta_1, \dots, \theta_6, - * \alpha_{1,1}, \dots, \alpha_{n,r_n})\f$ where the \f$p_i\f$'s are the prover polynomials, the \f$\theta_i\f$'s are - * the relation parameters, and the \f$\alpha_{i,j}\f$'s are the subrelation batching parameters. Then, \f$\phi\f$ is - * given by \f$\omega = ([p_1], \dots, [p_M], \theta_1, \dots, \theta_6, \alpha_{1,1}, \dots, \alpha_{n,r_n})\f$m where - * [p_i] denotes the commitment to the i-th prover polynomial. + * proof (sumcheck + Shplemini). In the context of folding, this is provided to the Hypernova verifier as an incoming + * instance. 
*/ template class VerifierInstance_ { public: @@ -42,8 +35,6 @@ template class VerifierInstance_ { SubrelationSeparator alpha; // a challenge whose powers are used to batch subrelation contributions during Sumcheck RelationParameters relation_parameters; std::vector gate_challenges; - // The target sum, which is typically nonzero for a ProtogalaxyProver's accumulator - FF target_sum{ 0 }; WitnessCommitments witness_commitments; CommitmentLabels commitment_labels; @@ -89,14 +80,13 @@ template class VerifierInstance_ { this->relation_parameters.gamma); transcript.add_to_independent_hash_buffer(domain_separator + "verifier_inst_public_input_delta", this->relation_parameters.public_input_delta); - transcript.add_to_independent_hash_buffer(domain_separator + "verifier_inst_target_sum", this->target_sum); transcript.add_to_independent_hash_buffer(domain_separator + "verifier_inst_gate_challenges", this->gate_challenges); return transcript.hash_independent_buffer(); } - MSGPACK_FIELDS(vk, relation_parameters, alpha, is_complete, gate_challenges, target_sum, witness_commitments); + MSGPACK_FIELDS(vk, relation_parameters, alpha, is_complete, gate_challenges, witness_commitments); }; } // namespace bb diff --git a/barretenberg/docs/docs/explainers/advanced/chonk.md b/barretenberg/docs/docs/explainers/advanced/chonk.md index f354d2004238..5f6d8c0020f2 100644 --- a/barretenberg/docs/docs/explainers/advanced/chonk.md +++ b/barretenberg/docs/docs/explainers/advanced/chonk.md @@ -1,7 +1,7 @@ --- title: CHONK - Client-side Highly Optimized ploNK description: Learn about CHONK, Aztec's specialized proving system designed for client-side proving with low memory requirements and efficient recursion for private smart contract execution. 
-keywords: [chonk, plonk, proving system, recursive proofs, protogalaxy, goblin plonk, zero knowledge, aztec, client-side proving, hyperplonk, sumcheck] +keywords: [chonk, plonk, proving system, recursive proofs, hypernova, goblin plonk, zero knowledge, aztec, client-side proving, hyperplonk, sumcheck] image: https://hackmd.io/_uploads/BkpsblXEgg.jpg sidebar_position: 1 --- @@ -34,13 +34,13 @@ A statement about contract execution will translate to multiple circuits - repre This eliminates FFT's and reduces prover time and memory at the expense of proof length. This approach is the main theme of the [hyperplonk paper](https://eprint.iacr.org/2022/1355). -### 3. Using the protogalaxy (PG) folding scheme +### 3. Using the Hypernova (HN) folding scheme -Folding schemes enable cheaper recursion than standard recursive proofs. They work most smoothly with elliptic-curve based proofs systems like CHONK. We specifically work with [protogalaxy](https://eprint.iacr.org/2023/1106) which is convenient and efficient for folding non-uniform PlonK circuits (i.e. not a fixed repeating circuit). +Folding schemes enable cheaper recursion than standard recursive proofs. They work most smoothly with elliptic-curve based proof systems like CHONK. We specifically work with [HyperNova](https://eprint.iacr.org/2023/573) which is convenient and efficient for folding non-uniform PlonK circuits (i.e. not a fixed repeating circuit). -### 4. Enhancing PG with "Goblin plonk" +### 4. Enhancing HN with "Goblin plonk" -Though HN (as do other folding schemes) already facilitates efficient recursion, it can still be a bit heavy client-side due to the non-native elliptic curve scalar multiplications performed by the folding verifier. For this reason, we use a "lazy" version of PG where the verifier doesn't perform these operations, but rather simply adds them to a queue of EC operations that need to be performed at the final proving stage. 
We call this deferral mechanism [*Goblin Plonk*](https://hackmd.io/@aztec-network/BkGNaHUJn/%2FdUsu57SOTBiQ4tS9KJMkMQ) (GP) (see also [this paper](https://eprint.iacr.org/2024/1651)). +Though HN (as do other folding schemes) already facilitates efficient recursion, it can still be a bit heavy client-side due to the non-native elliptic curve scalar multiplications performed by the folding verifier. For this reason, we use a "lazy" version of HN where the verifier doesn't perform these operations, but rather simply adds them to a queue of EC operations that need to be performed at the final proving stage. We call this deferral mechanism [*Goblin Plonk*](https://hackmd.io/@aztec-network/BkGNaHUJn/%2FdUsu57SOTBiQ4tS9KJMkMQ) (GP) (see also [this paper](https://eprint.iacr.org/2024/1651)). The advantage of GP is that at this final stage we transition to another elliptic curve called Grumpkin where these operations are more efficient. This curve-switch approach was initiated by [BCTV](https://eprint.iacr.org/2014/595.pdf), and a good example of it in the modern folding context is [CycleFold](https://eprint.iacr.org/2023/1192). GP is arguably simpler than CycleFold where we switch back and forth between the curves at every iteration of the IVC. The approaches are however incomparable, and for example, CycleFold has the advantage of the final IPA verifier size not growing with the number of iterations. (Although this verifier can be run server-side once for all client proofs using the [Halo](https://eprint.iacr.org/2019/1021)/[BCMS](https://eprint.iacr.org/2020/499) accumulation mechanism.) 
diff --git a/barretenberg/noir/bb_proof_verification/src/lib.nr b/barretenberg/noir/bb_proof_verification/src/lib.nr index 41dfb539ab7a..e38e494e4a55 100644 --- a/barretenberg/noir/bb_proof_verification/src/lib.nr +++ b/barretenberg/noir/bb_proof_verification/src/lib.nr @@ -21,11 +21,15 @@ pub global RECURSIVE_ZK_PROOF_LENGTH: u32 = 492 + 16; pub type UltraHonkZKProof = [Field; RECURSIVE_ZK_PROOF_LENGTH]; -pub fn verify_ultrahonk_proof( +// Verifies a non-zero-knowledge UltraHonk proof. +// +// Represents standard UltraHonk recursive verification for proofs that do not hide the witness. +// Use this only in situations where zero-knowledge is not required. +pub fn verify_honk_proof_non_zk( verification_key: UltraHonkVerificationKey, proof: UltraHonkProof, public_inputs: [Field; N], - key_hash: Field, + key_hash: Field, // Hash of the verification key ) { std::verify_proof_with_type( verification_key, @@ -36,11 +40,15 @@ pub fn verify_ultrahonk_proof( ); } +// Verifies a non-zero-knowledge Rollup UltraHonk proof with IPA (Inner Product Argument). +// +// This variant includes an IPA claim and proof appended to the standard UltraHonk proof, +// used to amortize IPA recursive verification costs in rollup circuits. pub fn verify_rolluphonk_proof( verification_key: RollupHonkVerificationKey, proof: RollupHonkProof, public_inputs: [Field; N], - key_hash: Field, + key_hash: Field, // Hash of the verification key ) { std::verify_proof_with_type( verification_key, @@ -51,11 +59,17 @@ pub fn verify_rolluphonk_proof( ); } -pub fn verify_ultrahonkzk_proof( +// Verifies a zero-knowledge UltraHonk proof. +// +// This verifier is for UltraHonk proofs constructed with zero-knowledge, which hide the witness +// values from the verifier. +// Note: We intentionally choose the generic name "verify_honk_proof" for this function, as we +// want ZK to be the default unless the user explicitly opts out. 
+pub fn verify_honk_proof( verification_key: UltraHonkVerificationKey, proof: UltraHonkZKProof, public_inputs: [Field; N], - key_hash: Field, + key_hash: Field, // Hash of the verification key ) { std::verify_proof_with_type( verification_key, diff --git a/cspell.json b/cspell.json index 929da0b01d0c..11f5be22c3b2 100644 --- a/cspell.json +++ b/cspell.json @@ -153,6 +153,7 @@ "herskind", "hevm", "homomorphic", + "hypernova", "ierc", "IGSE", "incentivized", diff --git a/noir-projects/noir-protocol-circuits/crates/hiding-kernel-to-public/src/main.nr b/noir-projects/noir-protocol-circuits/crates/hiding-kernel-to-public/src/main.nr index 729492dbfea6..5a2d34856bd1 100644 --- a/noir-projects/noir-protocol-circuits/crates/hiding-kernel-to-public/src/main.nr +++ b/noir-projects/noir-protocol-circuits/crates/hiding-kernel-to-public/src/main.nr @@ -11,7 +11,7 @@ pub struct HidingKernelToPublicPrivateInputs { impl HidingKernelToPublicPrivateInputs { pub fn execute(self) { - // Verify previous kernel, which contains a protogalaxy proof and a decider proof. + // Verify previous kernel, which contains a folding proof and a decider proof. // The associated public inputs and proofs are linked through the verification queue in the backend. // The proof type `PROOF_TYPE_HN_FINAL` designates the final proof in the client IVC sequence. if !std::runtime::is_unconstrained() { diff --git a/noir-projects/noir-protocol-circuits/crates/hiding-kernel-to-rollup/src/main.nr b/noir-projects/noir-protocol-circuits/crates/hiding-kernel-to-rollup/src/main.nr index f6f355c44ac0..84d21ee044eb 100644 --- a/noir-projects/noir-protocol-circuits/crates/hiding-kernel-to-rollup/src/main.nr +++ b/noir-projects/noir-protocol-circuits/crates/hiding-kernel-to-rollup/src/main.nr @@ -11,7 +11,7 @@ pub struct HidingKernelToRollupPrivateInputs { impl HidingKernelToRollupPrivateInputs { pub fn execute(self) { - // Verify previous kernel, which contains a protogalaxy proof and a decider proof. 
+ // Verify previous kernel, which contains a folding proof and a decider proof. // The associated public inputs and proofs are linked through the verification queue in the backend. // The proof type `PROOF_TYPE_HN_FINAL` designates the final proof in the chonk sequence. if !std::runtime::is_unconstrained() {