Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
62 commits
Select commit Hold shift + click to select a range
d6ddd76
add analysis
iakovenkos Feb 4, 2026
2b87428
add interleaved pippenger
iakovenkos Feb 5, 2026
9ef3957
add benches
iakovenkos Feb 5, 2026
351f0e5
first approximation
iakovenkos Feb 5, 2026
6dd3845
next step smth
iakovenkos Feb 6, 2026
6e1a58e
manifests match
iakovenkos Feb 6, 2026
6f3b684
some ordering fixes
iakovenkos Feb 6, 2026
54c0cb2
add multipcs tests and polynomial shift support
iakovenkos Feb 6, 2026
9a04119
rm infos
iakovenkos Feb 6, 2026
974af8d
debugging
iakovenkos Feb 6, 2026
2c379d2
tests ok, cleanup
iakovenkos Feb 7, 2026
f81f69b
clean-up
iakovenkos Feb 7, 2026
1131d22
MultiMega ZK and recursive verification working
iakovenkos Feb 7, 2026
544c9e0
Enable REPEATED_COMMITMENTS optimization for MultiMega
iakovenkos Feb 8, 2026
710c642
bench + upd md
iakovenkos Feb 10, 2026
1ccbab0
Merge remote-tracking branch 'origin/merge-train/barretenberg' into s…
iakovenkos Feb 18, 2026
e67028f
Merge branch 'merge-train/barretenberg' into si/multipcs-proto
iakovenkos Feb 18, 2026
6eabcf3
delete stale mds
iakovenkos Feb 18, 2026
5a0d701
fixes after bad merge
iakovenkos Feb 18, 2026
27544b2
Merge remote-tracking branch 'origin/merge-train/barretenberg' into s…
iakovenkos Feb 19, 2026
cd36385
backport
iakovenkos Feb 19, 2026
ac88439
rm clutter
iakovenkos Feb 19, 2026
6e505f1
Reduce MultiHonk duplication via inheritance and rename MultiMega -> …
iakovenkos Mar 11, 2026
613ade1
update tests
iakovenkos Mar 11, 2026
438c664
fix tests
iakovenkos Mar 12, 2026
06335d7
deduplicating
iakovenkos Mar 12, 2026
b2e7838
delete multi files
iakovenkos Mar 12, 2026
5f2cac8
delete multichonk md
iakovenkos Mar 12, 2026
8b77834
rm verbose shplemini interleaving tests, clean up verifier instance
iakovenkos Mar 12, 2026
e68d10f
unify further
iakovenkos Mar 12, 2026
1fedc22
Merge remote-tracking branch 'origin/merge-train/barretenberg' into s…
iakovenkos Mar 12, 2026
64ce4b3
fix a couple of merge issues
iakovenkos Mar 12, 2026
d1e52b8
prover and verifier are almost agnostic to the batch size
iakovenkos Mar 16, 2026
56ef0bc
rm branching from oink verifier
iakovenkos Mar 16, 2026
0f5257d
tiny clean up
iakovenkos Mar 16, 2026
08d9329
fix batched translator test# Please enter the commit message for your…
iakovenkos Mar 16, 2026
8e74526
Merge remote-tracking branch 'origin/merge-train/barretenberg' into s…
iakovenkos Mar 16, 2026
e553d05
fix merge conflict resolutions: adopt HasGeminiMasking, update batche…
iakovenkos Mar 16, 2026
5d82742
fix build/merge issues
iakovenkos Mar 16, 2026
218db6a
test fixes
iakovenkos Mar 16, 2026
4b759ac
mem efficient interleaving
iakovenkos Mar 17, 2026
87acc7f
fix megahonk<4>
iakovenkos Mar 17, 2026
ed0ee73
decluttering
iakovenkos Mar 17, 2026
866ae66
rm dead code
iakovenkos Mar 17, 2026
49f1c1e
moore clean up
iakovenkos Mar 17, 2026
f131a2f
Merge remote-tracking branch 'origin/merge-train/barretenberg' into s…
iakovenkos Mar 17, 2026
b77cfe6
clean up
iakovenkos Mar 17, 2026
723ada8
fixes and deduplication
iakovenkos Mar 17, 2026
cca146f
tests green
iakovenkos Mar 18, 2026
b7c0124
format
iakovenkos Mar 18, 2026
542de10
rm clutter from prover polys
iakovenkos Mar 18, 2026
30de600
deduplicate proof length
iakovenkos Mar 18, 2026
7aacfcb
fix tests
iakovenkos Mar 18, 2026
6a33240
Merge branch 'merge-train/barretenberg' into si/multipcs-proto
iakovenkos Mar 18, 2026
47be4a8
fix docs
iakovenkos Mar 18, 2026
cb2384e
memory-efficient on-the-fly interleaving
iakovenkos Mar 18, 2026
d3d63e5
simplify
iakovenkos Mar 18, 2026
49dfb34
unified BS-agnostic oink prover and verifier
iakovenkos Mar 19, 2026
6b9a7e8
add DualMega (BS=2) interleaving flavor
iakovenkos Mar 19, 2026
841aa7c
add DualUltra (BS=2) interleaving flavor
iakovenkos Mar 19, 2026
a6298a7
rm shpl bench
iakovenkos Mar 19, 2026
43ae4ec
review fixes and enable DualUltra/DualUltraZK in UltraHonk tests
iakovenkos Mar 19, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,9 @@

#include "barretenberg/eccvm/eccvm_circuit_builder.hpp"
#include "barretenberg/eccvm/eccvm_prover.hpp"
#include "barretenberg/eccvm/eccvm_test_utils.hpp"
#include "barretenberg/eccvm/eccvm_verifier.hpp"
#include "barretenberg/srs/global_crs.hpp"

using namespace benchmark;
using namespace bb;
Expand Down Expand Up @@ -40,13 +42,16 @@ Builder generate_trace(size_t target_num_gates)
op_queue->merge();
}

// Add hiding op (required before creating the builder)
eccvm_test_utils::add_hiding_op_for_test(op_queue);
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why was this not needed before?


Builder builder{ op_queue };
return builder;
}

void eccvm_generate_prover(State& state) noexcept
{

srs::init_file_crs_factory(bb::srs::bb_crs_path());
size_t target_num_gates = 1 << static_cast<size_t>(state.range(0));
for (auto _ : state) {
Builder builder = generate_trace(target_num_gates);
Expand All @@ -57,7 +62,7 @@ void eccvm_generate_prover(State& state) noexcept

void eccvm_prove(State& state) noexcept
{

srs::init_file_crs_factory(bb::srs::bb_crs_path());
size_t target_num_gates = 1 << static_cast<size_t>(state.range(0));
Builder builder = generate_trace(target_num_gates);
std::shared_ptr<Transcript> prover_transcript = std::make_shared<Transcript>();
Expand Down
Original file line number Diff line number Diff line change
@@ -1,13 +1,15 @@
#include <benchmark/benchmark.h>

#include "barretenberg/benchmark/ultra_bench/mock_circuits.hpp"
#include "barretenberg/common/bb_bench.hpp"
#include "barretenberg/stdlib_circuit_builders/mega_circuit_builder.hpp"
#include "barretenberg/ultra_honk/ultra_prover.hpp"

using namespace benchmark;
using namespace bb;

/**
* @brief Benchmark: Construction of an Ultra Honk proof for a circuit determined by the provided circuit function
* @brief Benchmark: Construction of a Mega Honk proof for a circuit determined by the provided circuit function
*/
static void construct_proof_megahonk(State& state, void (*test_circuit_function)(MegaCircuitBuilder&, size_t)) noexcept
{
Expand All @@ -16,16 +18,20 @@ static void construct_proof_megahonk(State& state, void (*test_circuit_function)
state, test_circuit_function, num_iterations);
}

/**
* @brief Benchmark: Construction of a Ultra Honk proof with 2**n gates
*/
/**
 * @brief Benchmark: Mega Honk proof construction for a basic arithmetic circuit with 2**n gates.
 * @param state Google Benchmark state; range(0) supplies n, the log2 of the gate count.
 */
static void construct_proof_megahonk_power_of_2(State& state) noexcept
{
    const auto num_gates_log2 = static_cast<size_t>(state.range(0));
    bb::mock_circuits::construct_proof_with_specified_num_iterations<MegaProver>(
        state, &bb::mock_circuits::generate_basic_arithmetic_circuit<MegaCircuitBuilder>, num_gates_log2);
}

/**
 * @brief Benchmark: MultiHonk proof construction for a basic arithmetic circuit with 2**n gates.
 * @param state Google Benchmark state; range(0) supplies n, the log2 of the gate count.
 */
static void construct_proof_multi_megahonk_power_of_2(State& state) noexcept
{
    const auto num_gates_log2 = static_cast<size_t>(state.range(0));
    bb::mock_circuits::construct_proof_with_specified_num_iterations<MultiHonkProver>(
        state, &bb::mock_circuits::generate_basic_arithmetic_circuit<MegaCircuitBuilder>, num_gates_log2);
}

static void get_row_power_of_2(State& state) noexcept
{
auto log2_of_gates = static_cast<size_t>(state.range(0));
Expand All @@ -41,24 +47,15 @@ static void get_row_power_of_2(State& state) noexcept
}
}

// Define benchmarks

// This exists due to an issue where get_row was blowing up in time
BENCHMARK_CAPTURE(construct_proof_megahonk, sha256, &generate_sha256_test_circuit<MegaCircuitBuilder>)
->Unit(kMillisecond);
BENCHMARK_CAPTURE(construct_proof_megahonk,
ecdsa_verification,
&stdlib::generate_ecdsa_verification_test_circuit<MegaCircuitBuilder>)
->Unit(kMillisecond);

BENCHMARK(get_row_power_of_2)
// 2**15 gates to 2**20 gates
->DenseRange(15, 20)
->Unit(kMillisecond);

BENCHMARK(construct_proof_megahonk_power_of_2)
// 2**15 gates to 2**20 gates
->DenseRange(15, 20)
->Unit(kMillisecond);
BENCHMARK(get_row_power_of_2)->DenseRange(15, 20)->Unit(kMillisecond);
BENCHMARK(construct_proof_megahonk_power_of_2)->DenseRange(15, 20)->Unit(kMillisecond);
BENCHMARK(construct_proof_multi_megahonk_power_of_2)->DenseRange(16, 19)->Unit(kMillisecond);

BENCHMARK_MAIN();
Original file line number Diff line number Diff line change
Expand Up @@ -321,16 +321,19 @@ void BatchedHonkTranslatorProver::execute_joint_pcs()
auto mega_zk_shifted = mega_zk_inst->polynomials.get_to_be_shifted();
auto trans_shifted = translator_key->proving_key->polynomials.get_pcs_to_be_shifted();
auto joint_shifted = concatenate(mega_zk_shifted, trans_shifted);
polynomial_batcher.set_to_be_shifted_by_one(joint_shifted);
polynomial_batcher.set_to_be_shifted(joint_shifted);

// Register MegaZK masking tails with the joint batcher
if (mega_zk_inst->masking_tail_data.is_active()) {
mega_zk_inst->masking_tail_data.add_tails_to_batcher(mega_zk_inst->polynomials, polynomial_batcher);
}

const auto rho = transcript->template get_challenge<typename Curve::ScalarField>("rho");

const OpeningClaim prover_opening_claim =
ShpleminiProver_<Curve>::prove(joint_circuit_size,
polynomial_batcher,
rho,
joint_challenge,
ck,
transcript,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -53,8 +53,8 @@ typename BatchedHonkTranslatorVerifier_<Curve>::OinkResult BatchedHonkTranslator

return OinkResult{
.public_inputs = mega_zk_verifier_instance->public_inputs,
.calldata_commitment = mega_zk_verifier_instance->witness_commitments.calldata,
.ecc_op_wires = mega_zk_verifier_instance->witness_commitments.get_ecc_op_wires().get_copy(),
.calldata_commitment = mega_zk_verifier_instance->received_commitments.calldata,
.ecc_op_wires = mega_zk_verifier_instance->received_commitments.get_ecc_op_wires().get_copy(),
};
}

Expand Down Expand Up @@ -287,27 +287,31 @@ typename BatchedHonkTranslatorVerifier_<Curve>::ReductionResult BatchedHonkTrans
}
}();

// Translator claim components (translator first: its masking poly must be at position 0 for Shplemini offset=2).
// Translator claim components.
auto concat_shift_evals = TranslatorFlavor::reconstruct_concatenated_evaluations(
trans_evals.get_groups_to_be_concatenated_shifted(), std::span<const FF>(joint_challenge));

RefVector<Commitment> joint_unshifted_comms = trans_commitments.get_pcs_unshifted();
RefVector<FF> joint_unshifted_evals = trans_evals.get_pcs_unshifted();
auto trans_unshifted_comms = trans_commitments.get_pcs_unshifted();
auto trans_unshifted_evals = trans_evals.get_pcs_unshifted();
auto trans_shifted_comms = trans_commitments.get_pcs_to_be_shifted();
auto trans_pcs_shifted_evals = trans_evals.get_pcs_shifted();

// Build joint claim batchers: translator-first in unshifted, MegaZK-first in shifted (matching prover).
RefVector<Commitment> joint_unshifted_comms = trans_unshifted_comms;
RefVector<FF> joint_unshifted_evals = trans_unshifted_evals;

// Append MegaZK unshifted (no masking poly — translator provides the joint masking poly).
// Extend unshifted with MegaZK entries.
for (auto& comm : mega_zk_commitments.get_unshifted()) {
joint_unshifted_comms.push_back(comm);
}
for (auto& eval : mega_zk_evals.get_unshifted()) {
joint_unshifted_evals.push_back(eval);
}

// Shifted: MegaZK first, then translator.
// Shifted: MegaZK first, then translator (matching prover ordering).
RefVector<Commitment> joint_shifted_comms = mega_zk_commitments.get_to_be_shifted();
RefVector<FF> joint_shifted_evals = mega_zk_evals.get_shifted();

auto trans_shifted_comms = trans_commitments.get_pcs_to_be_shifted();
auto trans_pcs_shifted_evals = trans_evals.get_pcs_shifted();
for (auto& comm : trans_shifted_comms) {
joint_shifted_comms.push_back(comm);
}
Expand Down Expand Up @@ -351,7 +355,8 @@ typename BatchedHonkTranslatorVerifier_<Curve>::ReductionResult BatchedHonkTrans
{
// Reconstruct MegaZK commitments from the stored verifier instance.
MegaZKVerifierCommitments mega_zk_commitments{ mega_zk_verifier_instance->get_vk(),
mega_zk_verifier_instance->witness_commitments };
mega_zk_verifier_instance->received_commitments };

auto trans_commitments = verify_translator_oink(
joint_proof, evaluation_input_x, batching_challenge_v, accumulated_result, op_queue_wire_commitments);
bool sumcheck_verified = verify_joint_sumcheck();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -57,32 +57,48 @@ template <typename Curve> class BatchedHonkTranslatorVerifier_ {
using TransBF = typename TransFlavor::BF;

// Joint RepeatedCommitmentsData for Shplemini's remove_repeated_commitments optimization.
// Joint unshifted = [Trans_unshifted(TU), MZK_unshifted(P+W)]. The translator's gemini_masking_poly
// is at position 0 of unshifted and is consumed by Shplemini's offset=2 (Q + masking).
// After Shplemini's offset=2, the virtual layout is:
// Unshifted: [Trans_rest(TU-1) | MZK_precomputed(P) | MZK_witness(W)]
// Shifted: [MZK_shifted(S) | Trans_shifted(TS)]
// After Shplemini's offset=2 (Q_commitment + translator_masking_poly), the virtual layout is:
// Unshifted: [Trans_unshifted_no_masking(TU-1) | MegaZK_precomputed(P) | MegaZK_witness(W)]
// Shifted: [MegaZK_shifted(S) | Trans_shifted(TS)]
//
// Range 1 (Translator merged): ordered(5)+z_perm(1)+concat(5) in unshifted ↔ same in shifted
// Range 2 (MegaZK): witness[0..S-1] ↔ mega_zk_shifted[0..S-1]
// Range 1 (Translator): ordered(5)+z_perm(1) in unshifted ↔ same in shifted
// Range 2 (Translator): concat(5) in unshifted ↔ same in shifted
// Combined into two DuplicateRanges.
static constexpr RepeatedCommitmentsData REPEATED_COMMITMENTS = [] {
constexpr size_t TU = TranslatorFlavor::NUM_PCS_UNSHIFTED; // includes masking(1)
constexpr size_t P = MegaZKFlavorT::NUM_PRECOMPUTED_ENTITIES;
constexpr size_t W = MegaZKFlavorT::NUM_WITNESS_ENTITIES;
constexpr size_t W = MegaZKFlavorT::REPEATED_COMMITMENTS.first.duplicate_start -
MegaZKFlavorT::REPEATED_COMMITMENTS.first.original_start;
constexpr size_t S = MegaZKFlavorT::NUM_SHIFTED_ENTITIES;
// Translator repeated: ordered(5)+z_perm(1)+concat(5) in Trans_rest ↔ Trans_shifted
// Trans_rest starts at virtual 0; repeated starts at ordered_extra(1)+op(1)=2
constexpr size_t TRANS_REPEAT_START = TranslatorFlavor::REPEATED_COMMITMENTS.first.original_start;
constexpr size_t TRANS_REPEAT_COUNT =
TranslatorFlavor::REPEATED_COMMITMENTS.first.count + TranslatorFlavor::REPEATED_COMMITMENTS.second.count;
// In shifted section: op_queue entries precede the repeated entries
constexpr size_t TRANS_SHIFTED_SKIP = TranslatorFlavor::NUM_PCS_TO_BE_SHIFTED - TRANS_REPEAT_COUNT;
return RepeatedCommitmentsData(TRANS_REPEAT_START, // Translator original in unshifted
(TU - 1) + P + W + S + TRANS_SHIFTED_SKIP, // Translator duplicate in shifted
TRANS_REPEAT_COUNT, // Translator count
(TU - 1) + P, // MegaZK original: witness start in unshifted
(TU - 1) + P + W, // MegaZK duplicate: shifted start
S); // MegaZK count
constexpr size_t TU = TranslatorFlavor::NUM_PCS_UNSHIFTED;
constexpr size_t TS = TranslatorFlavor::NUM_PCS_TO_BE_SHIFTED;
// In the joint prover poly array (after offset=2 consumes Q + trans masking):
// Unshifted: [ordered_extra(1), op(1), ordered(5), z_perm(1), concat(5), mega_precomputed(P), mega_witness(W)]
// Shifted: [mega_shifted(S), op_queue(3), ordered(5), z_perm(1), concat(5)]
//
// Translator repeated:
// Range 1: ordered(5)+z_perm(1) at unshifted[2..7] ↔ shifted[(TU-1)+P+W+S+3..(TU-1)+P+W+S+8]
// Range 2: concat(5) at unshifted[8..12] ↔ shifted[(TU-1)+P+W+S+9..(TU-1)+P+W+S+13]
// MegaZK repeated:
// witness ↔ mega_shifted: unshifted[(TU-1)+P-1..] ↔ shifted[(TU-1)+P+W-1..]
// (using MegaZK standalone offsets P-1, P+W-1 shifted by TU-1)
//
// We use two DuplicateRanges. Range 1 = translator ordered+z_perm+concat, Range 2 = MegaZK.
constexpr size_t TRANS_ORIG_START =
TranslatorFlavor::REPEATED_COMMITMENTS.first.original_start; // 2 (ordered[0])
constexpr size_t TRANS_TOTAL_COUNT = TranslatorFlavor::REPEATED_COMMITMENTS.first.count +
TranslatorFlavor::REPEATED_COMMITMENTS.second.count; // 6+5=11
// op_queue skip in shifted = 3
constexpr size_t TRANS_SHIFTED_SKIP = TS - TRANS_TOTAL_COUNT; // 14-11 = 3 (op_queue wires are not repeated)
constexpr size_t MEGA_ZK_ORIG = (TU - 1) + MegaZKFlavorT::REPEATED_COMMITMENTS.first.original_start;
constexpr size_t MEGA_ZK_DUP = (TU - 1) + MegaZKFlavorT::REPEATED_COMMITMENTS.first.duplicate_start;
return RepeatedCommitmentsData(TRANS_ORIG_START, // Trans original: ordered[0] in unshifted
(TU - 1) + P + W + S +
TRANS_SHIFTED_SKIP, // Trans duplicate: ordered[0] in shifted
TRANS_TOTAL_COUNT, // Trans count: 11
MEGA_ZK_ORIG, // MegaZK original: witness start in unshifted
MEGA_ZK_DUP, // MegaZK duplicate: mega_shifted start in shifted
S, // MegaZK count
2 /* shplemini_offset: Q + translator_masking_poly */);
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Where is the 2 coming from?

}();

/**
Expand Down
2 changes: 1 addition & 1 deletion barretenberg/cpp/src/barretenberg/chonk/chonk.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -158,7 +158,7 @@ Chonk::perform_recursive_verification_and_databus_consistency_checks(
}

// Extract the witness commitments and public inputs from the incoming verifier instance
WitnessCommitments witness_commitments = std::move(verifier_instance->witness_commitments);
WitnessCommitments witness_commitments = std::move(verifier_instance->received_commitments);
std::vector<StdlibFF> public_inputs = std::move(verifier_instance->public_inputs);

if (verifier_inputs.is_kernel) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,8 @@ template <typename Curve> struct ClaimBatcher_ {
};

std::optional<Batch> unshifted; // commitments and evaluations of unshifted polynomials
std::optional<Batch> shifted; // commitments of to-be-shifted-by-1 polys, evals of their shifts
std::optional<Batch> shifted; // commitments of to-be-shifted polys, evals of their shifts
size_t shift_exponent = 1; // shift depth: 1 for standard (G/X), k for interleaved (G/X^k)

Batch get_unshifted() { return (unshifted) ? *unshifted : Batch{}; }
Batch get_shifted() { return (shifted) ? *shifted : Batch{}; }
Expand All @@ -47,9 +48,10 @@ template <typename Curve> struct ClaimBatcher_ {
* @details Computes scalars s_0, s_1 given by
* \f[
* - s_0 = \left(\frac{1}{z-r} + \nu \times \frac{1}{z+r}\right) \f],
* - s_1 = \frac{1}{r} \times \left(\frac{1}{z-r} - \nu \times \frac{1}{z+r}\right)
* - s_1 = \frac{1}{r^k} \times \left(\frac{1}{z-r} - \nu \times \frac{1}{z+r}\right)
* \f]
* where the scalars used to batch the claims are given by
* where k is the shift_exponent member (1 for standard shifts, BS for interleaved polynomials),
* and the scalars used to batch the claims are given by
* \f[
* \left(
* - s_0,
Expand Down Expand Up @@ -77,9 +79,25 @@ template <typename Curve> struct ClaimBatcher_ {
unshifted->scalar = inverse_vanishing_eval_pos + nu_challenge * inverse_vanishing_eval_neg;
}
if (shifted) {
// r⁻¹ ⋅ (1/(z−r) − ν/(z+r))
shifted->scalar =
r_challenge.invert() * (inverse_vanishing_eval_pos - nu_challenge * inverse_vanishing_eval_neg);
// r⁻ᵏ ⋅ (1/(z−r) + (-1)^k ⋅ ν/(z+r)) where k is the shift_exponent
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Maybe it's worth giving some more details here, like why is it (-1)^k and not always -1

// This comes from A₀₋(X) = F(X) + (-1)^k · G(X)/r^k, needed because (-r)^k = (-1)^k · r^k
// For standard shifts k=1 (odd): r⁻¹ ⋅ (1/(z−r) − ν/(z+r))
// For interleaved shifts k=4 (even): r⁻⁴ ⋅ (1/(z−r) + ν/(z+r))
if (shift_exponent == 1) {
// Fast path: avoid extra multiplication by neg_sign (important for recursive verifiers)
shifted->scalar =
r_challenge.invert() * (inverse_vanishing_eval_pos - nu_challenge * inverse_vanishing_eval_neg);
} else {
Fr r_power = r_challenge;
for (size_t i = 1; i < shift_exponent; ++i) {
r_power *= r_challenge;
}
const Fr r_inv_shift = r_power.invert();
// (-1)^k: even k gives +1, odd k gives -1 (but k=1 handled above)
const Fr neg_sign = (shift_exponent % 2 == 0) ? Fr(1) : Fr(-1);
shifted->scalar =
r_inv_shift * (inverse_vanishing_eval_pos + neg_sign * nu_challenge * inverse_vanishing_eval_neg);
}
}
}
/**
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -165,6 +165,42 @@ template <class Curve> class CommitmentKey {
};

CommitBatch start_batch() { return CommitBatch{ this, {}, {} }; }

/**
* @brief Commit to an interleaved group of polynomials without materializing the full buffer.
* @details Computes [F] where F(X) = Σⱼ fⱼ(X^{batch_size}) · X^j for j=0..batch_size-1.
* If fewer than BATCH_SIZE chunks are provided, missing slots are zero.
* @param chunks Span of polynomial spans representing the group members
*/
/**
 * @brief Commit to an interleaved group of polynomials without materializing the full buffer.
 * @details Produces [F] where F(X) = Σⱼ fⱼ(X^{batch_size}) · X^j for j = 0..batch_size-1.
 * Slots beyond chunks.size() (up to BATCH_SIZE) are treated as the zero polynomial.
 * @tparam BATCH_SIZE Number of interleaved slots in the group.
 * @param chunks Span of polynomial spans representing the group members (at most BATCH_SIZE).
 * @return Commitment to the interleaved polynomial.
 */
template <size_t BATCH_SIZE> Commitment commit_interleaved(std::span<const PolynomialSpan<const Fr>> chunks) const
{
    if constexpr (BATCH_SIZE == 1) {
        // Degenerate case: a single slot is just an ordinary commitment.
        BB_ASSERT(chunks.size() == 1, "commit_interleaved<1> expects exactly 1 chunk");
        return commit(chunks[0]);
    } else {
        if (chunks.size() > BATCH_SIZE) {
            throw_or_abort("commit_interleaved: chunks.size() must be <= BATCH_SIZE");
        }

        // The largest end index across all chunks determines how far the interleaved
        // polynomial reaches, hence how many SRS points the MSM consumes.
        size_t max_chunk_end = 0;
        for (const auto& chunk : chunks) {
            max_chunk_end = std::max(max_chunk_end, chunk.end_index());
        }
        const size_t required_points = max_chunk_end * BATCH_SIZE;

        std::span<const Commitment> srs_points = get_monomial_points();
        if (required_points > get_monomial_size()) {
            throw_or_abort(format("Attempting to commit to interleaved polynomial that needs ",
                                  required_points,
                                  " points with an SRS of size ",
                                  get_monomial_size()));
        }

        return scalar_multiplication::pippenger_interleaved<Curve>(
            chunks, std::span<const Commitment>{ srs_points.data(), required_points }, BATCH_SIZE);
    }
}
};

} // namespace bb
Loading
Loading