Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
35 commits
Select commit Hold shift + click to select a range
11e848d
create file/folder
iakovenkos Apr 8, 2025
3d5de5a
added indicator array computation method + test template
iakovenkos Apr 9, 2025
44cb8c6
fix build
iakovenkos Apr 9, 2025
174e881
tests + rename
iakovenkos Apr 9, 2025
4b15753
Merge branch 'master' into si/bit-by-bit-for-witness-circuit-sizes
iakovenkos Apr 9, 2025
3ce110d
undo changes in UH recursive
iakovenkos Apr 9, 2025
1b4a34d
fix build
iakovenkos Apr 9, 2025
ff18da4
rename+docs
iakovenkos Apr 10, 2025
56b25b2
stray info
iakovenkos Apr 10, 2025
c33a1b0
split shplemini into padding/non padding
iakovenkos Apr 10, 2025
f99869b
small fix for barycentric data to access inverted denominators
iakovenkos Apr 10, 2025
c2b45b7
fix doxygen
iakovenkos Apr 10, 2025
239190f
fix build
iakovenkos Apr 10, 2025
ca593e0
Merge branch 'si/bit-by-bit-for-witness-circuit-sizes' into si/use-in…
iakovenkos Apr 10, 2025
8df3c22
integrating
iakovenkos Apr 10, 2025
aa4c0f9
fix off-by-one issue
iakovenkos Apr 10, 2025
0fbe409
Merge branch 'si/bit-by-bit-for-witness-circuit-sizes' into si/use-in…
iakovenkos Apr 10, 2025
3613f19
sumcheck fine?
iakovenkos Apr 10, 2025
d58dbdd
Merge branch 'master' into si/bit-by-bit-for-witness-circuit-sizes
iakovenkos Apr 11, 2025
a3fd9a6
Merge branch 'si/bit-by-bit-for-witness-circuit-sizes' into si/use-in…
iakovenkos Apr 11, 2025
e0c1aaa
dummy round removal
iakovenkos Apr 11, 2025
d079e10
Merge branch 'master' into si/use-indicator-padding-array-instead-of-…
iakovenkos Apr 21, 2025
75877c2
better separation of padding and non-padding + simplifications
iakovenkos Apr 21, 2025
ce290bc
docs + simplify sumcheck logic
iakovenkos Apr 21, 2025
dac6788
Merge branch 'master' into si/use-indicator-padding-array-instead-of-…
iakovenkos Apr 21, 2025
f11dd3d
fixing tests
iakovenkos Apr 22, 2025
ff20099
clean up sumcheck verifier constructors
iakovenkos Apr 22, 2025
da05691
add constraints
iakovenkos Apr 22, 2025
bacaae4
fix AcirHonkRecursionConstraint tests
iakovenkos Apr 22, 2025
266e165
Merge branch 'master' into si/use-indicator-padding-array-instead-of-…
iakovenkos Apr 22, 2025
b26a7e3
template padding indicator array on curve to support native
iakovenkos Apr 23, 2025
5f317e1
replace log_n with padding array everywhere, remove redundant code
iakovenkos Apr 23, 2025
6fa8d9c
more clean-up
iakovenkos Apr 23, 2025
d99580e
Merge branch 'master' into si/use-indicator-padding-array-instead-of-…
iakovenkos Apr 23, 2025
cc0edd7
minor cleanup
ledwards2225 Apr 23, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -366,8 +366,6 @@ template <typename Curve> class GeminiVerifier_ {
{
const size_t log_n = multilinear_challenge.size();
const bool has_interleaved = claim_batcher.interleaved.has_value();
// GeminiVerifier is only used in tests, so no padding required.
static constexpr bool use_padding = false;

const Fr rho = transcript->template get_challenge<Fr>("rho");

Expand Down Expand Up @@ -450,10 +448,11 @@ template <typename Curve> class GeminiVerifier_ {
p_pos = transcript->template receive_from_prover<Fr>("Gemini:P_0_pos");
p_neg = transcript->template receive_from_prover<Fr>("Gemini:P_0_neg");
}
std::vector<Fr> padding_indicator_array(log_n, Fr{ 1 });

// Compute the evaluations Aₗ(r^{2ˡ}) for l = 0, ..., m-1
std::vector<Fr> gemini_fold_pos_evaluations = compute_fold_pos_evaluations<use_padding>(
log_n, batched_evaluation, multilinear_challenge, r_squares, evaluations, p_neg);
std::vector<Fr> gemini_fold_pos_evaluations = compute_fold_pos_evaluations(
padding_indicator_array, batched_evaluation, multilinear_challenge, r_squares, evaluations, p_neg);
// Extract the evaluation A₀(r) = A₀₊(r) + P₊(r^s)
auto full_a_0_pos = gemini_fold_pos_evaluations[0];
std::vector<OpeningClaim<Curve>> fold_polynomial_opening_claims;
Expand Down Expand Up @@ -539,27 +538,33 @@ template <typename Curve> class GeminiVerifier_ {
* In the case of interleaving, the first "negative" evaluation has to be corrected by the contribution from \f$
* P_{-}(-r^s)\f$, where \f$ s \f$ is the size of the group to be interleaved.
*
* @param log_n The log of the size of the polynomial being opened.
* This method uses `padding_indicator_array`, whose i-th entry is FF{1} if i < log_n and 0 otherwise.
 * We use these entries to either assign `eval_pos_prev` the value `eval_pos` computed in the current iteration of
 * the loop, or to propagate the batched evaluation of the multilinear polynomials to the next iteration. This
 * ensures the correctness of the computation of the required positive evaluations.
*
* To ensure that dummy evaluations cannot be used to tamper with the final batch_mul result, we multiply dummy
* positive evaluations by the entries of `padding_indicator_array`.
*
* @param padding_indicator_array An array with first log_n entries equal to 1, and the remaining entries are 0.
* @param batched_evaluation The evaluation of the batched polynomial at \f$ (u_0, \ldots, u_{d-1})\f$.
* @param evaluation_point Evaluation point \f$ (u_0, \ldots, u_{d-1}) \f$ padded to CONST_PROOF_SIZE_LOG_N.
 * @param challenge_powers Powers of \f$ r \f$, \f$ r^2 \f$, ..., \f$ r^{2^{d-1}} \f$.
* @param fold_neg_evals Evaluations \f$ A_{i-1}(-r^{2^{i-1}}) \f$.
* @return Evaluation \f$ A_0(r) \f$.
 * @return Evaluations \f$ A_{i}(r^{2^{i}}) \f$ for \f$ i = 0, \ldots, \text{virtual\_log\_n} - 1 \f$.
*/
template <bool use_padding>
static std::vector<Fr> compute_fold_pos_evaluations(
const size_t log_n,
const Fr& batched_evaluation,
std::span<const Fr> evaluation_point, // CONST_PROOF_SIZE
std::span<const Fr> challenge_powers, // r_squares CONST_PROOF_SIZE_LOG_N
std::span<const Fr> fold_neg_evals,
Fr p_neg = Fr(0))
static std::vector<Fr> compute_fold_pos_evaluations(std::span<const Fr> padding_indicator_array,
const Fr& batched_evaluation,
std::span<const Fr> evaluation_point, // size = virtual_log_n
std::span<const Fr> challenge_powers, // size = virtual_log_n
std::span<const Fr> fold_neg_evals, // size = virtual_log_n
Fr p_neg = Fr(0))
{
const size_t virtual_log_n = evaluation_point.size();

std::vector<Fr> evals(fold_neg_evals.begin(), fold_neg_evals.end());

Fr eval_pos_prev = batched_evaluation;
// Virtual size allows padding in Shplemini.
const size_t virtual_log_n = use_padding ? evaluation_point.size() : log_n;

Fr zero{ 0 };
if constexpr (Curve::is_stdlib_type) {
Expand All @@ -568,8 +573,6 @@ template <typename Curve> class GeminiVerifier_ {

std::vector<Fr> fold_pos_evaluations;
fold_pos_evaluations.reserve(virtual_log_n);
// Either a computed eval of A_i at r^{2^i}, or 0
Fr value_to_emplace;

// Add the contribution of P-((-r)ˢ) to get A_0(-r), which is 0 if there are no interleaved polynomials
evals[0] += p_neg;
Expand All @@ -585,29 +588,14 @@ template <typename Curve> class GeminiVerifier_ {
Fr eval_pos = ((challenge_power * eval_pos_prev * 2) - eval_neg * (challenge_power * (Fr(1) - u) - u));
// Divide by the denominator
eval_pos *= (challenge_power * (Fr(1) - u) + u).invert();
if constexpr (use_padding) {
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

split this method into two, this one uses real log_n which is ok in native context and when log_n is a constexpr integer as in ECCVM and Translator, the method below handles padding in stdlib_context. seems cleaner this way

if constexpr (Curve::is_stdlib_type) {
auto builder = evaluation_point[0].get_context();
// TODO(https://github.com/AztecProtocol/barretenberg/issues/1114): insecure dummy_round derivation!
stdlib::bool_t dummy_round = stdlib::witness_t(builder, l > log_n);
// If current index is bigger than log_n, we propagate `batched_evaluation` to the next
// round. Otherwise, current `eval_pos` A₍ₗ₋₁₎(−r²⁽ˡ⁻¹⁾) becomes `eval_pos_prev` in the round l-2.
eval_pos_prev = Fr::conditional_assign(dummy_round, eval_pos_prev, eval_pos);
// If current index is bigger than log_n, we emplace 0, which is later multiplied against
// Commitment::one().
value_to_emplace = Fr::conditional_assign(dummy_round, zero, eval_pos_prev);

} else {
// Perform the same logic natively
bool dummy_round = l > log_n;
eval_pos_prev = dummy_round ? eval_pos_prev : eval_pos;
value_to_emplace = dummy_round ? zero : eval_pos_prev;
};
} else {
eval_pos_prev = eval_pos;
value_to_emplace = eval_pos_prev;
}
fold_pos_evaluations.emplace_back(value_to_emplace);

// If current index is bigger than log_n, we propagate `batched_evaluation` to the next
// round. Otherwise, current `eval_pos` A₍ₗ₋₁₎(−r²⁽ˡ⁻¹⁾) becomes `eval_pos_prev` in the round l-2.
eval_pos_prev =
padding_indicator_array[l - 1] * eval_pos + (Fr{ 1 } - padding_indicator_array[l - 1]) * eval_pos_prev;
// If current index is bigger than log_n, we emplace 0, which is later multiplied against
// Commitment::one().
fold_pos_evaluations.emplace_back(padding_indicator_array[l - 1] * eval_pos_prev);
}

std::reverse(fold_pos_evaluations.begin(), fold_pos_evaluations.end());
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,8 +11,6 @@ using Curve = curve::Grumpkin;

class IPATest : public CommitmentTest<Curve> {
public:
static constexpr bool USE_PADDING = false;

using Fr = typename Curve::ScalarField;
using GroupElement = typename Curve::Element;
using CK = CommitmentKey<Curve>;
Expand All @@ -25,7 +23,7 @@ class IPATest : public CommitmentTest<Curve> {
using ShplonkVerifier = ShplonkVerifier_<Curve>;
using GeminiProver = GeminiProver_<Curve>;
using GeminiVerifier = GeminiVerifier_<Curve>;
using ShpleminiVerifier = ShpleminiVerifier_<Curve, USE_PADDING>;
using ShpleminiVerifier = ShpleminiVerifier_<Curve>;
using ClaimBatcher = ClaimBatcher_<Curve>;
using ClaimBatch = ClaimBatcher::Batch;

Expand Down Expand Up @@ -305,8 +303,14 @@ TEST_F(IPATest, ShpleminiIPAWithShift)

auto verifier_transcript = NativeTranscript::verifier_init_empty(prover_transcript);

const auto batch_opening_claim = ShpleminiVerifier::compute_batch_opening_claim(
small_log_n, mock_claims.claim_batcher, mle_opening_point, vk->get_g1_identity(), verifier_transcript);
std::array<Fr, small_log_n> padding_indicator_array;
std::ranges::fill(padding_indicator_array, Fr{ 1 });

const auto batch_opening_claim = ShpleminiVerifier::compute_batch_opening_claim(padding_indicator_array,
mock_claims.claim_batcher,
mle_opening_point,
vk->get_g1_identity(),
verifier_transcript);

auto result = PCS::reduce_verify_batch_opening_claim(batch_opening_claim, vk, verifier_transcript);
// auto result = PCS::reduce_verify(vk, shplonk_verifier_claim, verifier_transcript);
Expand Down Expand Up @@ -357,7 +361,10 @@ TEST_F(IPATest, ShpleminiIPAShiftsRemoval)
// vectors corresponding to the "shifted" commitment
auto verifier_transcript = NativeTranscript::verifier_init_empty(prover_transcript);

const auto batch_opening_claim = ShpleminiVerifier::compute_batch_opening_claim(small_log_n,
std::array<Fr, small_log_n> padding_indicator_array;
std::ranges::fill(padding_indicator_array, Fr{ 1 });

const auto batch_opening_claim = ShpleminiVerifier::compute_batch_opening_claim(padding_indicator_array,
mock_claims.claim_batcher,
mle_opening_point,
vk->get_g1_identity(),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,6 @@ using Curve = curve::BN254;

class KZGTest : public CommitmentTest<Curve> {
public:
static constexpr bool USE_PADDING = true;
using Fr = typename Curve::ScalarField;
using Commitment = typename Curve::AffineElement;
using PCS = KZG<curve::BN254>;
Expand All @@ -19,7 +18,7 @@ class KZGTest : public CommitmentTest<Curve> {
using ShplonkVerifier = ShplonkVerifier_<Curve>;
using GeminiProver = GeminiProver_<Curve>;
using GeminiVerifier = GeminiVerifier_<Curve>;
using ShpleminiVerifier = ShpleminiVerifier_<Curve, USE_PADDING>;
using ShpleminiVerifier = ShpleminiVerifier_<Curve>;

static constexpr size_t n = 16;
static constexpr size_t log_n = 4;
Expand Down Expand Up @@ -191,8 +190,14 @@ TEST_F(KZGTest, ShpleminiKzgWithShift)

// Gemini verifier output:
// - claim: d+1 commitments to Fold_{r}^(0), Fold_{-r}^(0), Fold^(l), d+1 evaluations a_0_pos, a_l, l = 0:d-1
const auto batch_opening_claim = ShpleminiVerifier::compute_batch_opening_claim(
log_n, mock_claims.claim_batcher, mle_opening_point, vk->get_g1_identity(), verifier_transcript);
std::array<Fr, log_n> padding_indicator_array;
std::ranges::fill(padding_indicator_array, Fr{ 1 });

const auto batch_opening_claim = ShpleminiVerifier::compute_batch_opening_claim(padding_indicator_array,
mock_claims.claim_batcher,
mle_opening_point,
vk->get_g1_identity(),
verifier_transcript);

const auto pairing_points = PCS::reduce_verify_batch_opening_claim(batch_opening_claim, verifier_transcript);
// Final pairing check: e([Q] - [Q_z] + z[W], [1]_2) = e([W], [x]_2)
Expand Down Expand Up @@ -239,7 +244,10 @@ TEST_F(KZGTest, ShpleminiKzgWithShiftAndInterleaving)

// Gemini verifier output:
// - claim: d+1 commitments to Fold_{r}^(0), Fold_{-r}^(0), Fold^(l), d+1 evaluations a_0_pos, a_l, l = 0:d-1
const auto batch_opening_claim = ShpleminiVerifier::compute_batch_opening_claim(log_n,
std::array<Fr, log_n> padding_indicator_array;
std::ranges::fill(padding_indicator_array, Fr{ 1 });

const auto batch_opening_claim = ShpleminiVerifier::compute_batch_opening_claim(padding_indicator_array,
mock_claims.claim_batcher,
mle_opening_point,
vk->get_g1_identity(),
Expand Down Expand Up @@ -306,7 +314,10 @@ TEST_F(KZGTest, ShpleminiKzgShiftsRemoval)

// Gemini verifier output:
// - claim: d+1 commitments to Fold_{r}^(0), Fold_{-r}^(0), Fold^(l), d+1 evaluations a_0_pos, a_l, l = 0:d-1
const auto batch_opening_claim = ShpleminiVerifier::compute_batch_opening_claim(log_n,
std::array<Fr, log_n> padding_indicator_array;
std::ranges::fill(padding_indicator_array, Fr{ 1 });

const auto batch_opening_claim = ShpleminiVerifier::compute_batch_opening_claim(padding_indicator_array,
mock_claims.claim_batcher,
mle_opening_point,
vk->get_g1_identity(),
Expand Down
Loading