Refinement in IPA templating (#359)
* templating: More accurate representation of CompressedCommitment

* chore: Fix CI failure in docs generation
storojs72 authored Mar 8, 2024
1 parent 07f571e commit fe39652
Showing 3 changed files with 34 additions and 8 deletions.
2 changes: 1 addition & 1 deletion src/provider/hyperkzg.rs
@@ -5,7 +5,7 @@
 //! This means that Spartan's polynomial IOP can use commit to its polynomials as-is without incurring any interpolations or FFTs.
 //! (2) HyperKZG is specialized to use KZG as the univariate commitment scheme, so it includes several optimizations (both during the transformation of multilinear-to-univariate claims
 //! and within the KZG commitment scheme implementation itself).
-//! (3) HyperKZG also includes optimisation based on so called Shplonk/HaloInfinite technique (https://hackmd.io/@adrian-aztec/BJxoyeCqj#Phase-2-Gemini).
+//! (3) HyperKZG also includes optimisation based on so called Shplonk/HaloInfinite technique (`<https://hackmd.io/@adrian-aztec/BJxoyeCqj#Phase-2-Gemini>`).
 //! Compared to pure HyperKZG, this optimisation in theory improves prover (at cost of using 1 fixed KZG opening) and verifier (at cost of eliminating MSM)
 //!
 #![allow(non_snake_case)]
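Note on the doc change above: this is the "Fix CI failure in docs generation" item from the commit message. rustdoc's `bare_urls` lint flags raw URLs in doc comments, and a docs job that denies warnings turns that lint into a build failure; wrapping the URL as an autolink (or a code span, as done here) silences it. A minimal sketch of the pattern — the `RUSTDOCFLAGS` setting is an assumption about the CI setup, not taken from this commit:

    // Assumption: CI runs `cargo doc` with RUSTDOCFLAGS="-D warnings", so the
    // warn-by-default `rustdoc::bare_urls` lint becomes a hard failure.

    //! Warns (bare URL):     see https://example.com/notes
    //! Accepted (autolink):  see <https://example.com/notes>
    //! Accepted (code span): see `<https://example.com/notes>`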
15 changes: 8 additions & 7 deletions src/provider/tests/ipa_pc.rs
@@ -2,7 +2,8 @@
 mod test {
   use crate::provider::ipa_pc::EvaluationEngine;
   use crate::provider::tests::solidity_compatibility_utils::{
-    ec_points_to_json, field_elements_to_json, generate_pcs_solidity_unit_test_data,
+    compressed_commitment_to_json, ec_points_to_json, field_elements_to_json,
+    generate_pcs_solidity_unit_test_data,
   };
 
   use crate::provider::GrumpkinEngine;
@@ -33,11 +34,11 @@ Grumpkin.GrumpkinAffinePoint[] memory ck_s = new Grumpkin.GrumpkinAffinePoint[](
 uint256[] memory point = new uint256[]({{ len point }});
 {{ #each point }} point[{{ i }}]={{ val }};\n {{ /each }}
-Grumpkin.GrumpkinAffinePoint[] memory L_vec = new Grumpkin.GrumpkinAffinePoint[]({{ len L_vec }});
-{{ #each L_vec }} L_vec[{{ i }}]=Grumpkin.GrumpkinAffinePoint({{ x }}, {{y}});\n {{ /each }}
+uint256[] memory L_vec = new uint256[]({{ len L_vec }});
+{{ #each L_vec }} L_vec[{{ i }}]={{ compressed }};\n {{ /each }}
-Grumpkin.GrumpkinAffinePoint[] memory R_vec = new Grumpkin.GrumpkinAffinePoint[]({{ len R_vec }});
-{{ #each R_vec }} R_vec[{{ i }}]=Grumpkin.GrumpkinAffinePoint({{ x }}, {{y}});\n {{ /each }}
+uint256[] memory R_vec = new uint256[]({{ len R_vec }});
+{{ #each R_vec }} R_vec[{{ i }}]={{ compressed }};\n {{ /each }}
 uint256 a_hat = {{ a_hat }};
@@ -94,8 +95,8 @@ return keccak_transcript;
     let l_vec = CommitmentKey::<GrumpkinEngine>::reinterpret_commitments_as_ck(&proof.L_vec)
       .expect("can't reinterpred L_vec");
 
-    let r_vec_array = ec_points_to_json::<GrumpkinEngine>(&r_vec.ck);
-    let l_vec_array = ec_points_to_json::<GrumpkinEngine>(&l_vec.ck);
+    let r_vec_array = compressed_commitment_to_json::<GrumpkinEngine>(&r_vec.ck);
+    let l_vec_array = compressed_commitment_to_json::<GrumpkinEngine>(&l_vec.ck);
     let point_array = field_elements_to_json::<GrumpkinEngine>(&point);
     let ckv_array = ec_points_to_json::<GrumpkinEngine>(&vk.ck_v.ck);
     let cks_array = ec_points_to_json::<GrumpkinEngine>(&vk.ck_s.ck);
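The two hunks above change the data contract between the Rust test generator and the emitted Solidity: `L_vec`/`R_vec` become arrays of single `uint256` words (one compressed commitment each) instead of `Grumpkin.GrumpkinAffinePoint` (x, y) pairs, and each `{{ compressed }}` placeholder is filled from the JSON built by `compressed_commitment_to_json` (added in the next file). A minimal sketch of that contract, assuming the `handlebars` and `serde_json` crates and hypothetical values — the repo's actual harness and its `len` helper are not reproduced here:

    use handlebars::Handlebars;
    use serde_json::json;

    fn main() {
        // Hypothetical compressed points; real values come from
        // `compressed_commitment_to_json`, one 32-byte word per commitment.
        let data = json!({
            "L_vec": [
                { "i": "0", "compressed": "0x0a" },
                { "i": "1", "compressed": "0x0b" },
            ]
        });
        let template = "uint256[] memory L_vec = new uint256[](2);\n\
                        {{#each L_vec}}L_vec[{{i}}]={{compressed}};\n{{/each}}";
        let out = Handlebars::new().render_template(template, &data).unwrap();
        print!("{out}");
        // uint256[] memory L_vec = new uint256[](2);
        // L_vec[0]=0x0a;
        // L_vec[1]=0x0b;
    }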
25 changes: 25 additions & 0 deletions src/provider/tests/mod.rs
@@ -9,6 +9,7 @@ pub mod solidity_compatibility_utils {
   };
   use group::prime::PrimeCurve;
   use group::prime::PrimeCurveAffine;
+  use group::GroupEncoding;
   use rand::rngs::StdRng;
   use serde_json::{Map, Value};
   use std::sync::Arc;
@@ -121,4 +122,28 @@ pub mod solidity_compatibility_utils {
     });
     value_vector
   }
+
+  pub(crate) fn compressed_commitment_to_json<E>(
+    ec_points: &[<E::GE as PrimeCurve>::Affine],
+  ) -> Vec<Value>
+  where
+    E: Engine,
+    E::GE: DlogGroup<ScalarExt = E::Scalar>,
+  {
+    let mut value_vector = vec![];
+    ec_points.iter().enumerate().for_each(|(i, ec_point)| {
+      let mut value = Map::new();
+      let compressed_commitment_info = ec_point.to_curve().to_bytes();
+      let mut data = compressed_commitment_info.as_ref().to_vec();
+      data.reverse();
+
+      value.insert("i".to_string(), Value::String(i.to_string()));
+      value.insert(
+        "compressed".to_string(),
+        Value::String(format!("0x{}", hex::encode(data))),
+      );
+      value_vector.push(Value::Object(value));
+    });
+    value_vector
+  }
 }
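Why the byte reversal in the new helper: `to_bytes` yields the curve's 32-byte compressed encoding (the x-coordinate with the y-parity packed into a spare bit) in little-endian byte order, while a Solidity `uint256` hex literal is big-endian, so the bytes are flipped before hex-encoding. A standalone sketch of the same round trip — the `halo2curves` grumpkin types and the `hex` crate are inferred from context, not pinned by this diff:

    use group::{Group, GroupEncoding};
    use halo2curves::grumpkin::G1;

    fn main() {
        let point = G1::generator();
        // 32-byte compressed encoding, little-endian (x plus a sign bit).
        let mut data = point.to_bytes().as_ref().to_vec();
        // Solidity uint256 literals are big-endian: reverse before printing.
        data.reverse();
        println!("0x{}", hex::encode(data)); // one uint256-sized word
    }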

1 comment on commit fe39652

@github-actions
Contributor

Benchmarks

Overview

This benchmark report shows the Arecibo GPU benchmarks.
NVIDIA L4
Intel(R) Xeon(R) CPU @ 2.20GHz
32 vCPUs
125 GB RAM
Workflow run: https://github.com/lurk-lab/arecibo/actions/runs/8203987339

Benchmark Results

RecursiveSNARK-NIVC-2

|                        | ref=07f571e          | ref=fe39652                 |
|:-----------------------|:---------------------|:----------------------------|
| Prove-NumCons-6540     | 44.89 ms (✅ 1.00x)  | 45.16 ms (✅ 1.01x slower)  |
| Verify-NumCons-6540    | 35.64 ms (✅ 1.00x)  | 35.32 ms (✅ 1.01x faster)  |
| Prove-NumCons-1028888  | 321.28 ms (✅ 1.00x) | 324.66 ms (✅ 1.01x slower) |
| Verify-NumCons-1028888 | 251.67 ms (✅ 1.00x) | 254.49 ms (✅ 1.01x slower) |

CompressedSNARK-NIVC-Commitments-2

|                        | ref=07f571e          | ref=fe39652                 |
|:-----------------------|:---------------------|:----------------------------|
| Prove-NumCons-6540     | 10.77 s (✅ 1.00x)   | 10.82 s (✅ 1.00x slower)   |
| Verify-NumCons-6540    | 51.74 ms (✅ 1.00x)  | 52.27 ms (✅ 1.01x slower)  |
| Prove-NumCons-1028888  | 53.88 s (✅ 1.00x)   | 54.16 s (✅ 1.01x slower)   |
| Verify-NumCons-1028888 | 51.54 ms (✅ 1.00x)  | 52.12 ms (✅ 1.01x slower)  |

Made with criterion-table
