From 8ac25c7377dd743eb61aa3ef7475791aafaf7ce9 Mon Sep 17 00:00:00 2001 From: Al-Kindi-0 <82364884+Al-Kindi-0@users.noreply.github.com> Date: Tue, 6 Aug 2024 14:49:29 +0200 Subject: [PATCH 01/28] feat: math utilities needed for sum-check protocol --- Cargo.toml | 2 +- sumcheck/Cargo.toml | 27 ++++ sumcheck/src/lib.rs | 16 ++ sumcheck/src/prover/mod.rs | 6 + sumcheck/src/utils/mod.rs | 8 + sumcheck/src/utils/multilinear.rs | 229 +++++++++++++++++++++++++++ sumcheck/src/utils/univariate.rs | 252 ++++++++++++++++++++++++++++++ sumcheck/src/verifier/mod.rs | 5 + 8 files changed, 544 insertions(+), 1 deletion(-) create mode 100644 sumcheck/Cargo.toml create mode 100644 sumcheck/src/lib.rs create mode 100644 sumcheck/src/prover/mod.rs create mode 100644 sumcheck/src/utils/mod.rs create mode 100644 sumcheck/src/utils/multilinear.rs create mode 100644 sumcheck/src/utils/univariate.rs create mode 100644 sumcheck/src/verifier/mod.rs diff --git a/Cargo.toml b/Cargo.toml index b0ed3f07c..1b69d99bc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,7 +11,7 @@ members = [ "verifier", "winterfell", "examples" -] +, "sumcheck"] resolver = "2" [profile.release] diff --git a/sumcheck/Cargo.toml b/sumcheck/Cargo.toml new file mode 100644 index 000000000..c4f4c3c4f --- /dev/null +++ b/sumcheck/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "winter-sumcheck" +version = "0.1.0" +description = "Implementation of the sum-check protocol for the LogUp-GKR protocol" +authors = ["winterfell contributors"] +readme = "README.md" +license = "MIT" +repository = "https://github.com/novifinancial/winterfell" +documentation = "https://docs.rs/winter-sumcheck/0.1.0" +categories = ["cryptography", "no-std"] +keywords = ["crypto", "sumcheck", "iop"] +edition = "2021" +rust-version = "1.78" + +[features] +concurrent = ["utils/concurrent", "dep:rayon", "std"] +default = ["std"] +std = ["utils/std"] + +[dependencies] +math = { version = "0.9", path = "../math", package = "winter-math", default-features = false } +utils = { version = "0.9", path = "../utils/core", package = "winter-utils", default-features = false } +rayon = { version = "1.8", optional = true } + +[dev-dependencies] +criterion = "0.5" +rand-utils = { version = "0.9", path = "../utils/rand", package = "winter-rand-utils" } \ No newline at end of file diff --git a/sumcheck/src/lib.rs b/sumcheck/src/lib.rs new file mode 100644 index 000000000..64d44a2dc --- /dev/null +++ b/sumcheck/src/lib.rs @@ -0,0 +1,16 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree. + + +#![no_std] + +#[macro_use] +extern crate alloc; + +mod prover; + +mod verifier; + +mod utils; \ No newline at end of file diff --git a/sumcheck/src/prover/mod.rs b/sumcheck/src/prover/mod.rs new file mode 100644 index 000000000..e5b8995e4 --- /dev/null +++ b/sumcheck/src/prover/mod.rs @@ -0,0 +1,6 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree. + + diff --git a/sumcheck/src/utils/mod.rs b/sumcheck/src/utils/mod.rs new file mode 100644 index 000000000..41c63e1df --- /dev/null +++ b/sumcheck/src/utils/mod.rs @@ -0,0 +1,8 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree. 
+ +mod univariate; + +mod multilinear; \ No newline at end of file diff --git a/sumcheck/src/utils/multilinear.rs b/sumcheck/src/utils/multilinear.rs new file mode 100644 index 000000000..d6907d14c --- /dev/null +++ b/sumcheck/src/utils/multilinear.rs @@ -0,0 +1,229 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree. + +use alloc::vec::Vec; +use core::ops::Index; +use math::FieldElement; + +#[cfg(feature = "concurrent")] +pub use rayon::prelude::*; + +// MULTI-LINEAR POLYNOMIAL +// ================================================================================================ + +/// Represents a multi-linear polynomial. +/// +/// The representation stores the evaluations of the polynomial over the boolean hyper-cube +/// ${0 , 1}^ν$. +#[derive(Clone, Debug, PartialEq)] +pub struct MultiLinearPoly { + evaluations: Vec, +} + +impl MultiLinearPoly { + /// Constructs a [`MultiLinearPoly`] from its evaluations over the boolean hyper-cube ${0 , 1}^ν$. + pub fn from_evaluations(evaluations: Vec) -> Self { + assert!(evaluations.len().is_power_of_two(), "A multi-linear polynomial should have a power of 2 number of evaluations over the Boolean hyper-cube"); + Self { evaluations } + } + + /// Returns the number of variables of the multi-linear polynomial. + pub fn num_variables(&self) -> usize { + self.evaluations.len().trailing_zeros() as usize + } + + /// Returns the evaluations over the boolean hyper-cube. + pub fn evaluations(&self) -> &[E] { + &self.evaluations + } + + /// Returns the number of evaluations. This is equal to the size of the boolean hyper-cube. + pub fn num_evaluations(&self) -> usize { + self.evaluations.len() + } + + /// Evaluate the multi-linear at some query $(r_0, ..., r_{ν - 1}) ∈ 𝔽^ν$. + /// + /// It first computes the evaluations of the Lagrange basis polynomials over the interpolating + /// set ${0 , 1}^ν$ at $(r_0, ..., r_{ν - 1})$ i.e., the Lagrange kernel at $(r_0, ..., r_{ν - 1})$. + /// The evaluation then is the inner product, indexed by ${0 , 1}^ν$, of the vector of + /// evaluations times the Lagrange kernel. + pub fn evaluate(&self, query: &[E]) -> E { + let tensored_query = compute_lagrange_basis_evals_at(query); + inner_product(&self.evaluations, &tensored_query) + } + + /// Similar to [`Self::evaluate`], except that the query was already turned into the Lagrange + /// kernel (i.e. the [`lagrange_ker::EqFunction`] evaluated at every point in the set + /// `${0 , 1}^ν$`). + /// + /// This is more efficient than [`Self::evaluate`] when multiple different [`MultiLinearPoly`] + /// need to be evaluated at the same query point. + pub fn evaluate_with_lagrange_kernel(&self, lagrange_kernel: &[E]) -> E { + inner_product(&self.evaluations, lagrange_kernel) + } + + /// Computes $f(r_0, y_1, ..., y_{ν - 1})$ using the linear interpolation formula + /// $(1 - r_0) * f(0, y_1, ..., y_{ν - 1}) + r_0 * f(1, y_1, ..., y_{ν - 1})$ and assigns + /// the resulting multi-linear, defined over a domain of half the size, to `self`. 
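+    ///
+    /// For illustration, in a hypothetical 2-variable case where the evaluations are ordered
+    /// as $[f(0, 0), f(1, 0), f(0, 1), f(1, 1)] = [a, b, c, d]$, binding the least significant
+    /// variable to a challenge $r_0$ produces the 1-variable multi-linear with evaluations
+    /// $[a + r_0 (b - a), c + r_0 (d - c)] = [f(r_0, 0), f(r_0, 1)]$.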
+ pub fn bind_least_significant_variable(&mut self, round_challenge: E) { + let num_evals = self.evaluations.len() >> 1; + for i in 0..num_evals { + self.evaluations[i] = self.evaluations[i << 1] + + round_challenge * (self.evaluations[(i << 1) + 1] - self.evaluations[i << 1]); + } + self.evaluations.truncate(num_evals) + } + + /// Given the multilinear polynomial $f(y_0, y_1, ..., y_{ν - 1})$, returns two polynomials: + /// $f(0, y_1, ..., y_{ν - 1})$ and $f(1, y_1, ..., y_{ν - 1})$. + pub fn project_least_significant_variable(&self) -> (Self, Self) { + let mut p0 = Vec::with_capacity(self.num_evaluations() / 2); + let mut p1 = Vec::with_capacity(self.num_evaluations() / 2); + for chunk in self.evaluations.chunks_exact(2) { + p0.push(chunk[0]); + p1.push(chunk[1]); + } + + (MultiLinearPoly::from_evaluations(p0), MultiLinearPoly::from_evaluations(p1)) + } +} + +impl Index for MultiLinearPoly { + type Output = E; + + fn index(&self, index: usize) -> &E { + &(self.evaluations[index]) + } +} + +// EQ FUNCTION +// ================================================================================================ + +/// The EQ (equality) function is the binary function defined by +/// +/// $$ +/// EQ: {0 , 1}^ν ⛌ {0 , 1}^ν ⇾ {0 , 1} +/// ((x_0, ..., x_{ν - 1}), (y_0, ..., y_{ν - 1})) ↦ \prod_{i = 0}^{ν - 1} (x_i * y_i + (1 - x_i) +/// * (1 - y_i)) +/// $$ +/// +/// Taking its multi-linear extension $EQ^{~}$, we can define a basis for the set of multi-linear +/// polynomials in ν variables by +/// $${EQ^{~}(., (y_0, ..., y_{ν - 1})): (y_0, ..., y_{ν - 1}) ∈ {0 , 1}^ν}$$ +/// where each basis function is a function of its first argument. This is called the Lagrange or +/// evaluation basis for evaluation set ${0 , 1}^ν$. +/// +/// Given a function $(f: {0 , 1}^ν ⇾ 𝔽)$, its multi-linear extension (i.e., the unique +/// mult-linear polynomial extending `f` to $(f^{~}: 𝔽^ν ⇾ 𝔽)$ and agreeing with it on ${0 , 1}^ν$) is +/// defined as the summation of the evaluations of f against the Lagrange basis. +/// More specifically, given $(r_0, ..., r_{ν - 1}) ∈ 𝔽^ν$, then: +/// +/// $$ +/// f^{~}(r_0, ..., r_{ν - 1}) = \sum_{(y_0, ..., y_{ν - 1}) ∈ {0 , 1}^ν} +/// f(y_0, ..., y_{ν - 1}) EQ^{~}((r_0, ..., r_{ν - 1}), (y_0, ..., y_{ν - 1})) +/// $$ +/// +/// We call the Lagrange kernel the evaluation of the EQ^{~} function at +/// $((r_0, ..., r_{ν - 1}), (y_0, ..., y_{ν - 1}))$ for all $(y_0, ..., y_{ν - 1}) ∈ {0 , 1}^ν$ for +/// a fixed $(r_0, ..., r_{ν - 1}) ∈ 𝔽^ν$. +/// +/// [`EqFunction`] represents EQ^{~} the multi-linear extension of +/// +/// $((y_0, ..., y_{ν - 1}) ↦ EQ((r_0, ..., r_{ν - 1}), (y_0, ..., y_{ν - 1})))$ +/// +/// and contains a method to generate the Lagrange kernel for defining evaluations of multi-linear +/// extensions of arbitrary functions $(f: {0 , 1}^ν ⇾ 𝔽)$ at a given point $(r_0, ..., r_{ν - 1})$ +/// as well as a method to evaluate $EQ^{~}((r_0, ..., r_{ν - 1}), (t_0, ..., t_{ν - 1})))$ for +/// $(t_0, ..., t_{ν - 1}) ∈ 𝔽^ν$. +pub struct EqFunction { + r: Vec, +} + +impl EqFunction { + /// Creates a new [EqFunction]. + pub fn new(r: Vec) -> Self { + let tmp = r.clone(); + EqFunction { r: tmp } + } + + /// Computes $EQ((r_0, ..., r_{ν - 1}), (t_0, ..., t_{ν - 1})))$. 
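+    ///
+    /// For example, for $ν = 2$ this evaluates to
+    /// $(r_0 t_0 + (1 - r_0)(1 - t_0)) (r_1 t_1 + (1 - r_1)(1 - t_1))$; when
+    /// $(t_0, t_1) ∈ {0 , 1}^2$ this is exactly the corresponding entry of the Lagrange
+    /// kernel at $(r_0, r_1)$ returned by [`Self::evaluations`].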
+ pub fn evaluate(&self, t: &[E]) -> E { + assert_eq!(self.r.len(), t.len()); + + (0..self.r.len()) + .map(|i| self.r[i] * t[i] + (E::ONE - self.r[i]) * (E::ONE - t[i])) + .fold(E::ONE, |acc, term| acc * term) + } + + /// Computes $EQ((r_0, ..., r_{ν - 1}), (y_0, ..., y_{ν - 1}))$ for all + /// $(y_0, ..., y_{ν - 1}) ∈ {0 , 1}^ν$ i.e., the Lagrange kernel at $r = (r_0, ..., r_{ν - 1})$. + pub fn evaluations(&self) -> Vec { + compute_lagrange_basis_evals_at(&self.r) + } + + /// Returns the evaluations of + /// $((y_0, ..., y_{ν - 1}) ↦ EQ^{~}((r_0, ..., r_{ν - 1}), (y_0, ..., y_{ν - 1})))$ + /// over ${0 , 1}^ν$. + pub fn ml_at(evaluation_point: Vec) -> MultiLinearPoly { + let eq_evals = EqFunction::new(evaluation_point.clone()).evaluations(); + MultiLinearPoly::from_evaluations(eq_evals) + } +} + +// HELPER +// ================================================================================================ + +/// Computes the evaluations of the Lagrange basis polynomials over the interpolating +/// set ${0 , 1}^ν$ at $(r_0, ..., r_{ν - 1})$ i.e., the Lagrange kernel at $(r_0, ..., r_{ν - 1})$. +/// +/// TODO: This is a critical function and parallelizing would have a significant impact on +/// performance. +fn compute_lagrange_basis_evals_at(query: &[E]) -> Vec { + let nu = query.len(); + let n = 1 << nu; + + let mut evals: Vec = vec![E::ONE; n]; + let mut size = 1; + for r_i in query.iter().rev() { + size *= 2; + for i in (0..size).rev().step_by(2) { + let scalar = evals[i / 2]; + evals[i] = scalar * *r_i; + evals[i - 1] = scalar - evals[i]; + } + } + evals +} + +/// Computes the inner product in the extension field of two iterators that must yield the same +/// number of items. +/// +/// If `concurrent` feature is enabled, this function can make use of multi-threading. +pub fn inner_product(x: &[E], y: &[E]) -> E { + #[cfg(not(feature = "concurrent"))] + return x.iter().zip(y.iter()).fold(E::ZERO, |acc, (x_i, y_i)| acc + *x_i * *y_i); + + #[cfg(feature = "concurrent")] + return x + .par_iter() + .zip(y.par_iter()) + .map(|(x_i, y_i)| *x_i * *y_i) + .reduce(|| E::ZERO, |a, b| a + b); +} + +// TESTS +// ================================================================================================ + +#[test] +fn test_bind() { + use math::fields::f64::BaseElement; + let mut p = MultiLinearPoly::from_evaluations(vec![BaseElement::ONE; 8]); + let expected = MultiLinearPoly::from_evaluations(vec![BaseElement::ONE; 4]); + + let challenge = rand_utils::rand_value(); + p.bind_least_significant_variable(challenge); + assert_eq!(p, expected) +} diff --git a/sumcheck/src/utils/univariate.rs b/sumcheck/src/utils/univariate.rs new file mode 100644 index 000000000..868206213 --- /dev/null +++ b/sumcheck/src/utils/univariate.rs @@ -0,0 +1,252 @@ + +// Copyright (c) Facebook, Inc. and its affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree. + +use alloc::vec::Vec; +use math::{batch_inversion, polynom, FieldElement}; + + +// COMPRESSED UNIVARIATE POLYNOMIAL +// ================================================================================================ + +/// The coefficients of a univariate polynomial of degree n with the linear term coefficient +/// omitted. +/// +/// This compressed representation is useful during the sum-check protocol as the full uncompressed +/// representation can be recovered from the compressed one and the current sum-check round claim. 
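+///
+/// Concretely, writing the round polynomial as $s(X) = c_0 + c_1 X + ... + c_n X^n$, the
+/// sum-check relation $s(0) + s(1) = claim$ gives $2 c_0 + c_1 + ... + c_n = claim$, so the
+/// omitted linear coefficient can be recovered as $c_1 = claim - 2 c_0 - c_2 - ... - c_n$.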
+#[derive(Clone, Debug)] +pub struct CompressedUnivariatePoly(Vec); + +impl CompressedUnivariatePoly { + /// Evaluates a polynomial at a challenge point using a round claim. + /// + /// The round claim is used to recover the coefficient of the linear term using the relation + /// 2 * c0 + c1 + ... c_{n - 1} = claim. Using the complete list of coefficients, the polynomial + /// is then evaluated using Horner's method. + pub fn evaluate_using_claim(&self, claim: &E, challenge: &E) -> E { + // recover the coefficient of the linear term + let c1 = *claim - self.0.iter().fold(E::ZERO, |acc, term| acc + *term) - self.0[0]; + + // construct the full coefficient list + let mut complete_coefficients = vec![self.0[0], c1]; + complete_coefficients.extend_from_slice(&self.0[1..]); + + // evaluate + polynom::eval(&complete_coefficients, *challenge) + } +} + +/// The evaluations of a univariate polynomial of degree n at 0, 1, ..., n with the evaluation at 0 +/// omitted. +/// +/// This compressed representation is useful during the sum-check protocol as the full uncompressed +/// representation can be recovered from the compressed one and the current sum-check round claim. +#[derive(Clone, Debug)] +pub struct CompressedUnivariatePolyEvals(Vec); + +impl CompressedUnivariatePolyEvals { + /// Gives the coefficient representation of a polynomial represented in evaluation form. + /// + /// Since the evaluation at 0 is omitted, we need to use the round claim to recover + /// the evaluation at 0 using the identity $p(0) + p(1) = claim$. + /// Now, we have that for any polynomial $p(x) = c0 + c1 * x + ... + c_{n-1} * x^{n - 1}$: + /// + /// 1. $p(0) = c0$. + /// 2. $p(x) = c0 + x * q(x) where q(x) = c1 + ... + c_{n-1} * x^{n - 2}$. + /// + /// This means that we can compute the evaluations of q at 1, ..., n - 1 using the evaluations + /// of p and thus reduce by 1 the size of the interpolation problem. + /// Once the coefficient of q are recovered, the c0 coefficient is appended to these and this + /// is precisely the coefficient representation of the original polynomial q. + /// Note that the coefficient of the linear term is removed as this coefficient can be recovered + /// from the remaining coefficients, again, using the round claim using the relation + /// $2 * c0 + c1 + ... c_{n - 1} = claim$. + pub fn to_poly(&self, round_claim: E) -> CompressedUnivariatePoly { + // construct the vector of interpolation points 1, ..., n + let n_minus_1 = self.0.len(); + let points = (1..=n_minus_1 as u32).map(E::BaseField::from).collect::>(); + + // construct their inverses. These will be needed for computing the evaluations + // of the q polynomial as well as for doing the interpolation on q + let points_inv = batch_inversion(&points); + + // compute the zeroth coefficient + let c0 = round_claim - self.0[0]; + + // compute the evaluations of q + let q_evals: Vec = self + .0 + .iter() + .enumerate() + .map(|(i, evals)| (*evals - c0).mul_base(points_inv[i])) + .collect(); + + // interpolate q + let q_coefs = multiply_by_inverse_vandermonde(&q_evals, &points_inv); + + // append c0 to the coefficients of q to get the coefficients of p. The linear term + // coefficient is removed as this can be recovered from the other coefficients using + // the reduced claim. 
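+        // note: since p(x) = c0 + x * q(x), the coefficient vector of p is [c0, q_0, q_1, ...];
+        // the constant term q_0 of q is exactly the linear coefficient c1 of p, which is why it
+        // is dropped below by taking `q_coefs[1..]`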
+ let mut coefficients = Vec::with_capacity(self.0.len() + 1); + coefficients.push(c0); + coefficients.extend_from_slice(&q_coefs[1..]); + + CompressedUnivariatePoly(coefficients) + } +} + +// HELPER FUNCTIONS +// ================================================================================================ + +/// Given a (row) vector `v`, computes the vector-matrix product `v * V^{-1}` where `V` is +/// the Vandermonde matrix over the points `1, ..., n` where `n` is the length of `v`. +/// The resulting vector will then be the coefficients of the minimal interpolating polynomial +/// through the points `(i+1, v[i])` for `i` in `0, ..., n - 1` +/// +/// The naive way would be to invert the matrix `V` and then compute the vector-matrix product +/// this will cost `O(n^3)` operations and `O(n^2)` memory. We can also try Gaussian elimination +/// but this is also worst case `O(n^3)` operations and `O(n^2)` memory. +/// In the following implementation, we use the fact that the points over which we are interpolating +/// is a set of equidistant points and thus both the Vandermonde matrix and its inverse can be +/// described by sparse linear recurrence equations. +/// More specifically, we use the representation given in [1], where `V^{-1}` is represented as +/// `U * M` where: +/// +/// 1. `M` is a lower triangular matrix where its entries are given by M(i, j) = M(i - 1, j) - M(i - +/// 1, j - 1) / (i - 1) with boundary conditions M(i, 1) = 1 and M(i, j) = 0 when j > i. +/// +/// 2. `U` is an upper triangular (involutory) matrix where its entries are given by U(i, j) = U(i, +/// j - 1) - U(i - 1, j - 1) with boundary condition U(1, j) = 1 and U(i, j) = 0 when i > j. +/// +/// Note that the matrix indexing in the formulas above matches the one in the reference and starts +/// from 1. +/// +/// The above implies that we can do the vector-matrix multiplication in `O(n^2)` and using only +/// `O(n)` space. +/// +/// [1]: https://link.springer.com/article/10.1007/s002110050360 +fn multiply_by_inverse_vandermonde( + vector: &[E], + nodes_inv: &[E::BaseField], +) -> Vec { + let res = multiply_by_u(vector); + multiply_by_m(&res, nodes_inv) +} + +/// Multiplies a (row) vector `v` by an upper triangular matrix `U` to compute `v * U`. +/// +/// `U` is an upper triangular (involutory) matrix with its entries given by +/// U(i, j) = U(i, j - 1) - U(i - 1, j - 1) +/// with boundary condition U(1, j) = 1 and U(i, j) = 0 when i > j. +fn multiply_by_u(vector: &[E]) -> Vec { + let n = vector.len(); + let mut previous_u_col = vec![E::BaseField::ZERO; n]; + previous_u_col[0] = E::BaseField::ONE; + let mut current_u_col = vec![E::BaseField::ZERO; n]; + current_u_col[0] = E::BaseField::ONE; + + let mut result: Vec = vec![E::ZERO; n]; + for (i, res) in result.iter_mut().enumerate() { + *res = vector[0]; + + for (j, v) in vector.iter().enumerate().take(i + 1).skip(1) { + let u_entry: E::BaseField = + compute_u_entry::(j, &mut previous_u_col, &mut current_u_col); + *res += v.mul_base(u_entry); + } + previous_u_col.clone_from(¤t_u_col); + } + + result +} + +/// Multiplies a (row) vector `v` by a lower triangular matrix `M` to compute `v * M`. +/// +/// `M` is a lower triangular matrix with its entries given by +/// M(i, j) = M(i - 1, j) - M(i - 1, j - 1) / (i - 1) +/// with boundary conditions M(i, 1) = 1 and M(i, j) = 0 when j > i. 
+fn multiply_by_m(vector: &[E], nodes_inv: &[E::BaseField]) -> Vec { + let n = vector.len(); + let mut previous_m_col = vec![E::BaseField::ONE; n]; + let mut current_m_col = vec![E::BaseField::ZERO; n]; + current_m_col[0] = E::BaseField::ONE; + + let mut result: Vec = vec![E::ZERO; n]; + result[0] = vector.iter().fold(E::ZERO, |acc, term| acc + *term); + for (i, res) in result.iter_mut().enumerate().skip(1) { + current_m_col = vec![E::BaseField::ZERO; n]; + + for (j, v) in vector.iter().enumerate().skip(i) { + let m_entry: E::BaseField = + compute_m_entry::(j, &mut previous_m_col, &mut current_m_col, nodes_inv[j - 1]); + *res += v.mul_base(m_entry); + } + previous_m_col.clone_from(¤t_m_col); + } + + result +} + +/// Returns the j-th entry of the i-th column of matrix `U` given the values of the (i - 1)-th +/// column. The i-th column is also updated with the just computed `U(i, j)` entry. +/// +/// `U` is an upper triangular (involutory) matrix with its entries given by +/// U(i, j) = U(i, j - 1) - U(i - 1, j - 1) +/// with boundary condition U(1, j) = 1 and U(i, j) = 0 when i > j. +fn compute_u_entry( + j: usize, + col_prev: &mut [E::BaseField], + col_cur: &mut [E::BaseField], +) -> E::BaseField { + let value = col_prev[j] - col_prev[j - 1]; + col_cur[j] = value; + value +} + +/// Returns the j-th entry of the i-th column of matrix `M` given the values of the (i - 1)-th +/// and the i-th columns. The i-th column is also updated with the just computed `M(i, j)` entry. +/// +/// `M` is a lower triangular matrix with its entries given by +/// M(i, j) = M(i - 1, j) - M(i - 1, j - 1) / (i - 1) +/// with boundary conditions M(i, 1) = 1 and M(i, j) = 0 when j > i. +fn compute_m_entry( + j: usize, + col_previous: &mut [E::BaseField], + col_current: &mut [E::BaseField], + node_inv: E::BaseField, +) -> E::BaseField { + let value = col_current[j - 1] - node_inv * col_previous[j - 1]; + col_current[j] = value; + value +} + +// TESTS +// ================================================================================================ + +#[test] +fn test_poly_partial() { + use math::fields::f64::BaseElement; + + let degree = 1000; + let mut points: Vec = vec![BaseElement::ZERO; degree]; + points + .iter_mut() + .enumerate() + .for_each(|(i, node)| *node = BaseElement::from(i as u32)); + + let p: Vec = rand_utils::rand_vector(degree); + let evals = polynom::eval_many(&p, &points); + + let mut partial_evals = evals.clone(); + partial_evals.remove(0); + + let partial_poly = CompressedUnivariatePolyEvals(partial_evals); + let claim = evals[0] + evals[1]; + let poly_coeff = partial_poly.to_poly(claim); + + let r = rand_utils::rand_vector(1); + + assert_eq!(polynom::eval(&p, r[0]), poly_coeff.evaluate_using_claim(&claim, &r[0])) +} \ No newline at end of file diff --git a/sumcheck/src/verifier/mod.rs b/sumcheck/src/verifier/mod.rs new file mode 100644 index 000000000..a0918f3fe --- /dev/null +++ b/sumcheck/src/verifier/mod.rs @@ -0,0 +1,5 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree. 
+ From 5e06378c2941abc38c351fcb3b607ef50ce93675 Mon Sep 17 00:00:00 2001 From: Al-Kindi-0 <82364884+Al-Kindi-0@users.noreply.github.com> Date: Tue, 6 Aug 2024 16:08:36 +0200 Subject: [PATCH 02/28] feat: add sum-check prover and verifier --- air/src/air/mod.rs | 66 +++++++ air/src/lib.rs | 4 +- sumcheck/Cargo.toml | 3 + sumcheck/src/lib.rs | 142 +++++++++++++- sumcheck/src/prover/error.rs | 15 ++ sumcheck/src/prover/mod.rs | 324 +++++++++++++++++++++++++++++++ sumcheck/src/utils/mod.rs | 4 +- sumcheck/src/utils/univariate.rs | 20 +- sumcheck/src/verifier/mod.rs | 42 ++++ 9 files changed, 614 insertions(+), 6 deletions(-) create mode 100644 sumcheck/src/prover/error.rs diff --git a/air/src/air/mod.rs b/air/src/air/mod.rs index 53a59fa5a..f7e2411f3 100644 --- a/air/src/air/mod.rs +++ b/air/src/air/mod.rs @@ -601,3 +601,69 @@ pub trait Air: Send + Sync { }) } } + + +pub trait LogUpGkrEvaluator: Clone { + /// Defines the base field of the evaluator. + type BaseField: StarkField; + + /// Public inputs need to compute the final claim. + type PublicInputs: ToElements + Send; + + /// Gets a list of all oracles involved in LogUp-GKR; this is intended to be used in construction of + /// MLEs. + fn get_oracles(&self) -> Vec>; + + /// Returns the number of random values needed to evaluate a query. + fn get_num_rand_values(&self) -> usize; + + /// Returns the number of fractions in the LogUp-GKR statement. + fn get_num_fractions(&self) -> usize; + + /// Returns the maximal degree of the multi-variate associated to the input layer. + fn max_degree(&self) -> usize; + + /// Builds a query from the provided main trace frame and periodic values. + /// + /// Note: it should be possible to provide an implementation of this method based on the + /// information returned from `get_oracles()`. However, this implementation is likely to be + /// expensive compared to the hand-written implementation. However, we could provide a test + /// which verifies that `get_oracles()` and `build_query()` methods are consistent. + fn build_query(&self, frame: &EvaluationFrame, periodic_values: &[E]) -> Vec + where + E: FieldElement; + + /// Evaluates the provided query and writes the results into the numerators and denominators. + /// + /// Note: it is also possible to combine `build_query()` and `evaluate_query()` into a single + /// method to avoid the need to first build the query struct and then evaluate it. However: + /// - We assume that the compiler will be able to optimize this away. + /// - Merging the methods will make it more difficult avoid inconsistencies between + /// `evaluate_query()` and `get_oracles()` methods. + fn evaluate_query( + &self, + query: &[F], + rand_values: &[E], + numerator: &mut [E], + denominator: &mut [E], + ) where + F: FieldElement, + E: FieldElement + ExtensionOf; + + /// Computes the final claim for the LogUp-GKR circuit. + /// + /// The default implementation of this method returns E::ZERO as it is expected that the + /// fractional sums will cancel out. However, in cases when some boundary conditions need to + /// be imposed on the LogUp-GKR relations, this method can be overridden to compute the final + /// expected claim. 
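+    ///
+    /// For example (a hypothetical case), a bus enforcing only multiset equality between trace
+    /// columns yields fractions that cancel out, so the default zero claim applies; a bus with
+    /// public boundary terms would instead fold those terms into the returned claim.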
+ fn compute_claim(&self, inputs: &Self::PublicInputs, rand_values: &[E]) -> E + where + E: FieldElement; +} + +#[derive(Clone, Debug, PartialEq, PartialOrd, Eq, Ord)] +pub enum LogUpGkrOracle { + CurrentRow(usize), + NextRow(usize), + PeriodicValue(Vec), +} \ No newline at end of file diff --git a/air/src/lib.rs b/air/src/lib.rs index 539a812d9..aaede0bda 100644 --- a/air/src/lib.rs +++ b/air/src/lib.rs @@ -47,6 +47,6 @@ pub use air::{ DeepCompositionCoefficients, EvaluationFrame, GkrRandElements, GkrVerifier, LagrangeConstraintsCompositionCoefficients, LagrangeKernelBoundaryConstraint, LagrangeKernelConstraints, LagrangeKernelEvaluationFrame, LagrangeKernelRandElements, - LagrangeKernelTransitionConstraints, TraceInfo, TransitionConstraintDegree, - TransitionConstraints, + LagrangeKernelTransitionConstraints, LogUpGkrEvaluator, LogUpGkrOracle, TraceInfo, + TransitionConstraintDegree, TransitionConstraints, }; diff --git a/sumcheck/Cargo.toml b/sumcheck/Cargo.toml index c4f4c3c4f..60a690bd0 100644 --- a/sumcheck/Cargo.toml +++ b/sumcheck/Cargo.toml @@ -18,9 +18,12 @@ default = ["std"] std = ["utils/std"] [dependencies] +air = { version = "0.9", path = "../air", package = "winter-air", default-features = false } +crypto = { version = "0.9", path = "../crypto", package = "winter-crypto", default-features = false } math = { version = "0.9", path = "../math", package = "winter-math", default-features = false } utils = { version = "0.9", path = "../utils/core", package = "winter-utils", default-features = false } rayon = { version = "1.8", optional = true } +thiserror = { version = "1.0", git = "https://github.com/bitwalker/thiserror", branch = "no-std", default-features = false } [dev-dependencies] criterion = "0.5" diff --git a/sumcheck/src/lib.rs b/sumcheck/src/lib.rs index 64d44a2dc..60112033c 100644 --- a/sumcheck/src/lib.rs +++ b/sumcheck/src/lib.rs @@ -6,11 +6,151 @@ #![no_std] +use alloc::vec::Vec; +use math::FieldElement; +use ::utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable}; + #[macro_use] extern crate alloc; mod prover; +pub use prover::*; mod verifier; +pub use verifier::*; + +mod utils; +pub use utils::*; + +/// Represents an opening claim at an evaluation point against a batch of oracles. +/// +/// After verifying [`Proof`], the verifier is left with a question on the validity of a final +/// claim on a number of oracles open to a given set of values at some given point. +/// This question is answered either using further interaction with the Prover or using +/// a polynomial commitment opening proof in the compiled protocol. +#[derive(Clone, Debug)] +pub struct FinalOpeningClaim { + pub eval_point: Vec, + pub openings: Vec, +} + +impl Serializable for FinalOpeningClaim { + fn write_into(&self, target: &mut W) { + let Self { eval_point, openings } = self; + eval_point.write_into(target); + openings.write_into(target); + } +} + +impl Deserializable for FinalOpeningClaim +where + E: FieldElement, +{ + fn read_from(source: &mut R) -> Result { + Ok(Self { + eval_point: Deserializable::read_from(source)?, + openings: Deserializable::read_from(source)?, + }) + } +} + +/// A sum-check proof. +/// +/// Composed of the round proofs i.e., the polynomials sent by the Prover at each round as well as +/// the (claimed) openings of the multi-linear oracles at the evaluation point given by the round +/// challenges. 
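+///
+/// For a claim over multi-linears in ν variables, the proof contains ν round proofs and
+/// `openings_claim.eval_point` collects the ν round challenges in the order they were drawn.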
+#[derive(Debug, Clone)] +pub struct SumCheckProof { + pub openings_claim: FinalOpeningClaim, + pub round_proofs: Vec>, +} + +/// A sum-check round proof. +/// +/// This represents the partial polynomial sent by the Prover during one of the rounds of the +/// sum-check protocol. The polynomial is in coefficient form and excludes the coefficient for +/// the linear term as the Verifier can recover it from the other coefficients and the current +/// (reduced) claim. +#[derive(Debug, Clone)] +pub struct RoundProof { + pub round_poly_coefs: CompressedUnivariatePoly, +} + +impl Serializable for RoundProof { + fn write_into(&self, target: &mut W) { + let Self { round_poly_coefs } = self; + round_poly_coefs.write_into(target); + } +} + +impl Deserializable for RoundProof +where + E: FieldElement, +{ + fn read_from(source: &mut R) -> Result { + Ok(Self { + round_poly_coefs: Deserializable::read_from(source)?, + }) + } +} + +impl Serializable for SumCheckProof +where + E: FieldElement, +{ + fn write_into(&self, target: &mut W) { + self.openings_claim.write_into(target); + self.round_proofs.write_into(target); + } +} + +impl Deserializable for SumCheckProof +where + E: FieldElement, +{ + fn read_from(source: &mut R) -> Result { + Ok(Self { + openings_claim: Deserializable::read_from(source)?, + round_proofs: Deserializable::read_from(source)?, + }) + } +} + +/// Contains the round challenges sent by the Verifier up to some round as well as the current +/// reduced claim. +#[derive(Debug)] +pub struct SumCheckRoundClaim { + pub eval_point: Vec, + pub claim: E, +} + + +/// The non-linear composition polynomial of the LogUp-GKR protocol specific to the input layer. +pub fn evaluate_composition_poly( + numerators: &[E], + denominators: &[E], + eq_eval: E, + r_sum_check: E, + tensored_merge_randomness: &[E], +) -> E { + let numerators = MultiLinearPoly::from_evaluations(numerators.to_vec()); + let denominators = MultiLinearPoly::from_evaluations(denominators.to_vec()); + + let (left_numerators, right_numerators) = numerators.project_least_significant_variable(); + let (left_denominators, right_denominators) = denominators.project_least_significant_variable(); + + let eval_left_numerators = + left_numerators.evaluate_with_lagrange_kernel(&tensored_merge_randomness); + let eval_right_numerators = + right_numerators.evaluate_with_lagrange_kernel(&tensored_merge_randomness); + + let eval_left_denominators = + left_denominators.evaluate_with_lagrange_kernel(&tensored_merge_randomness); + let eval_right_denominators = + right_denominators.evaluate_with_lagrange_kernel(&tensored_merge_randomness); -mod utils; \ No newline at end of file + eq_eval + * ((eval_left_numerators * eval_right_denominators + + eval_right_numerators * eval_left_denominators) + + eval_left_denominators * eval_right_denominators * r_sum_check) +} \ No newline at end of file diff --git a/sumcheck/src/prover/error.rs b/sumcheck/src/prover/error.rs new file mode 100644 index 000000000..c86198d73 --- /dev/null +++ b/sumcheck/src/prover/error.rs @@ -0,0 +1,15 @@ +#[derive(Debug, thiserror::Error)] +pub enum SumCheckProverError { + #[error("number of rounds for sum-check must be greater than zero")] + NumRoundsZero, + #[error("the number of rounds is greater than the number of variables")] + TooManyRounds, + #[error("should provide at least one multi-linear polynomial as input")] + NoMlsProvided, + #[error("failed to generate round challenge")] + FailedToGenerateChallenge, + #[error("the provided multi-linears have different arities")] + 
MlesDifferentArities, + #[error("multi-linears should have at least one variable")] + AtLeastOneVariable, +} diff --git a/sumcheck/src/prover/mod.rs b/sumcheck/src/prover/mod.rs index e5b8995e4..743714bc3 100644 --- a/sumcheck/src/prover/mod.rs +++ b/sumcheck/src/prover/mod.rs @@ -3,4 +3,328 @@ // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. +use alloc::vec::Vec; +use air::LogUpGkrEvaluator; +use crypto::{ElementHasher, RandomCoin}; +use math::FieldElement; + +use crate::{ + evaluate_composition_poly, + utils::{CompressedUnivariatePolyEvals, EqFunction, MultiLinearPoly}, + FinalOpeningClaim, RoundProof, SumCheckProof, SumCheckRoundClaim, +}; + +mod error; +pub use error::SumCheckProverError; + +/// A sum-check prover for the input layer which can accommodate non-linear expressions in +/// the numerators of the LogUp relation. +pub fn sum_check_prove_higher_degree< + E: FieldElement, + C: RandomCoin, + H: ElementHasher, +>( + evaluator: &impl LogUpGkrEvaluator::BaseField>, + claim: E, + r_sum_check: E, + rand_merge: Vec, + log_up_randomness: Vec, + merged_mls: &mut Vec>, + mls: &mut Vec>, + coin: &mut C, +) -> Result, SumCheckProverError> { + let num_rounds = mls[0].num_variables(); + + let mut round_proofs = vec![]; + + // setup first round claim + let mut current_round_claim = SumCheckRoundClaim { eval_point: vec![], claim }; + let tensored_merge_randomness = EqFunction::ml_at(rand_merge.to_vec()).evaluations().to_vec(); + + // run the first round of the protocol + let round_poly_evals = sumcheck_round( + evaluator.clone(), + mls, + &merged_mls, + &log_up_randomness, + r_sum_check, + &tensored_merge_randomness, + ); + let round_poly_coefs = round_poly_evals.to_poly(current_round_claim.claim); + + // reseed with the s_0 polynomial + coin.reseed(H::hash_elements(&round_poly_coefs.0)); + round_proofs.push(RoundProof { round_poly_coefs }); + + for i in 1..num_rounds { + // generate random challenge r_i for the i-th round + let round_challenge = + coin.draw().map_err(|_| SumCheckProverError::FailedToGenerateChallenge)?; + + // compute the new reduced round claim + let new_round_claim = + reduce_claim(&round_proofs[i - 1], current_round_claim, round_challenge); + + // fold each multi-linear using the round challenge + mls.iter_mut() + .for_each(|ml| ml.bind_least_significant_variable(round_challenge)); + + // fold each merged multi-linear using the round challenge + merged_mls + .iter_mut() + .for_each(|ml| ml.bind_least_significant_variable(round_challenge)); + + // run the i-th round of the protocol using the folded multi-linears for the new reduced + // claim. This basically computes the s_i polynomial. 
+ let round_poly_evals = sumcheck_round( + evaluator.clone(), + mls, + merged_mls, + &log_up_randomness, + r_sum_check, + &tensored_merge_randomness, + ); + + // update the claim + current_round_claim = new_round_claim; + + let round_poly_coefs = round_poly_evals.to_poly(current_round_claim.claim); + + // reseed with the s_i polynomial + coin.reseed(H::hash_elements(&round_poly_coefs.0)); + let round_proof = RoundProof { round_poly_coefs }; + round_proofs.push(round_proof); + } + + // generate the last random challenge + let round_challenge = + coin.draw().map_err(|_| SumCheckProverError::FailedToGenerateChallenge)?; + + // fold each multi-linear using the last random round challenge + mls.iter_mut() + .for_each(|ml| ml.bind_least_significant_variable(round_challenge)); + // fold each merged multi-linear using the last random round challenge + merged_mls + .iter_mut() + .for_each(|ml| ml.bind_least_significant_variable(round_challenge)); + + let SumCheckRoundClaim { eval_point, claim: _claim } = + reduce_claim(&round_proofs[num_rounds - 1], current_round_claim, round_challenge); + + let openings = mls.iter_mut().map(|ml| ml.evaluations()[0]).collect(); + + Ok(SumCheckProof { + openings_claim: FinalOpeningClaim { eval_point, openings }, + round_proofs, + }) +} + +fn sumcheck_round( + evaluator: impl LogUpGkrEvaluator::BaseField>, + mls: &[MultiLinearPoly], + merged_mls: &[MultiLinearPoly], + log_up_randomness: &[E], + r_sum_check: E, + tensored_merge_randomness: &[E], +) -> CompressedUnivariatePolyEvals { + let num_ml = mls.len(); + let num_vars = mls[0].num_variables(); + let num_rounds = num_vars - 1; + let mut evals_one = vec![E::ZERO; num_ml]; + let mut evals_zero = vec![E::ZERO; num_ml]; + let mut evals_x = vec![E::ZERO; num_ml]; + + let mut deltas = vec![E::ZERO; num_ml]; + + let mut numerators = vec![E::ZERO; evaluator.get_num_fractions()]; + let mut denominators = vec![E::ZERO; evaluator.get_num_fractions()]; + + let total_evals = (0..1 << num_rounds).map(|i| { + let mut total_evals = vec![E::ZERO; evaluator.max_degree() as usize]; + + for (j, ml) in mls.iter().enumerate() { + evals_zero[j] = ml.evaluations()[2 * i]; + + evals_one[j] = ml.evaluations()[2 * i + 1]; + } + + let eq_at_zero = merged_mls[4].evaluations()[2 * i]; + let eq_at_one = merged_mls[4].evaluations()[2 * i + 1]; + + let p0 = merged_mls[0][2 * i + 1]; + let p1 = merged_mls[1][2 * i + 1]; + let q0 = merged_mls[2][2 * i + 1]; + let q1 = merged_mls[3][2 * i + 1]; + + total_evals[0] = comb_func(&p0, &p1, &q0, &q1, &eq_at_one, &r_sum_check); + + evals_zero + .iter() + .zip(evals_one.iter().zip(deltas.iter_mut().zip(evals_x.iter_mut()))) + .for_each(|(a0, (a1, (delta, evx)))| { + *delta = *a1 - *a0; + *evx = *a1; + }); + let eq_delta = eq_at_one - eq_at_zero; + let mut eq_x = eq_at_one; + + for e in total_evals.iter_mut().skip(1) { + evals_x.iter_mut().zip(deltas.iter()).for_each(|(evx, delta)| { + *evx += *delta; + }); + eq_x += eq_delta; + + evaluator.evaluate_query( + &evals_x, + &log_up_randomness, + &mut numerators, + &mut denominators, + ); + + *e = evaluate_composition_poly( + &numerators, + &denominators, + eq_x, + r_sum_check, + &tensored_merge_randomness, + ); + } + + total_evals + }); + + let evaluations = + total_evals.fold(vec![E::ZERO; evaluator.max_degree() as usize], |mut acc, evals| { + acc.iter_mut().zip(evals.iter()).for_each(|(a, ev)| *a += *ev); + acc + }); + + CompressedUnivariatePolyEvals(evaluations) +} + +/// Sum-check prover for non-linear multivariate polynomial of the simple LogUp-GKR. 
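+///
+/// The composed polynomial here is `comb_func`, i.e.
+/// $(p_0 q_1 + p_1 q_0 + r_{batch} \cdot q_0 q_1) \cdot eq$, which has degree 3 in the current
+/// sum-check variable. Each round polynomial $s_i$ is therefore determined by its evaluations
+/// at ${0, 1, 2, 3}$: the prover computes $s_i(0)$, $s_i(2)$ and $s_i(3)$ directly and recovers
+/// $s_i(1)$ as $claim - s_i(0)$, where $claim$ is the current reduced claim.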
+pub fn sumcheck_prove_plain< + E: FieldElement, + C: RandomCoin, + H: ElementHasher, +>( + num_rounds: usize, + claim: E, + r_batch: E, + p0: &mut MultiLinearPoly, + p1: &mut MultiLinearPoly, + q0: &mut MultiLinearPoly, + q1: &mut MultiLinearPoly, + eq: &mut MultiLinearPoly, + transcript: &mut C, +) -> Result<(SumCheckProof, E), SumCheckProverError> { + let mut round_proofs = vec![]; + + let mut claim = claim; + let mut challenges = vec![]; + for _ in 0..num_rounds { + let mut eval_point_0 = E::ZERO; + let mut eval_point_2 = E::ZERO; + let mut eval_point_3 = E::ZERO; + + let len = p0.num_evaluations() / 2; + for i in 0..len { + eval_point_0 += + comb_func(&p0[2 * i], &p1[2 * i], &q0[2 * i], &q1[2 * i], &eq[2 * i], &r_batch); + let p0_delta = p0[2 * i + 1] - p0[2 * i]; + let p1_delta = p1[2 * i + 1] - p1[2 * i]; + let q0_delta = q0[2 * i + 1] - q0[2 * i]; + let q1_delta = q1[2 * i + 1] - q1[2 * i]; + let eq_delta = eq[2 * i + 1] - eq[2 * i]; + + let mut p0_evx = p0[2 * i + 1] + p0_delta; + let mut p1_evx = p1[2 * i + 1] + p1_delta; + let mut q0_evx = q0[2 * i + 1] + q0_delta; + let mut q1_evx = q1[2 * i + 1] + q1_delta; + let mut eq_evx = eq[2 * i + 1] + eq_delta; + eval_point_2 += comb_func(&p0_evx, &p1_evx, &q0_evx, &q1_evx, &eq_evx, &r_batch); + + p0_evx += p0_delta; + p1_evx += p1_delta; + q0_evx += q0_delta; + q1_evx += q1_delta; + eq_evx += eq_delta; + + eval_point_3 += comb_func(&p0_evx, &p1_evx, &q0_evx, &q1_evx, &eq_evx, &r_batch); + } + + let evals = vec![ + claim - eval_point_0, // Optimization applied using the claim to reduce the number of sums computed + eval_point_2, + eval_point_3, + ]; + let poly = CompressedUnivariatePolyEvals(evals); + let round_poly_coefs = poly.to_poly(claim); + + // reseed with the s_i polynomial + transcript.reseed(H::hash_elements(&round_poly_coefs.0)); + let round_proof = RoundProof { + round_poly_coefs: round_poly_coefs.clone(), + }; + + round_proofs.push(round_proof); + + let round_challenge = + transcript.draw().map_err(|_| SumCheckProverError::FailedToGenerateChallenge)?; + + // compute the new reduced round claim + let new_claim = round_poly_coefs.evaluate_using_claim(&claim, &round_challenge); + + // fold each multi-linear using the round challenge + p0.bind_least_significant_variable(round_challenge); + p1.bind_least_significant_variable(round_challenge); + q0.bind_least_significant_variable(round_challenge); + q1.bind_least_significant_variable(round_challenge); + eq.bind_least_significant_variable(round_challenge); + + challenges.push(round_challenge); + + claim = new_claim; + } + + Ok(( + SumCheckProof { + openings_claim: FinalOpeningClaim { + eval_point: challenges, + openings: vec![p0[0], p1[0], q0[0], q1[0]], + }, + round_proofs, + }, + claim, + )) +} + +/// The non-linear composition polynomial of the LogUp-GKR protocol. +/// +/// This is the result of batching the `p_k` and `q_k` of section 3.2 in +/// https://eprint.iacr.org/2023/1284.pdf. +fn comb_func(p0: &E, p1: &E, q0: &E, q1: &E, eq: &E, r_batch: &E) -> E { + (*p0 * *q1 + *p1 * *q0 + *r_batch * *q0 * *q1) * *eq +} + +/// Reduces an old claim to a new claim using the round challenge. 
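+///
+/// The new claim is $s_i(r_i)$, i.e., the round polynomial (recovered from its compressed
+/// coefficients using the old claim) evaluated at the round challenge $r_i$, and the partial
+/// evaluation point is extended with $r_i$.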
+pub fn reduce_claim( + current_poly: &RoundProof, + current_round_claim: SumCheckRoundClaim, + round_challenge: E, +) -> SumCheckRoundClaim { + // evaluate the round polynomial at the round challenge to obtain the new claim + let new_claim = current_poly + .round_poly_coefs + .evaluate_using_claim(¤t_round_claim.claim, &round_challenge); + + // update the evaluation point using the round challenge + let mut new_partial_eval_point = current_round_claim.eval_point; + new_partial_eval_point.push(round_challenge); + + SumCheckRoundClaim { + eval_point: new_partial_eval_point, + claim: new_claim, + } +} diff --git a/sumcheck/src/utils/mod.rs b/sumcheck/src/utils/mod.rs index 41c63e1df..afb0810fd 100644 --- a/sumcheck/src/utils/mod.rs +++ b/sumcheck/src/utils/mod.rs @@ -4,5 +4,7 @@ // LICENSE file in the root directory of this source tree. mod univariate; +pub use univariate::{CompressedUnivariatePoly, CompressedUnivariatePolyEvals}; -mod multilinear; \ No newline at end of file +mod multilinear; +pub use multilinear::{EqFunction, MultiLinearPoly}; \ No newline at end of file diff --git a/sumcheck/src/utils/univariate.rs b/sumcheck/src/utils/univariate.rs index 868206213..361f97273 100644 --- a/sumcheck/src/utils/univariate.rs +++ b/sumcheck/src/utils/univariate.rs @@ -5,6 +5,7 @@ // LICENSE file in the root directory of this source tree. use alloc::vec::Vec; +use utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable}; use math::{batch_inversion, polynom, FieldElement}; @@ -17,7 +18,7 @@ use math::{batch_inversion, polynom, FieldElement}; /// This compressed representation is useful during the sum-check protocol as the full uncompressed /// representation can be recovered from the compressed one and the current sum-check round claim. #[derive(Clone, Debug)] -pub struct CompressedUnivariatePoly(Vec); +pub struct CompressedUnivariatePoly(pub(crate) Vec); impl CompressedUnivariatePoly { /// Evaluates a polynomial at a challenge point using a round claim. @@ -38,13 +39,28 @@ impl CompressedUnivariatePoly { } } +impl Serializable for CompressedUnivariatePoly { + fn write_into(&self, target: &mut W) { + self.0.write_into(target); + } +} + +impl Deserializable for CompressedUnivariatePoly +where + E: FieldElement, +{ + fn read_from(source: &mut R) -> Result { + Ok(Self(Deserializable::read_from(source)?)) + } +} + /// The evaluations of a univariate polynomial of degree n at 0, 1, ..., n with the evaluation at 0 /// omitted. /// /// This compressed representation is useful during the sum-check protocol as the full uncompressed /// representation can be recovered from the compressed one and the current sum-check round claim. #[derive(Clone, Debug)] -pub struct CompressedUnivariatePolyEvals(Vec); +pub struct CompressedUnivariatePolyEvals(pub(crate) Vec); impl CompressedUnivariatePolyEvals { /// Gives the coefficient representation of a polynomial represented in evaluation form. diff --git a/sumcheck/src/verifier/mod.rs b/sumcheck/src/verifier/mod.rs index a0918f3fe..71b3ceb02 100644 --- a/sumcheck/src/verifier/mod.rs +++ b/sumcheck/src/verifier/mod.rs @@ -3,3 +3,45 @@ // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. +use crate::{RoundProof, SumCheckRoundClaim}; +use crypto::{ElementHasher, RandomCoin}; +use math::FieldElement; + +/// Verifies a round of the sum-check protocol. 
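+///
+/// For each round proof, this reseeds the transcript with the round polynomial, draws the
+/// round challenge $r_i$ and reduces the current claim to $s_i(r_i)$ using
+/// [`CompressedUnivariatePoly::evaluate_using_claim`]. The output is the final reduced claim
+/// together with the accumulated evaluation point $(r_0, ..., r_{ν - 1})$.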
+pub fn verify_rounds( + claim: E, + round_proofs: &[RoundProof], + coin: &mut C, +) -> Result, SumCheckVerifierError> +where + E: FieldElement, + C: RandomCoin, + H: ElementHasher, +{ + let mut round_claim = claim; + let mut evaluation_point = vec![]; + for round_proof in round_proofs { + let round_poly_coefs = round_proof.round_poly_coefs.clone(); + coin.reseed(H::hash_elements(&round_poly_coefs.0)); + + let r = coin.draw().map_err(|_| SumCheckVerifierError::FailedToGenerateChallenge)?; + + round_claim = round_proof.round_poly_coefs.evaluate_using_claim(&round_claim, &r); + evaluation_point.push(r); + } + + Ok(SumCheckRoundClaim { + eval_point: evaluation_point, + claim: round_claim, + }) +} + +#[derive(Debug, thiserror::Error)] +pub enum SumCheckVerifierError { + #[error("the final evaluation check of sum-check failed")] + FinalEvaluationCheckFailed, + #[error("failed to generate round challenge")] + FailedToGenerateChallenge, + #[error("wrong opening point for the oracles")] + WrongOpeningPoint, +} From 16389d6a1ccd2fa6c0207a8c7875fe550123c472 Mon Sep 17 00:00:00 2001 From: Al-Kindi-0 <82364884+Al-Kindi-0@users.noreply.github.com> Date: Tue, 6 Aug 2024 16:43:14 +0200 Subject: [PATCH 03/28] tests: add sanity tests for utils --- air/src/air/mod.rs | 3 +- sumcheck/src/lib.rs | 15 ++++----- sumcheck/src/prover/mod.rs | 23 ++++++------- sumcheck/src/utils/mod.rs | 2 +- sumcheck/src/utils/multilinear.rs | 55 ++++++++++++++++++++++++++++--- sumcheck/src/utils/univariate.rs | 11 +++---- sumcheck/src/verifier/mod.rs | 3 +- 7 files changed, 78 insertions(+), 34 deletions(-) diff --git a/air/src/air/mod.rs b/air/src/air/mod.rs index f7e2411f3..1ae1981d5 100644 --- a/air/src/air/mod.rs +++ b/air/src/air/mod.rs @@ -602,7 +602,6 @@ pub trait Air: Send + Sync { } } - pub trait LogUpGkrEvaluator: Clone { /// Defines the base field of the evaluator. type BaseField: StarkField; @@ -666,4 +665,4 @@ pub enum LogUpGkrOracle { CurrentRow(usize), NextRow(usize), PeriodicValue(Vec), -} \ No newline at end of file +} diff --git a/sumcheck/src/lib.rs b/sumcheck/src/lib.rs index 60112033c..14541ae0d 100644 --- a/sumcheck/src/lib.rs +++ b/sumcheck/src/lib.rs @@ -3,12 +3,12 @@ // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. - #![no_std] use alloc::vec::Vec; -use math::FieldElement; + use ::utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable}; +use math::FieldElement; #[macro_use] extern crate alloc; @@ -124,7 +124,6 @@ pub struct SumCheckRoundClaim { pub claim: E, } - /// The non-linear composition polynomial of the LogUp-GKR protocol specific to the input layer. 
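+/// Given the evaluations `numerators` and `denominators` of the merged numerator and
+/// denominator multi-linears, this computes
+/// $eq \cdot ((p_0 q_1 + p_1 q_0) + r \cdot q_0 q_1)$, where $p_0, p_1$ (resp. $q_0, q_1$)
+/// are the projections of the numerators (resp. denominators) onto their least significant
+/// variable, each evaluated against the Lagrange kernel of the merge randomness, $eq$ is
+/// `eq_eval` and $r$ is `r_sum_check`.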
pub fn evaluate_composition_poly( numerators: &[E], @@ -140,17 +139,17 @@ pub fn evaluate_composition_poly( let (left_denominators, right_denominators) = denominators.project_least_significant_variable(); let eval_left_numerators = - left_numerators.evaluate_with_lagrange_kernel(&tensored_merge_randomness); + left_numerators.evaluate_with_lagrange_kernel(tensored_merge_randomness); let eval_right_numerators = - right_numerators.evaluate_with_lagrange_kernel(&tensored_merge_randomness); + right_numerators.evaluate_with_lagrange_kernel(tensored_merge_randomness); let eval_left_denominators = - left_denominators.evaluate_with_lagrange_kernel(&tensored_merge_randomness); + left_denominators.evaluate_with_lagrange_kernel(tensored_merge_randomness); let eval_right_denominators = - right_denominators.evaluate_with_lagrange_kernel(&tensored_merge_randomness); + right_denominators.evaluate_with_lagrange_kernel(tensored_merge_randomness); eq_eval * ((eval_left_numerators * eval_right_denominators + eval_right_numerators * eval_left_denominators) + eval_left_denominators * eval_right_denominators * r_sum_check) -} \ No newline at end of file +} diff --git a/sumcheck/src/prover/mod.rs b/sumcheck/src/prover/mod.rs index 743714bc3..afce5ed0a 100644 --- a/sumcheck/src/prover/mod.rs +++ b/sumcheck/src/prover/mod.rs @@ -20,6 +20,7 @@ pub use error::SumCheckProverError; /// A sum-check prover for the input layer which can accommodate non-linear expressions in /// the numerators of the LogUp relation. +#[allow(clippy::too_many_arguments)] pub fn sum_check_prove_higher_degree< E: FieldElement, C: RandomCoin, @@ -30,8 +31,8 @@ pub fn sum_check_prove_higher_degree< r_sum_check: E, rand_merge: Vec, log_up_randomness: Vec, - merged_mls: &mut Vec>, - mls: &mut Vec>, + merged_mls: &mut [MultiLinearPoly], + mls: &mut [MultiLinearPoly], coin: &mut C, ) -> Result, SumCheckProverError> { let num_rounds = mls[0].num_variables(); @@ -46,7 +47,7 @@ pub fn sum_check_prove_higher_degree< let round_poly_evals = sumcheck_round( evaluator.clone(), mls, - &merged_mls, + merged_mls, &log_up_randomness, r_sum_check, &tensored_merge_randomness, @@ -141,7 +142,7 @@ fn sumcheck_round( let mut denominators = vec![E::ZERO; evaluator.get_num_fractions()]; let total_evals = (0..1 << num_rounds).map(|i| { - let mut total_evals = vec![E::ZERO; evaluator.max_degree() as usize]; + let mut total_evals = vec![E::ZERO; evaluator.max_degree()]; for (j, ml) in mls.iter().enumerate() { evals_zero[j] = ml.evaluations()[2 * i]; @@ -177,7 +178,7 @@ fn sumcheck_round( evaluator.evaluate_query( &evals_x, - &log_up_randomness, + log_up_randomness, &mut numerators, &mut denominators, ); @@ -187,23 +188,23 @@ fn sumcheck_round( &denominators, eq_x, r_sum_check, - &tensored_merge_randomness, + tensored_merge_randomness, ); } total_evals }); - let evaluations = - total_evals.fold(vec![E::ZERO; evaluator.max_degree() as usize], |mut acc, evals| { - acc.iter_mut().zip(evals.iter()).for_each(|(a, ev)| *a += *ev); - acc - }); + let evaluations = total_evals.fold(vec![E::ZERO; evaluator.max_degree()], |mut acc, evals| { + acc.iter_mut().zip(evals.iter()).for_each(|(a, ev)| *a += *ev); + acc + }); CompressedUnivariatePolyEvals(evaluations) } /// Sum-check prover for non-linear multivariate polynomial of the simple LogUp-GKR. 
+#[allow(clippy::too_many_arguments)] pub fn sumcheck_prove_plain< E: FieldElement, C: RandomCoin, diff --git a/sumcheck/src/utils/mod.rs b/sumcheck/src/utils/mod.rs index afb0810fd..d57e05677 100644 --- a/sumcheck/src/utils/mod.rs +++ b/sumcheck/src/utils/mod.rs @@ -7,4 +7,4 @@ mod univariate; pub use univariate::{CompressedUnivariatePoly, CompressedUnivariatePolyEvals}; mod multilinear; -pub use multilinear::{EqFunction, MultiLinearPoly}; \ No newline at end of file +pub use multilinear::{EqFunction, MultiLinearPoly}; diff --git a/sumcheck/src/utils/multilinear.rs b/sumcheck/src/utils/multilinear.rs index d6907d14c..c45b993e2 100644 --- a/sumcheck/src/utils/multilinear.rs +++ b/sumcheck/src/utils/multilinear.rs @@ -5,8 +5,8 @@ use alloc::vec::Vec; use core::ops::Index; -use math::FieldElement; +use math::FieldElement; #[cfg(feature = "concurrent")] pub use rayon::prelude::*; @@ -178,7 +178,7 @@ impl EqFunction { /// Computes the evaluations of the Lagrange basis polynomials over the interpolating /// set ${0 , 1}^ν$ at $(r_0, ..., r_{ν - 1})$ i.e., the Lagrange kernel at $(r_0, ..., r_{ν - 1})$. -/// +/// /// TODO: This is a critical function and parallelizing would have a significant impact on /// performance. fn compute_lagrange_basis_evals_at(query: &[E]) -> Vec { @@ -198,9 +198,8 @@ fn compute_lagrange_basis_evals_at(query: &[E]) -> Vec { evals } -/// Computes the inner product in the extension field of two iterators that must yield the same -/// number of items. -/// +/// Computes the inner product in the extension field of two slices with the same number of items. +/// /// If `concurrent` feature is enabled, this function can make use of multi-threading. pub fn inner_product(x: &[E], y: &[E]) -> E { #[cfg(not(feature = "concurrent"))] @@ -217,6 +216,26 @@ pub fn inner_product(x: &[E], y: &[E]) -> E { // TESTS // ================================================================================================ +#[test] +fn multi_linear_sanity_checks() { + use math::fields::f64::BaseElement; + let nu = 3; + let n = 1 << nu; + + // the zero multi-linear should evaluate to zero + let p = MultiLinearPoly::from_evaluations(vec![BaseElement::ZERO; n]); + let challenge: Vec = rand_utils::rand_vector(nu); + + assert_eq!(BaseElement::ZERO, p.evaluate(&challenge)); + + // the constant multi-linear should be constant everywhere + let constant = rand_utils::rand_value(); + let p = MultiLinearPoly::from_evaluations(vec![constant; n]); + let challenge: Vec = rand_utils::rand_vector(nu); + + assert_eq!(constant, p.evaluate(&challenge)) +} + #[test] fn test_bind() { use math::fields::f64::BaseElement; @@ -227,3 +246,29 @@ fn test_bind() { p.bind_least_significant_variable(challenge); assert_eq!(p, expected) } + +#[test] +fn test_eq_function() { + use math::fields::f64::BaseElement; + use rand_utils::rand_value; + + let one = BaseElement::ONE; + + // Lagrange kernel is computed correctly + let r0 = rand_value(); + let r1 = rand_value(); + let eq_function = EqFunction::new(vec![r0, r1]); + + let expected = vec![(one - r0) * (one - r1), r0 * (one - r1), (one - r0) * r1, r0 * r1]; + + assert_eq!(expected, eq_function.evaluations()); + + // Lagrange kernel evaluation is correct + let q0 = rand_value(); + let q1 = rand_value(); + let tensored_query = vec![(one - q0) * (one - q1), q0 * (one - q1), (one - q0) * q1, q0 * q1]; + + let expected = inner_product(&tensored_query, &eq_function.evaluations()); + + assert_eq!(expected, eq_function.evaluate(&[q0, q1])) +} diff --git a/sumcheck/src/utils/univariate.rs 
b/sumcheck/src/utils/univariate.rs index 361f97273..399d19b1b 100644 --- a/sumcheck/src/utils/univariate.rs +++ b/sumcheck/src/utils/univariate.rs @@ -1,13 +1,12 @@ - // Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. use alloc::vec::Vec; -use utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable}; -use math::{batch_inversion, polynom, FieldElement}; +use math::{batch_inversion, polynom, FieldElement}; +use utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable}; // COMPRESSED UNIVARIATE POLYNOMIAL // ================================================================================================ @@ -39,13 +38,13 @@ impl CompressedUnivariatePoly { } } -impl Serializable for CompressedUnivariatePoly { +impl Serializable for CompressedUnivariatePoly { fn write_into(&self, target: &mut W) { self.0.write_into(target); } } -impl Deserializable for CompressedUnivariatePoly +impl Deserializable for CompressedUnivariatePoly where E: FieldElement, { @@ -265,4 +264,4 @@ fn test_poly_partial() { let r = rand_utils::rand_vector(1); assert_eq!(polynom::eval(&p, r[0]), poly_coeff.evaluate_using_claim(&claim, &r[0])) -} \ No newline at end of file +} diff --git a/sumcheck/src/verifier/mod.rs b/sumcheck/src/verifier/mod.rs index 71b3ceb02..bea98c44b 100644 --- a/sumcheck/src/verifier/mod.rs +++ b/sumcheck/src/verifier/mod.rs @@ -3,10 +3,11 @@ // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. -use crate::{RoundProof, SumCheckRoundClaim}; use crypto::{ElementHasher, RandomCoin}; use math::FieldElement; +use crate::{RoundProof, SumCheckRoundClaim}; + /// Verifies a round of the sum-check protocol. pub fn verify_rounds( claim: E, From 380aa1a0dc0fc95eb55e3b4a1530dec42e9841c3 Mon Sep 17 00:00:00 2001 From: Al-Kindi-0 <82364884+Al-Kindi-0@users.noreply.github.com> Date: Tue, 6 Aug 2024 18:21:51 +0200 Subject: [PATCH 04/28] doc: document sumcheck_round --- sumcheck/src/prover/mod.rs | 39 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/sumcheck/src/prover/mod.rs b/sumcheck/src/prover/mod.rs index afce5ed0a..24e79096f 100644 --- a/sumcheck/src/prover/mod.rs +++ b/sumcheck/src/prover/mod.rs @@ -121,6 +121,45 @@ pub fn sum_check_prove_higher_degree< }) } +/// Computes the polynomial +/// +/// $$ +/// s_i(X_i) := \sum_{(x_{i + 1},\cdots, x_{\nu - 1}) +/// w(r_0,\cdots, r_{i - 1}, X_i, x_{i + 1}, \cdots, x_{\nu - 1}). +/// $$ +/// +/// where +/// +/// $$ +/// w(x_0,\cdots, x_{\nu - 1}) := g(f_0((x_0,\cdots, x_{\nu - 1})), +/// \cdots , f_c((x_0,\cdots, x_{\nu - 1}))). +/// $$ +/// +/// Given a degree bound `d_max` for all variables, it suffices to compute the evaluations of `s_i` +/// at `d_max + 1` points. Given that $s_{i}(0) = s_{i}(1) - s_{i - 1}(r_{i - 1})$ it is sufficient +/// to compute the evaluations on only `d_max` points. +/// +/// The algorithm works by iterating over the variables $(x_{i + 1}, \cdots, x_{\nu - 1})$ in +/// ${0, 1}^{\nu - 1 - i}$. For each such tuple, we store the evaluations of the (folded) +/// multi-linears at $(0, x_{i + 1}, \cdots, x_{\nu - 1})$ and +/// $(1, x_{i + 1}, \cdots, x_{\nu - 1})$ in two arrays, `evals_zero` and `evals_one`. +/// Using `evals_one`, remember that we optimize evaluating at 0 away, we get the first evaluation +/// i.e., $s_i(1)$. 
+/// +/// For the remaining evaluations, we use the fact that the folded `f_i` is multi-linear and hence +/// we can write +/// +/// $$ +/// f_i(X_i, x_{i + 1}, \cdots, x_{\nu - 1}) = +/// (1 - X_i) . f_i(0, x_{i + 1}, \cdots, x_{\nu - 1}) + +/// X_i . f_i(1, x_{i + 1}, \cdots, x_{\nu - 1}) +/// $$ +/// +/// Note that we omitted writing the folding randomness for readability. +/// Since the evaluation domain is $\{0, 1, ... , d_max\}$, we can compute the evaluations based on +/// the previous one using only additions. This is the purpose of `deltas`, to hold the increments +/// added to each multi-linear to compute the evaluation at the next point, and `evals_x` to hold +/// the current evaluation at $x$ in $\{2, ... , d_max\}$. fn sumcheck_round( evaluator: impl LogUpGkrEvaluator::BaseField>, mls: &[MultiLinearPoly], From 7a1a99e25168150314b6fd529d62a9fcefc6811b Mon Sep 17 00:00:00 2001 From: Al-Kindi-0 <82364884+Al-Kindi-0@users.noreply.github.com> Date: Wed, 7 Aug 2024 08:16:15 +0200 Subject: [PATCH 05/28] feat: use SmallVec --- sumcheck/Cargo.toml | 1 + sumcheck/src/prover/mod.rs | 12 +++++----- sumcheck/src/utils/multilinear.rs | 8 +++++-- sumcheck/src/utils/univariate.rs | 37 +++++++++++++++++++++++++------ 4 files changed, 43 insertions(+), 15 deletions(-) diff --git a/sumcheck/Cargo.toml b/sumcheck/Cargo.toml index 60a690bd0..865f0dbf8 100644 --- a/sumcheck/Cargo.toml +++ b/sumcheck/Cargo.toml @@ -23,6 +23,7 @@ crypto = { version = "0.9", path = "../crypto", package = "winter-crypto", defau math = { version = "0.9", path = "../math", package = "winter-math", default-features = false } utils = { version = "0.9", path = "../utils/core", package = "winter-utils", default-features = false } rayon = { version = "1.8", optional = true } +smallvec = { version = "1.13", default-features = false } thiserror = { version = "1.0", git = "https://github.com/bitwalker/thiserror", branch = "no-std", default-features = false } [dev-dependencies] diff --git a/sumcheck/src/prover/mod.rs b/sumcheck/src/prover/mod.rs index 24e79096f..042da1529 100644 --- a/sumcheck/src/prover/mod.rs +++ b/sumcheck/src/prover/mod.rs @@ -45,7 +45,7 @@ pub fn sum_check_prove_higher_degree< // run the first round of the protocol let round_poly_evals = sumcheck_round( - evaluator.clone(), + evaluator, mls, merged_mls, &log_up_randomness, @@ -79,7 +79,7 @@ pub fn sum_check_prove_higher_degree< // run the i-th round of the protocol using the folded multi-linears for the new reduced // claim. This basically computes the s_i polynomial. let round_poly_evals = sumcheck_round( - evaluator.clone(), + evaluator, mls, merged_mls, &log_up_randomness, @@ -161,7 +161,7 @@ pub fn sum_check_prove_higher_degree< /// added to each multi-linear to compute the evaluation at the next point, and `evals_x` to hold /// the current evaluation at $x$ in $\{2, ... , d_max\}$. fn sumcheck_round( - evaluator: impl LogUpGkrEvaluator::BaseField>, + evaluator: &impl LogUpGkrEvaluator::BaseField>, mls: &[MultiLinearPoly], merged_mls: &[MultiLinearPoly], log_up_randomness: &[E], @@ -239,7 +239,7 @@ fn sumcheck_round( acc }); - CompressedUnivariatePolyEvals(evaluations) + CompressedUnivariatePolyEvals(evaluations.into()) } /// Sum-check prover for non-linear multivariate polynomial of the simple LogUp-GKR. 
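The `deltas` and `evals_x` bookkeeping described above can be illustrated with a small stand-alone sketch; plain integers replace the crate's field elements and the names are illustrative only, not the crate's.

// a multilinear restricted to one variable is a line, so its evaluations at
// x = 2, 3, ... follow from the one at x = 1 by repeatedly adding f1 - f0
fn line(f0: i64, f1: i64, x: i64) -> i64 {
    (1 - x) * f0 + x * f1
}

fn main() {
    let (f0, f1) = (7i64, 30); // evaluations at 0 and 1 for one fixed suffix
    let delta = f1 - f0;
    let mut eval_x = f1;
    for x in 2..=5i64 {
        eval_x += delta; // one addition per additional evaluation point
        assert_eq!(eval_x, line(f0, f1, x));
    }
}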
@@ -272,6 +272,7 @@ pub fn sumcheck_prove_plain< for i in 0..len { eval_point_0 += comb_func(&p0[2 * i], &p1[2 * i], &q0[2 * i], &q1[2 * i], &eq[2 * i], &r_batch); + let p0_delta = p0[2 * i + 1] - p0[2 * i]; let p1_delta = p1[2 * i + 1] - p1[2 * i]; let q0_delta = q0[2 * i + 1] - q0[2 * i]; @@ -290,7 +291,6 @@ pub fn sumcheck_prove_plain< q0_evx += q0_delta; q1_evx += q1_delta; eq_evx += eq_delta; - eval_point_3 += comb_func(&p0_evx, &p1_evx, &q0_evx, &q1_evx, &eq_evx, &r_batch); } @@ -299,7 +299,7 @@ pub fn sumcheck_prove_plain< eval_point_2, eval_point_3, ]; - let poly = CompressedUnivariatePolyEvals(evals); + let poly = CompressedUnivariatePolyEvals(evals.into()); let round_poly_coefs = poly.to_poly(claim); // reseed with the s_i polynomial diff --git a/sumcheck/src/utils/multilinear.rs b/sumcheck/src/utils/multilinear.rs index c45b993e2..0ad5f6a18 100644 --- a/sumcheck/src/utils/multilinear.rs +++ b/sumcheck/src/utils/multilinear.rs @@ -9,6 +9,7 @@ use core::ops::Index; use math::FieldElement; #[cfg(feature = "concurrent")] pub use rayon::prelude::*; +use smallvec::SmallVec; // MULTI-LINEAR POLYNOMIAL // ================================================================================================ @@ -102,6 +103,9 @@ impl Index for MultiLinearPoly { // EQ FUNCTION // ================================================================================================ +/// Maximal expected size of the point of a given Lagrange kernel. +const MAX_EQ_SIZE: usize = 25; + /// The EQ (equality) function is the binary function defined by /// /// $$ @@ -139,13 +143,13 @@ impl Index for MultiLinearPoly { /// as well as a method to evaluate $EQ^{~}((r_0, ..., r_{ν - 1}), (t_0, ..., t_{ν - 1})))$ for /// $(t_0, ..., t_{ν - 1}) ∈ 𝔽^ν$. pub struct EqFunction { - r: Vec, + r: SmallVec<[E; MAX_EQ_SIZE]>, } impl EqFunction { /// Creates a new [EqFunction]. pub fn new(r: Vec) -> Self { - let tmp = r.clone(); + let tmp = r.into(); EqFunction { r: tmp } } diff --git a/sumcheck/src/utils/univariate.rs b/sumcheck/src/utils/univariate.rs index 399d19b1b..8cd56e683 100644 --- a/sumcheck/src/utils/univariate.rs +++ b/sumcheck/src/utils/univariate.rs @@ -6,8 +6,15 @@ use alloc::vec::Vec; use math::{batch_inversion, polynom, FieldElement}; +use smallvec::SmallVec; use utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable}; +// CONSTANTS +// ================================================================================================ + +/// Maximum expected size of the round polynomials. This is needed for `SmallVec`. +const MAX_POLY_SIZE: usize = 10; + // COMPRESSED UNIVARIATE POLYNOMIAL // ================================================================================================ @@ -16,8 +23,8 @@ use utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serial /// /// This compressed representation is useful during the sum-check protocol as the full uncompressed /// representation can be recovered from the compressed one and the current sum-check round claim. -#[derive(Clone, Debug)] -pub struct CompressedUnivariatePoly(pub(crate) Vec); +#[derive(Clone, Debug, PartialEq)] +pub struct CompressedUnivariatePoly(pub(crate) SmallVec<[E; MAX_POLY_SIZE]>); impl CompressedUnivariatePoly { /// Evaluates a polynomial at a challenge point using a round claim. 
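A small sketch of what `evaluate_using_claim` does with the round claim, based on the relation 2*c0 + c1 + ... + c_{n-1} = claim documented further down in this series; plain integers stand in for field elements and the concrete values are arbitrary.

// evaluate a polynomial given by its coefficients, lowest degree first
fn horner(coeffs: &[i64], x: i64) -> i64 {
    coeffs.iter().rev().fold(0, |acc, c| acc * x + *c)
}

fn main() {
    // full round polynomial s(x) = 4 + 9x + 2x^2 + 6x^3
    let full = [4i64, 9, 2, 6];
    let claim = horner(&full, 0) + horner(&full, 1); // s(0) + s(1)

    // compressed form: the linear coefficient is omitted
    let compressed = [4i64, 2, 6];

    // recover c1 from claim = 2*c0 + c1 + c2 + ... + c_{n-1}
    let sum_stored: i64 = compressed.iter().sum();
    let c1 = claim - sum_stored - compressed[0];
    assert_eq!(c1, 9);

    // rebuild the full coefficient list and evaluate at a challenge point
    let mut coeffs = vec![compressed[0], c1];
    coeffs.extend_from_slice(&compressed[1..]);
    assert_eq!(horner(&coeffs, 17), horner(&full, 17));
}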
@@ -40,7 +47,8 @@ impl CompressedUnivariatePoly { impl Serializable for CompressedUnivariatePoly { fn write_into(&self, target: &mut W) { - self.0.write_into(target); + let vector: Vec = self.0.clone().into_vec(); + vector.write_into(target); } } @@ -49,7 +57,8 @@ where E: FieldElement, { fn read_from(source: &mut R) -> Result { - Ok(Self(Deserializable::read_from(source)?)) + let vector: Vec = Vec::::read_from(source)?; + Ok(Self(vector.into())) } } @@ -59,7 +68,7 @@ where /// This compressed representation is useful during the sum-check protocol as the full uncompressed /// representation can be recovered from the compressed one and the current sum-check round claim. #[derive(Clone, Debug)] -pub struct CompressedUnivariatePolyEvals(pub(crate) Vec); +pub struct CompressedUnivariatePolyEvals(pub(crate) SmallVec<[E; MAX_POLY_SIZE]>); impl CompressedUnivariatePolyEvals { /// Gives the coefficient representation of a polynomial represented in evaluation form. @@ -104,7 +113,7 @@ impl CompressedUnivariatePolyEvals { // append c0 to the coefficients of q to get the coefficients of p. The linear term // coefficient is removed as this can be recovered from the other coefficients using // the reduced claim. - let mut coefficients = Vec::with_capacity(self.0.len() + 1); + let mut coefficients = SmallVec::with_capacity(self.0.len() + 1); coefficients.push(c0); coefficients.extend_from_slice(&q_coefs[1..]); @@ -257,7 +266,7 @@ fn test_poly_partial() { let mut partial_evals = evals.clone(); partial_evals.remove(0); - let partial_poly = CompressedUnivariatePolyEvals(partial_evals); + let partial_poly = CompressedUnivariatePolyEvals(partial_evals.into()); let claim = evals[0] + evals[1]; let poly_coeff = partial_poly.to_poly(claim); @@ -265,3 +274,17 @@ fn test_poly_partial() { assert_eq!(polynom::eval(&p, r[0]), poly_coeff.evaluate_using_claim(&claim, &r[0])) } + +#[test] +fn test_serialization() { + use math::fields::f64::BaseElement; + + let original_poly = + CompressedUnivariatePoly(rand_utils::rand_array::().into()); + let poly_bytes = original_poly.to_bytes(); + + let deserialized_poly = + CompressedUnivariatePoly::::read_from_bytes(&poly_bytes).unwrap(); + + assert_eq!(original_poly, deserialized_poly) +} From 19010663822a6cf83becd72fe764bd25afdc7e4b Mon Sep 17 00:00:00 2001 From: Al-Kindi-0 <82364884+Al-Kindi-0@users.noreply.github.com> Date: Thu, 8 Aug 2024 16:55:55 +0200 Subject: [PATCH 06/28] docs: improve documentation of sum-check --- sumcheck/src/prover/mod.rs | 150 +++++++++++++++++++++++++++++++++++++ 1 file changed, 150 insertions(+) diff --git a/sumcheck/src/prover/mod.rs b/sumcheck/src/prover/mod.rs index 042da1529..a1042e96c 100644 --- a/sumcheck/src/prover/mod.rs +++ b/sumcheck/src/prover/mod.rs @@ -20,6 +20,122 @@ pub use error::SumCheckProverError; /// A sum-check prover for the input layer which can accommodate non-linear expressions in /// the numerators of the LogUp relation. 
+/// +/// The LogUp-GKR protocol in [1] is an IOP for the following statement +/// +/// $$ +/// \sum_{v_i, x_i} \frac{p_n\left(v_1, \cdots, v_{\mu}, x_1, \cdots, x_{\nu}\right)} +/// {q_n\left(v_1, \cdots, v_{\mu}, x_1, \cdots, x_{\nu}\right)} = C +/// $$ +/// +/// where: +/// +/// $$ +/// p_n\left(v_1, \cdots, v_{\mu}, x_1, \cdots, x_{\nu}\right) = +/// \sum_{w\in\{0, 1\}^\mu} EQ\left(\left(v_1, \cdots, v_{\mu}\right), +/// \left(w_1, \cdots, w_{\mu}\right)\right) +/// g_{[w]}\left(f_1\left(x_1, \cdots, x_{\nu}\right), +/// \cdots, f_l\left(x_1, \cdots, x_{\nu}\right)\right) +/// $$ +/// +/// and +/// +/// $$ +/// q_n\left(v_1, \cdots, v_{\mu}, x_1, \cdots, x_{\nu}\right) = +/// \sum_{w\in\{0, 1\}^\mu} EQ\left(\left(v_1, \cdots, v_{\mu}\right), +/// \left(w_1, \cdots, w_{\mu}\right)\right) +/// h_{[w]}\left(f_1\left(x_1, \cdots, x_{\nu}\right), +/// \cdots, f_l\left(x_1, \cdots, x_{\nu}\right)\right) +/// $$ +/// +/// and +/// +/// 1. $f_i$ are multi-linears. +/// 2. ${[w]} := \sum_i w_i \cdot 2^i$ and $w := (w_1, \cdots, w_{\mu})$. +/// 3. $h_{j}$ and $g_{j}$ are multi-variate polynomials for $j = 0, \cdots, 2^{\mu} - 1$. +/// 4. $n := \nu + \mu$ +/// +/// The sum above is evaluated using a layered circuit with the equation linking the input layer +/// values $p_n$ to the next layer values $p_{n-1}$ given by the following relations +/// +/// $$ +/// p_{n-1}\left(v_2, \cdots, v_{\mu}, x_1, \cdots, x_{\nu}\right) = \sum_{w_i, y_i} +/// EQ\left(\left(v_2, \cdots, v_{\mu}, x_1, \cdots, x_{\nu}\right), +/// \left(w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right)\right) +/// \cdot \left( p_n\left(1, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right) +/// \cdot q_n\left(0, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right) + +/// p_n\left(0, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right) \cdot +/// q_n\left(1, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right)\right) +/// $$ +/// +/// and +/// +/// $$ +/// q_{n-1}\left(v_2, \cdots, v_{\mu}, x_1, \cdots, x_{\nu}\right) = \sum_{w_i, y_i} +/// EQ\left(\left(v_2, \cdots, v_{\mu}, x_1, \cdots, x_{\nu}\right), +/// \left(w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right)\right) +/// \cdot \left( q_n\left(1, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right) +/// \cdot q_n\left(0, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right)\right) +/// $$ +/// +/// and similarly for all subsequent layers. +/// +/// These expressions are nothing but the equations in Section 3.2 in [1] but with the projection +/// happening at the first argument instead of the last. +/// +/// We can now note a few things about the above: +/// +/// 1. During the evaluation phase of the circuit, the prover needs to compute every tuple +/// $\left(p_k, q_k\right)$ for $k = n, \cdots, 1$ over the boolean hyper-cubes of +/// the appropriate sizes. In particular, the prover will have the evaluations +/// $\left(p_n, q_n\right)$ over $\{0, 1\}^{\mu + \nu}$ stored. +/// 2. Since $p_n$ and $q_n$ are linear in the first $\mu$ variables, we can directly use +/// the stored evaluations of $p_n$ and $q_n$ during the final sum-check, the one linking +/// the input layer to its next layer, for the first $\mu - 1$ rounds. This means that for +/// the first $\mu - 1$ rounds, the last sum-check protocol can be treated like the sum-checks +/// for the other layers i.e., the original degree $3$ sum-check of the LogUp-GKR paper. +/// 3. 
For the last $\nu$ rounds of the final sum-check, we can still use the evaluations of +/// $\left(p_k, q_k\right)$, or more precisely the result of their binding with the $\mu -1$ +/// round challenges from point 2 above, in order to optimize the computation of the sum-check +/// round polynomials but due to the non-linearity of $\left(p_n, q_n\right)$ in the last $\nu$ +/// variables, we will have to work with +/// +/// $$ +/// p_n\left(v_1, r_1, \cdots, r_{\mu - 1}, x_1, \cdots, x_{\nu}\right) = \sum_{w\in\{0, 1\}^{\mu}} +/// EQ\left(\left(v_1, r_1, \cdots, r_{\mu - 1}\right), \left(w_1, \cdots, w_{\mu}\right)\right) +/// g_{[w]}\left(f_1\left(x_1, \cdots, x_{\nu}\right), \cdots, +/// f_l\left(x_1, \cdots, x_{\nu}\right)\right) +/// $$ +/// +/// and +/// +/// $$ +/// q_n\left(v_1, r_1, \cdots, r_{\mu - 1}, x_1, \cdots, x_{\nu}\right) = \sum_{w\in\{0, 1\}^{\mu}} +/// EQ\left(\left(v_1, r_1, \cdots, r_{\mu - 1}\right), \left(w_1, \cdots, w_{\mu}\right)\right) +/// h_{[w]}\left(f_1\left(x_1, \cdots, x_{\nu}\right), \cdots, +/// f_l\left(x_1, \cdots, x_{\nu}\right)\right) +/// $$ +/// +/// where $r_i$ is the sum-check round challenges of the first $\mu - 1$ rounds. +/// +/// The current function executes the last $\nu$ parts of the sum-check and uses +/// the [`LogUpGkrEvaluator`] to evaluate $g_i$ and $h_i$ during the computation of the evaluations +/// of the round polynomials. +/// +/// As an optimization, the function uses the five polynomials, refered to as [`merged_mls`]: +/// +/// 1. $p_n\left(0, r_1, \cdots, r_{\mu - 1}, x_1, \cdots, x_{\nu}\right)$ +/// 2. $p_n\left(1, r_1, \cdots, r_{\mu - 1}, x_1, \cdots, x_{\nu}\right)$ +/// 3. $q_n\left(0, r_1, \cdots, r_{\mu - 1}, x_1, \cdots, x_{\nu}\right)$ +/// 4. $q_n\left(1, r_1, \cdots, r_{\mu - 1}, x_1, \cdots, x_{\nu}\right)$ +/// 5. $$\left(y_1, \cdots, y_{\nu}\right) \longrightarrow +/// EQ\left(\left(t_1, \cdots, t_{\mu + \nu - 1}\right), +/// \left(r_1, \cdots, r_{\mu - 1}, y_1, \cdots, y_{\nu}\right)\right) +/// $$ +/// where $t_i$ is the sum-check randomness from the previous layer. +/// +/// +/// [1]: https://eprint.iacr.org/2023/1284 #[allow(clippy::too_many_arguments)] pub fn sum_check_prove_higher_degree< E: FieldElement, @@ -41,6 +157,9 @@ pub fn sum_check_prove_higher_degree< // setup first round claim let mut current_round_claim = SumCheckRoundClaim { eval_point: vec![], claim }; + + // compute, for all (w_1, \cdots, w_{\mu - 1}) in {0, 1}^{\mu - 1}: + // EQ\left(\left(r_1, \cdots, r_{\mu - 1}\right), \left(w_1, \cdots, w_{\mu - 1}\right)\right) let tensored_merge_randomness = EqFunction::ml_at(rand_merge.to_vec()).evaluations().to_vec(); // run the first round of the protocol @@ -135,6 +254,8 @@ pub fn sum_check_prove_higher_degree< /// \cdots , f_c((x_0,\cdots, x_{\nu - 1}))). /// $$ /// +/// where `g` is the expression defined in the documentation of [`sum_check_prove_higher_degree`] +/// /// Given a degree bound `d_max` for all variables, it suffices to compute the evaluations of `s_i` /// at `d_max + 1` points. Given that $s_{i}(0) = s_{i}(1) - s_{i - 1}(r_{i - 1})$ it is sufficient /// to compute the evaluations on only `d_max` points. @@ -243,6 +364,35 @@ fn sumcheck_round( } /// Sum-check prover for non-linear multivariate polynomial of the simple LogUp-GKR. 
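The layer relations above amount to adding the fractions encoded by the pairs (p, q) projectively, after which the p-statement and the q-statement are batched with a verifier challenge; a tiny numeric illustration follows (plain integers, arbitrary sample values, not taken from the patch), mirroring the expression computed by the `comb_func` helper used by the plain sum-check prover.

fn main() {
    // the pair (p, q) encodes the fraction p / q; here p(0)/q(0) = 1/2 and p(1)/q(1) = 1/3
    let (p0, q0) = (1i64, 2i64);
    let (p1, q1) = (1i64, 3i64);

    // next-layer pair: numerator p(1)q(0) + p(0)q(1), denominator q(0)q(1)
    let p_next = p1 * q0 + p0 * q1;
    let q_next = q0 * q1;
    // sanity check: 1/2 + 1/3 = 5/6
    assert_eq!((p_next, q_next), (5, 6));

    // the two statements are batched with a verifier challenge r_batch and
    // weighted by the EQ term
    let (r_batch, eq) = (7i64, 4i64);
    let combined = (p0 * q1 + p1 * q0 + r_batch * q0 * q1) * eq;
    assert_eq!(combined, (p_next + r_batch * q_next) * eq);
}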
+/// +/// More specifically, the following function implements the logic of the sum-check prover as +/// described in Section 3.2 in [1], that is, given the verifier challenges, it implements the +/// sum-check prover for the following two statements +/// $$ +/// p_{\nu - \kappa}\left(v_{\kappa+1}, \cdots, v_{\nu}\right) = \sum_{w_i} +/// EQ\left(\left(v_{\kappa+1}, \cdots, v_{\nu}\right), \left(w_{\kappa+1}, \cdots, +/// w_{\nu}\right)\right) \cdot +/// \left( p_{\nu-\kappa+1}\left(1, w_{\kappa+1}, \cdots, w_{\nu}\right) \cdot +/// q_{\nu-\kappa+1}\left(0, w_{\kappa+1}, \cdots, w_{\nu}\right) + +/// p_{\nu-\kappa+1}\left(0, w_{\kappa+1}, \cdots, w_{\nu}\right) \cdot +/// q_{\nu-\kappa+1}\left(1, w_{\kappa+1}, \cdots, w_{\nu}\right)\right) +/// $$ +/// +/// and +/// +/// $$ +/// q_{\nu - \kappa}\left(v_{\kappa+1}, \cdots, v_{\nu}\right) = \sum_{w_i}EQ\left(\left(v_{\kappa+1}, +/// \cdots, v_{\nu}\right), \left(w_{\kappa+1}, \cdots, w_{\nu}\right)\right) \cdot +/// \left( q_{\nu-\kappa+1}\left(1, w_{\kappa+1}, \cdots, w_{\nu}\right) \cdot +/// q_{\nu-\kappa+1}\left(0, w_{\kappa+1}, \cdots, w_{\nu}\right)\right) +/// $$ +/// +/// for $\kappa = 1, \cdots, \nu - 1$ +/// +/// Instead of executing two runs of the sum-check protocol, a batching randomness `r_batch` is +/// sent by the verifier at the outset in order to batch the two statements. +/// +/// [1]: https://eprint.iacr.org/2023/1284 #[allow(clippy::too_many_arguments)] pub fn sumcheck_prove_plain< E: FieldElement, From 8a5721687359d43a0ef58387bb2b7dc28e8324e6 Mon Sep 17 00:00:00 2001 From: Al-Kindi-0 <82364884+Al-Kindi-0@users.noreply.github.com> Date: Fri, 9 Aug 2024 09:34:11 +0200 Subject: [PATCH 07/28] feat: add remaining functions for sum-check verifier --- sumcheck/src/lib.rs | 42 ++++++++-- sumcheck/src/verifier/mod.rs | 153 +++++++++++++++++++++++++++++++++-- 2 files changed, 184 insertions(+), 11 deletions(-) diff --git a/sumcheck/src/lib.rs b/sumcheck/src/lib.rs index 458b5fa59..f3b36d392 100644 --- a/sumcheck/src/lib.rs +++ b/sumcheck/src/lib.rs @@ -65,6 +65,28 @@ pub struct SumCheckProof { pub round_proofs: Vec>, } +impl Serializable for SumCheckProof +where + E: FieldElement, +{ + fn write_into(&self, target: &mut W) { + self.openings_claim.write_into(target); + self.round_proofs.write_into(target); + } +} + +impl Deserializable for SumCheckProof +where + E: FieldElement, +{ + fn read_from(source: &mut R) -> Result { + Ok(Self { + openings_claim: Deserializable::read_from(source)?, + round_proofs: Deserializable::read_from(source)?, + }) + } +} + /// A sum-check round proof. /// /// This represents the partial polynomial sent by the Prover during one of the rounds of the @@ -94,24 +116,32 @@ where } } -impl Serializable for SumCheckProof +/// A proof for the input circuit layer i.e., the final layer in the GKR protocol.
+#[derive(Debug, Clone)] +pub struct FinalLayerProof { + pub before_merge_proof: Vec>, + pub after_merge_proof: SumCheckProof, +} + +impl Serializable for FinalLayerProof where E: FieldElement, { fn write_into(&self, target: &mut W) { - self.openings_claim.write_into(target); - self.round_proofs.write_into(target); + let Self { before_merge_proof, after_merge_proof } = self; + before_merge_proof.write_into(target); + after_merge_proof.write_into(target); } } -impl Deserializable for SumCheckProof +impl Deserializable for FinalLayerProof where E: FieldElement, { fn read_from(source: &mut R) -> Result { Ok(Self { - openings_claim: Deserializable::read_from(source)?, - round_proofs: Deserializable::read_from(source)?, + before_merge_proof: Deserializable::read_from(source)?, + after_merge_proof: Deserializable::read_from(source)?, }) } } diff --git a/sumcheck/src/verifier/mod.rs b/sumcheck/src/verifier/mod.rs index bea98c44b..010788f17 100644 --- a/sumcheck/src/verifier/mod.rs +++ b/sumcheck/src/verifier/mod.rs @@ -3,17 +3,23 @@ // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. +use alloc::vec::Vec; + +use air::LogUpGkrEvaluator; use crypto::{ElementHasher, RandomCoin}; use math::FieldElement; -use crate::{RoundProof, SumCheckRoundClaim}; +use crate::{ + evaluate_composition_poly, EqFunction, FinalLayerProof, FinalOpeningClaim, RoundProof, + SumCheckProof, SumCheckRoundClaim, +}; -/// Verifies a round of the sum-check protocol. +/// Verifies a round of the sum-check protocol without executing the final check. pub fn verify_rounds( claim: E, round_proofs: &[RoundProof], coin: &mut C, -) -> Result, SumCheckVerifierError> +) -> Result, Error> where E: FieldElement, C: RandomCoin, @@ -25,7 +31,7 @@ where let round_poly_coefs = round_proof.round_poly_coefs.clone(); coin.reseed(H::hash_elements(&round_poly_coefs.0)); - let r = coin.draw().map_err(|_| SumCheckVerifierError::FailedToGenerateChallenge)?; + let r = coin.draw().map_err(|_| Error::FailedToGenerateChallenge)?; round_claim = round_proof.round_poly_coefs.evaluate_using_claim(&round_claim, &r); evaluation_point.push(r); @@ -37,8 +43,145 @@ where }) } +/// Verifies sum-check proofs, as part of the GKR proof, for all GKR layers except for the last one +/// i.e., the circuit input layer. +pub fn verify_sum_check_intermediate_layers< + E: FieldElement, + C: RandomCoin, + H: ElementHasher, +>( + proof: &SumCheckProof, + gkr_eval_point: &[E], + claim: (E, E), + transcript: &mut C, +) -> Result, Error> { + // generate challenge to batch sum-checks + transcript.reseed(H::hash_elements(&[claim.0, claim.1])); + let r_batch: E = transcript.draw().map_err(|_| Error::FailedToGenerateChallenge)?; + + // compute the claim for the batched sum-check + let reduced_claim = claim.0 + claim.1 * r_batch; + + let SumCheckProof { openings_claim, round_proofs } = proof; + + let final_round_claim = verify_rounds(reduced_claim, round_proofs, transcript)?; + assert_eq!(openings_claim.eval_point, final_round_claim.eval_point); + + let p0 = openings_claim.openings[0]; + let p1 = openings_claim.openings[1]; + let q0 = openings_claim.openings[2]; + let q1 = openings_claim.openings[3]; + + let eq = EqFunction::new(gkr_eval_point.to_vec()).evaluate(&openings_claim.eval_point); + + if (p0 * q1 + p1 * q0 + r_batch * q0 * q1) * eq != final_round_claim.claim { + return Err(Error::FinalEvaluationCheckFailed); + } + + Ok(openings_claim.clone()) +} + +/// Verifies the final sum-check proof of a GKR proof. 
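The `EqFunction` evaluations used in the check above are the Lagrange kernel at the evaluation point; here is a self-contained sketch for two variables, with plain integers in place of field elements and arbitrary sample values, independent of the crate's types.

// Lagrange kernel at r = (r0, r1) over {0,1}^2, least significant variable first
fn lagrange_kernel(r0: i64, r1: i64) -> [i64; 4] {
    [
        (1 - r0) * (1 - r1),
        r0 * (1 - r1),
        (1 - r0) * r1,
        r0 * r1,
    ]
}

fn main() {
    // evaluations of f over {0,1}^2, ordered f(0,0), f(1,0), f(0,1), f(1,1)
    let f = [3i64, 8, 2, 9];
    let (r0, r1) = (5i64, 11);
    let ker = lagrange_kernel(r0, r1);

    // evaluating the multilinear extension of f at (r0, r1) is an inner product
    let eval: i64 = f.iter().zip(ker.iter()).map(|(a, b)| a * b).sum();

    // cross-check by interpolating one variable at a time
    let g0 = (1 - r0) * f[0] + r0 * f[1];
    let g1 = (1 - r0) * f[2] + r0 * f[3];
    assert_eq!(eval, (1 - r1) * g0 + r1 * g1);
}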
+pub fn verify_sum_check_input_layer< + E: FieldElement, + C: RandomCoin, + H: ElementHasher, +>( + evaluator: &impl LogUpGkrEvaluator, + proof: &FinalLayerProof, + log_up_randomness: Vec, + gkr_eval_point: &[E], + claim: (E, E), + transcript: &mut C, +) -> Result, Error> { + let FinalLayerProof { before_merge_proof, after_merge_proof } = proof; + + // generate challenge to batch sum-checks + transcript.reseed(H::hash_elements(&[claim.0, claim.1])); + let r_batch: E = transcript.draw().map_err(|_| Error::FailedToGenerateChallenge)?; + + // compute the claim for the batched sum-check + let reduced_claim = claim.0 + claim.1 * r_batch; + + // verify the first half of the sum-check proof i.e., `before_merge_proof` + let SumCheckRoundClaim { eval_point: rand_merge, claim } = + verify_rounds(reduced_claim, before_merge_proof, transcript)?; + + // verify the second half of the sum-check proof i.e., `after_merge_proof` + verify_sum_check_final( + claim, + after_merge_proof, + rand_merge, + r_batch, + log_up_randomness, + gkr_eval_point, + evaluator, + transcript, + ) +} + +/// Verifies the second sum-check proof for the input layer, including the final check, and returns +/// a [`FinalOpeningClaim`] to the STARK verifier in order to verify the correctness of +/// the openings. +#[allow(clippy::too_many_arguments)] +fn verify_sum_check_final< + E: FieldElement, + C: RandomCoin, + H: ElementHasher, +>( + claim: E, + after_merge_proof: &SumCheckProof, + rand_merge: Vec, + r_batch: E, + log_up_randomness: Vec, + gkr_eval_point: &[E], + evaluator: &impl LogUpGkrEvaluator, + transcript: &mut C, +) -> Result, Error> { + let SumCheckProof { openings_claim, round_proofs } = after_merge_proof; + + let SumCheckRoundClaim { + eval_point: evaluation_point, + claim: claimed_evaluation, + } = verify_rounds(claim, round_proofs, transcript)?; + + if openings_claim.eval_point != evaluation_point { + return Err(Error::WrongOpeningPoint); + } + + let mut numerators = vec![E::ZERO; evaluator.get_num_fractions()]; + let mut denominators = vec![E::ZERO; evaluator.get_num_fractions()]; + + evaluator.evaluate_query( + &openings_claim.openings, + &log_up_randomness, + &mut numerators, + &mut denominators, + ); + + let lagrange_ker = EqFunction::new(gkr_eval_point.to_vec()); + let mut gkr_point = rand_merge.clone(); + + gkr_point.extend_from_slice(&openings_claim.eval_point.clone()); + let eq_eval = lagrange_ker.evaluate(&gkr_point); + let tensored_merge_randomness = EqFunction::ml_at(rand_merge.to_vec()).evaluations().to_vec(); + let expected_evaluation = evaluate_composition_poly( + &numerators, + &denominators, + eq_eval, + r_batch, + &tensored_merge_randomness, + ); + + if expected_evaluation != claimed_evaluation { + Err(Error::FinalEvaluationCheckFailed) + } else { + Ok(openings_claim.clone()) + } +} + #[derive(Debug, thiserror::Error)] -pub enum SumCheckVerifierError { +pub enum Error { #[error("the final evaluation check of sum-check failed")] FinalEvaluationCheckFailed, #[error("failed to generate round challenge")] From ff9e6fa19ab4a8a97afa4d83c12261a0ee47059f Mon Sep 17 00:00:00 2001 From: Al-Kindi-0 <82364884+Al-Kindi-0@users.noreply.github.com> Date: Fri, 9 Aug 2024 10:12:18 +0200 Subject: [PATCH 08/28] chore: move prover into sub-mod --- sumcheck/src/lib.rs | 8 + sumcheck/src/prover/mod.rs | 514 +------------------------------ sumcheck/src/utils/univariate.rs | 7 +- 3 files changed, 18 insertions(+), 511 deletions(-) diff --git a/sumcheck/src/lib.rs b/sumcheck/src/lib.rs index 458b5fa59..f3b36d392 100644 --- 
a/sumcheck/src/lib.rs +++ b/sumcheck/src/lib.rs @@ -154,6 +154,14 @@ pub struct SumCheckRoundClaim { pub claim: E, } +/// The non-linear composition polynomial of the LogUp-GKR protocol. +/// +/// This is the result of batching the `p_k` and `q_k` of section 3.2 in +/// https://eprint.iacr.org/2023/1284.pdf. +fn comb_func(p0: &E, p1: &E, q0: &E, q1: &E, eq: &E, r_batch: &E) -> E { + (*p0 * *q1 + *p1 * *q0 + *r_batch * *q0 * *q1) * *eq +} + /// The non-linear composition polynomial of the LogUp-GKR protocol specific to the input layer. pub fn evaluate_composition_poly( numerators: &[E], diff --git a/sumcheck/src/prover/mod.rs b/sumcheck/src/prover/mod.rs index a1042e96c..bdf1aebb9 100644 --- a/sumcheck/src/prover/mod.rs +++ b/sumcheck/src/prover/mod.rs @@ -3,518 +3,12 @@ // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. -use alloc::vec::Vec; +mod high_degree; +pub use high_degree::sum_check_prove_higher_degree; -use air::LogUpGkrEvaluator; -use crypto::{ElementHasher, RandomCoin}; -use math::FieldElement; - -use crate::{ - evaluate_composition_poly, - utils::{CompressedUnivariatePolyEvals, EqFunction, MultiLinearPoly}, - FinalOpeningClaim, RoundProof, SumCheckProof, SumCheckRoundClaim, -}; +mod plain; +pub use plain::sumcheck_prove_plain; mod error; pub use error::SumCheckProverError; -/// A sum-check prover for the input layer which can accommodate non-linear expressions in -/// the numerators of the LogUp relation. -/// -/// The LogUp-GKR protocol in [1] is an IOP for the following statement -/// -/// $$ -/// \sum_{v_i, x_i} \frac{p_n\left(v_1, \cdots, v_{\mu}, x_1, \cdots, x_{\nu}\right)} -/// {q_n\left(v_1, \cdots, v_{\mu}, x_1, \cdots, x_{\nu}\right)} = C -/// $$ -/// -/// where: -/// -/// $$ -/// p_n\left(v_1, \cdots, v_{\mu}, x_1, \cdots, x_{\nu}\right) = -/// \sum_{w\in\{0, 1\}^\mu} EQ\left(\left(v_1, \cdots, v_{\mu}\right), -/// \left(w_1, \cdots, w_{\mu}\right)\right) -/// g_{[w]}\left(f_1\left(x_1, \cdots, x_{\nu}\right), -/// \cdots, f_l\left(x_1, \cdots, x_{\nu}\right)\right) -/// $$ -/// -/// and -/// -/// $$ -/// q_n\left(v_1, \cdots, v_{\mu}, x_1, \cdots, x_{\nu}\right) = -/// \sum_{w\in\{0, 1\}^\mu} EQ\left(\left(v_1, \cdots, v_{\mu}\right), -/// \left(w_1, \cdots, w_{\mu}\right)\right) -/// h_{[w]}\left(f_1\left(x_1, \cdots, x_{\nu}\right), -/// \cdots, f_l\left(x_1, \cdots, x_{\nu}\right)\right) -/// $$ -/// -/// and -/// -/// 1. $f_i$ are multi-linears. -/// 2. ${[w]} := \sum_i w_i \cdot 2^i$ and $w := (w_1, \cdots, w_{\mu})$. -/// 3. $h_{j}$ and $g_{j}$ are multi-variate polynomials for $j = 0, \cdots, 2^{\mu} - 1$. -/// 4. 
$n := \nu + \mu$ -/// -/// The sum above is evaluated using a layered circuit with the equation linking the input layer -/// values $p_n$ to the next layer values $p_{n-1}$ given by the following relations -/// -/// $$ -/// p_{n-1}\left(v_2, \cdots, v_{\mu}, x_1, \cdots, x_{\nu}\right) = \sum_{w_i, y_i} -/// EQ\left(\left(v_2, \cdots, v_{\mu}, x_1, \cdots, x_{\nu}\right), -/// \left(w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right)\right) -/// \cdot \left( p_n\left(1, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right) -/// \cdot q_n\left(0, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right) + -/// p_n\left(0, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right) \cdot -/// q_n\left(1, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right)\right) -/// $$ -/// -/// and -/// -/// $$ -/// q_{n-1}\left(v_2, \cdots, v_{\mu}, x_1, \cdots, x_{\nu}\right) = \sum_{w_i, y_i} -/// EQ\left(\left(v_2, \cdots, v_{\mu}, x_1, \cdots, x_{\nu}\right), -/// \left(w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right)\right) -/// \cdot \left( q_n\left(1, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right) -/// \cdot q_n\left(0, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right)\right) -/// $$ -/// -/// and similarly for all subsequent layers. -/// -/// These expressions are nothing but the equations in Section 3.2 in [1] but with the projection -/// happening at the first argument instead of the last. -/// -/// We can now note a few things about the above: -/// -/// 1. During the evaluation phase of the circuit, the prover needs to compute every tuple -/// $\left(p_k, q_k\right)$ for $k = n, \cdots, 1$ over the boolean hyper-cubes of -/// the appropriate sizes. In particular, the prover will have the evaluations -/// $\left(p_n, q_n\right)$ over $\{0, 1\}^{\mu + \nu}$ stored. -/// 2. Since $p_n$ and $q_n$ are linear in the first $\mu$ variables, we can directly use -/// the stored evaluations of $p_n$ and $q_n$ during the final sum-check, the one linking -/// the input layer to its next layer, for the first $\mu - 1$ rounds. This means that for -/// the first $\mu - 1$ rounds, the last sum-check protocol can be treated like the sum-checks -/// for the other layers i.e., the original degree $3$ sum-check of the LogUp-GKR paper. -/// 3. For the last $\nu$ rounds of the final sum-check, we can still use the evaluations of -/// $\left(p_k, q_k\right)$, or more precisely the result of their binding with the $\mu -1$ -/// round challenges from point 2 above, in order to optimize the computation of the sum-check -/// round polynomials but due to the non-linearity of $\left(p_n, q_n\right)$ in the last $\nu$ -/// variables, we will have to work with -/// -/// $$ -/// p_n\left(v_1, r_1, \cdots, r_{\mu - 1}, x_1, \cdots, x_{\nu}\right) = \sum_{w\in\{0, 1\}^{\mu}} -/// EQ\left(\left(v_1, r_1, \cdots, r_{\mu - 1}\right), \left(w_1, \cdots, w_{\mu}\right)\right) -/// g_{[w]}\left(f_1\left(x_1, \cdots, x_{\nu}\right), \cdots, -/// f_l\left(x_1, \cdots, x_{\nu}\right)\right) -/// $$ -/// -/// and -/// -/// $$ -/// q_n\left(v_1, r_1, \cdots, r_{\mu - 1}, x_1, \cdots, x_{\nu}\right) = \sum_{w\in\{0, 1\}^{\mu}} -/// EQ\left(\left(v_1, r_1, \cdots, r_{\mu - 1}\right), \left(w_1, \cdots, w_{\mu}\right)\right) -/// h_{[w]}\left(f_1\left(x_1, \cdots, x_{\nu}\right), \cdots, -/// f_l\left(x_1, \cdots, x_{\nu}\right)\right) -/// $$ -/// -/// where $r_i$ is the sum-check round challenges of the first $\mu - 1$ rounds. 
-/// -/// The current function executes the last $\nu$ parts of the sum-check and uses -/// the [`LogUpGkrEvaluator`] to evaluate $g_i$ and $h_i$ during the computation of the evaluations -/// of the round polynomials. -/// -/// As an optimization, the function uses the five polynomials, refered to as [`merged_mls`]: -/// -/// 1. $p_n\left(0, r_1, \cdots, r_{\mu - 1}, x_1, \cdots, x_{\nu}\right)$ -/// 2. $p_n\left(1, r_1, \cdots, r_{\mu - 1}, x_1, \cdots, x_{\nu}\right)$ -/// 3. $q_n\left(0, r_1, \cdots, r_{\mu - 1}, x_1, \cdots, x_{\nu}\right)$ -/// 4. $q_n\left(1, r_1, \cdots, r_{\mu - 1}, x_1, \cdots, x_{\nu}\right)$ -/// 5. $$\left(y_1, \cdots, y_{\nu}\right) \longrightarrow -/// EQ\left(\left(t_1, \cdots, t_{\mu + \nu - 1}\right), -/// \left(r_1, \cdots, r_{\mu - 1}, y_1, \cdots, y_{\nu}\right)\right) -/// $$ -/// where $t_i$ is the sum-check randomness from the previous layer. -/// -/// -/// [1]: https://eprint.iacr.org/2023/1284 -#[allow(clippy::too_many_arguments)] -pub fn sum_check_prove_higher_degree< - E: FieldElement, - C: RandomCoin, - H: ElementHasher, ->( - evaluator: &impl LogUpGkrEvaluator::BaseField>, - claim: E, - r_sum_check: E, - rand_merge: Vec, - log_up_randomness: Vec, - merged_mls: &mut [MultiLinearPoly], - mls: &mut [MultiLinearPoly], - coin: &mut C, -) -> Result, SumCheckProverError> { - let num_rounds = mls[0].num_variables(); - - let mut round_proofs = vec![]; - - // setup first round claim - let mut current_round_claim = SumCheckRoundClaim { eval_point: vec![], claim }; - - // compute, for all (w_1, \cdots, w_{\mu - 1}) in {0, 1}^{\mu - 1}: - // EQ\left(\left(r_1, \cdots, r_{\mu - 1}\right), \left(w_1, \cdots, w_{\mu - 1}\right)\right) - let tensored_merge_randomness = EqFunction::ml_at(rand_merge.to_vec()).evaluations().to_vec(); - - // run the first round of the protocol - let round_poly_evals = sumcheck_round( - evaluator, - mls, - merged_mls, - &log_up_randomness, - r_sum_check, - &tensored_merge_randomness, - ); - let round_poly_coefs = round_poly_evals.to_poly(current_round_claim.claim); - - // reseed with the s_0 polynomial - coin.reseed(H::hash_elements(&round_poly_coefs.0)); - round_proofs.push(RoundProof { round_poly_coefs }); - - for i in 1..num_rounds { - // generate random challenge r_i for the i-th round - let round_challenge = - coin.draw().map_err(|_| SumCheckProverError::FailedToGenerateChallenge)?; - - // compute the new reduced round claim - let new_round_claim = - reduce_claim(&round_proofs[i - 1], current_round_claim, round_challenge); - - // fold each multi-linear using the round challenge - mls.iter_mut() - .for_each(|ml| ml.bind_least_significant_variable(round_challenge)); - - // fold each merged multi-linear using the round challenge - merged_mls - .iter_mut() - .for_each(|ml| ml.bind_least_significant_variable(round_challenge)); - - // run the i-th round of the protocol using the folded multi-linears for the new reduced - // claim. This basically computes the s_i polynomial. 
- let round_poly_evals = sumcheck_round( - evaluator, - mls, - merged_mls, - &log_up_randomness, - r_sum_check, - &tensored_merge_randomness, - ); - - // update the claim - current_round_claim = new_round_claim; - - let round_poly_coefs = round_poly_evals.to_poly(current_round_claim.claim); - - // reseed with the s_i polynomial - coin.reseed(H::hash_elements(&round_poly_coefs.0)); - let round_proof = RoundProof { round_poly_coefs }; - round_proofs.push(round_proof); - } - - // generate the last random challenge - let round_challenge = - coin.draw().map_err(|_| SumCheckProverError::FailedToGenerateChallenge)?; - - // fold each multi-linear using the last random round challenge - mls.iter_mut() - .for_each(|ml| ml.bind_least_significant_variable(round_challenge)); - // fold each merged multi-linear using the last random round challenge - merged_mls - .iter_mut() - .for_each(|ml| ml.bind_least_significant_variable(round_challenge)); - - let SumCheckRoundClaim { eval_point, claim: _claim } = - reduce_claim(&round_proofs[num_rounds - 1], current_round_claim, round_challenge); - - let openings = mls.iter_mut().map(|ml| ml.evaluations()[0]).collect(); - - Ok(SumCheckProof { - openings_claim: FinalOpeningClaim { eval_point, openings }, - round_proofs, - }) -} - -/// Computes the polynomial -/// -/// $$ -/// s_i(X_i) := \sum_{(x_{i + 1},\cdots, x_{\nu - 1}) -/// w(r_0,\cdots, r_{i - 1}, X_i, x_{i + 1}, \cdots, x_{\nu - 1}). -/// $$ -/// -/// where -/// -/// $$ -/// w(x_0,\cdots, x_{\nu - 1}) := g(f_0((x_0,\cdots, x_{\nu - 1})), -/// \cdots , f_c((x_0,\cdots, x_{\nu - 1}))). -/// $$ -/// -/// where `g` is the expression defined in the documentation of [`sum_check_prove_higher_degree`] -/// -/// Given a degree bound `d_max` for all variables, it suffices to compute the evaluations of `s_i` -/// at `d_max + 1` points. Given that $s_{i}(0) = s_{i}(1) - s_{i - 1}(r_{i - 1})$ it is sufficient -/// to compute the evaluations on only `d_max` points. -/// -/// The algorithm works by iterating over the variables $(x_{i + 1}, \cdots, x_{\nu - 1})$ in -/// ${0, 1}^{\nu - 1 - i}$. For each such tuple, we store the evaluations of the (folded) -/// multi-linears at $(0, x_{i + 1}, \cdots, x_{\nu - 1})$ and -/// $(1, x_{i + 1}, \cdots, x_{\nu - 1})$ in two arrays, `evals_zero` and `evals_one`. -/// Using `evals_one`, remember that we optimize evaluating at 0 away, we get the first evaluation -/// i.e., $s_i(1)$. -/// -/// For the remaining evaluations, we use the fact that the folded `f_i` is multi-linear and hence -/// we can write -/// -/// $$ -/// f_i(X_i, x_{i + 1}, \cdots, x_{\nu - 1}) = -/// (1 - X_i) . f_i(0, x_{i + 1}, \cdots, x_{\nu - 1}) + -/// X_i . f_i(1, x_{i + 1}, \cdots, x_{\nu - 1}) -/// $$ -/// -/// Note that we omitted writing the folding randomness for readability. -/// Since the evaluation domain is $\{0, 1, ... , d_max\}$, we can compute the evaluations based on -/// the previous one using only additions. This is the purpose of `deltas`, to hold the increments -/// added to each multi-linear to compute the evaluation at the next point, and `evals_x` to hold -/// the current evaluation at $x$ in $\{2, ... , d_max\}$. 
-fn sumcheck_round( - evaluator: &impl LogUpGkrEvaluator::BaseField>, - mls: &[MultiLinearPoly], - merged_mls: &[MultiLinearPoly], - log_up_randomness: &[E], - r_sum_check: E, - tensored_merge_randomness: &[E], -) -> CompressedUnivariatePolyEvals { - let num_ml = mls.len(); - let num_vars = mls[0].num_variables(); - let num_rounds = num_vars - 1; - let mut evals_one = vec![E::ZERO; num_ml]; - let mut evals_zero = vec![E::ZERO; num_ml]; - let mut evals_x = vec![E::ZERO; num_ml]; - - let mut deltas = vec![E::ZERO; num_ml]; - - let mut numerators = vec![E::ZERO; evaluator.get_num_fractions()]; - let mut denominators = vec![E::ZERO; evaluator.get_num_fractions()]; - - let total_evals = (0..1 << num_rounds).map(|i| { - let mut total_evals = vec![E::ZERO; evaluator.max_degree()]; - - for (j, ml) in mls.iter().enumerate() { - evals_zero[j] = ml.evaluations()[2 * i]; - - evals_one[j] = ml.evaluations()[2 * i + 1]; - } - - let eq_at_zero = merged_mls[4].evaluations()[2 * i]; - let eq_at_one = merged_mls[4].evaluations()[2 * i + 1]; - - let p0 = merged_mls[0][2 * i + 1]; - let p1 = merged_mls[1][2 * i + 1]; - let q0 = merged_mls[2][2 * i + 1]; - let q1 = merged_mls[3][2 * i + 1]; - - total_evals[0] = comb_func(&p0, &p1, &q0, &q1, &eq_at_one, &r_sum_check); - - evals_zero - .iter() - .zip(evals_one.iter().zip(deltas.iter_mut().zip(evals_x.iter_mut()))) - .for_each(|(a0, (a1, (delta, evx)))| { - *delta = *a1 - *a0; - *evx = *a1; - }); - let eq_delta = eq_at_one - eq_at_zero; - let mut eq_x = eq_at_one; - - for e in total_evals.iter_mut().skip(1) { - evals_x.iter_mut().zip(deltas.iter()).for_each(|(evx, delta)| { - *evx += *delta; - }); - eq_x += eq_delta; - - evaluator.evaluate_query( - &evals_x, - log_up_randomness, - &mut numerators, - &mut denominators, - ); - - *e = evaluate_composition_poly( - &numerators, - &denominators, - eq_x, - r_sum_check, - tensored_merge_randomness, - ); - } - - total_evals - }); - - let evaluations = total_evals.fold(vec![E::ZERO; evaluator.max_degree()], |mut acc, evals| { - acc.iter_mut().zip(evals.iter()).for_each(|(a, ev)| *a += *ev); - acc - }); - - CompressedUnivariatePolyEvals(evaluations.into()) -} - -/// Sum-check prover for non-linear multivariate polynomial of the simple LogUp-GKR. 
-/// -/// More specifically, the following function implements the logic of the sum-check prover as -/// described in Section 3.2 in [1], that is, given verifier challenges , the following implements -/// the sum-check prover for the following two statements -/// $$ -/// p_{\nu - \kappa}\left(v_{\kappa+1}, \cdots, v_{\nu}\right) = \sum_{w_i} -/// EQ\left(\left(v_{\kappa+1}, \cdots, v_{\nu}\right), \left(w_{\kappa+1}, \cdots, -/// w_{\nu}\right)\right) \cdot -/// \left( p_{\nu-\kappa+1}\left(1, w_{\kappa+1}, \cdots, w_{\nu}\right) \cdot -/// q_{\nu-\kappa+1}\left(0, w_{\kappa+1}, \cdots, w_{\nu}\right) + -/// p_{\nu-\kappa+1}\left(0, w_{\kappa+1}, \cdots, w_{\nu}\right) \cdot -/// q_{\nu-\kappa+1}\left(1, w_{\kappa+1}, \cdots, w_{\nu}\right)\right) -/// $$ -/// -/// and -/// -/// $$ -/// q_{\nu -k}\left(v_{\kappa+1}, \cdots, v_{\nu}\right) = \sum_{w_i}EQ\left(\left(v_{\kappa+1}, -/// \cdots, v_{\nu}\right), \left(w_{\kappa+1}, \cdots, w_{\nu }\right)\right) \cdot -/// \left( q_{\nu-\kappa+1}\left(1, w_{\kappa+1}, \cdots, w_{\nu}\right) \cdot -/// q_{\nu-\kappa+1}\left(0, w_{\kappa+1}, \cdots, w_{\nu}\right)\right) -/// $$ -/// -/// for $k = 1, \cdots, \nu - 1$ -/// -/// Instead of executing two runs of the sum-check protocol, a batching randomness `r_batch` is -/// sent by the verifier at the outset in order to batch the two statments. -/// -/// [1]: https://eprint.iacr.org/2023/1284 -#[allow(clippy::too_many_arguments)] -pub fn sumcheck_prove_plain< - E: FieldElement, - C: RandomCoin, - H: ElementHasher, ->( - num_rounds: usize, - claim: E, - r_batch: E, - p0: &mut MultiLinearPoly, - p1: &mut MultiLinearPoly, - q0: &mut MultiLinearPoly, - q1: &mut MultiLinearPoly, - eq: &mut MultiLinearPoly, - transcript: &mut C, -) -> Result<(SumCheckProof, E), SumCheckProverError> { - let mut round_proofs = vec![]; - - let mut claim = claim; - let mut challenges = vec![]; - for _ in 0..num_rounds { - let mut eval_point_0 = E::ZERO; - let mut eval_point_2 = E::ZERO; - let mut eval_point_3 = E::ZERO; - - let len = p0.num_evaluations() / 2; - for i in 0..len { - eval_point_0 += - comb_func(&p0[2 * i], &p1[2 * i], &q0[2 * i], &q1[2 * i], &eq[2 * i], &r_batch); - - let p0_delta = p0[2 * i + 1] - p0[2 * i]; - let p1_delta = p1[2 * i + 1] - p1[2 * i]; - let q0_delta = q0[2 * i + 1] - q0[2 * i]; - let q1_delta = q1[2 * i + 1] - q1[2 * i]; - let eq_delta = eq[2 * i + 1] - eq[2 * i]; - - let mut p0_evx = p0[2 * i + 1] + p0_delta; - let mut p1_evx = p1[2 * i + 1] + p1_delta; - let mut q0_evx = q0[2 * i + 1] + q0_delta; - let mut q1_evx = q1[2 * i + 1] + q1_delta; - let mut eq_evx = eq[2 * i + 1] + eq_delta; - eval_point_2 += comb_func(&p0_evx, &p1_evx, &q0_evx, &q1_evx, &eq_evx, &r_batch); - - p0_evx += p0_delta; - p1_evx += p1_delta; - q0_evx += q0_delta; - q1_evx += q1_delta; - eq_evx += eq_delta; - eval_point_3 += comb_func(&p0_evx, &p1_evx, &q0_evx, &q1_evx, &eq_evx, &r_batch); - } - - let evals = vec![ - claim - eval_point_0, // Optimization applied using the claim to reduce the number of sums computed - eval_point_2, - eval_point_3, - ]; - let poly = CompressedUnivariatePolyEvals(evals.into()); - let round_poly_coefs = poly.to_poly(claim); - - // reseed with the s_i polynomial - transcript.reseed(H::hash_elements(&round_poly_coefs.0)); - let round_proof = RoundProof { - round_poly_coefs: round_poly_coefs.clone(), - }; - - round_proofs.push(round_proof); - - let round_challenge = - transcript.draw().map_err(|_| SumCheckProverError::FailedToGenerateChallenge)?; - - // compute the new reduced round claim - let 
new_claim = round_poly_coefs.evaluate_using_claim(&claim, &round_challenge); - - // fold each multi-linear using the round challenge - p0.bind_least_significant_variable(round_challenge); - p1.bind_least_significant_variable(round_challenge); - q0.bind_least_significant_variable(round_challenge); - q1.bind_least_significant_variable(round_challenge); - eq.bind_least_significant_variable(round_challenge); - - challenges.push(round_challenge); - - claim = new_claim; - } - - Ok(( - SumCheckProof { - openings_claim: FinalOpeningClaim { - eval_point: challenges, - openings: vec![p0[0], p1[0], q0[0], q1[0]], - }, - round_proofs, - }, - claim, - )) -} - -/// The non-linear composition polynomial of the LogUp-GKR protocol. -/// -/// This is the result of batching the `p_k` and `q_k` of section 3.2 in -/// https://eprint.iacr.org/2023/1284.pdf. -fn comb_func(p0: &E, p1: &E, q0: &E, q1: &E, eq: &E, r_batch: &E) -> E { - (*p0 * *q1 + *p1 * *q0 + *r_batch * *q0 * *q1) * *eq -} - -/// Reduces an old claim to a new claim using the round challenge. -pub fn reduce_claim( - current_poly: &RoundProof, - current_round_claim: SumCheckRoundClaim, - round_challenge: E, -) -> SumCheckRoundClaim { - // evaluate the round polynomial at the round challenge to obtain the new claim - let new_claim = current_poly - .round_poly_coefs - .evaluate_using_claim(¤t_round_claim.claim, &round_challenge); - - // update the evaluation point using the round challenge - let mut new_partial_eval_point = current_round_claim.eval_point; - new_partial_eval_point.push(round_challenge); - - SumCheckRoundClaim { - eval_point: new_partial_eval_point, - claim: new_claim, - } -} diff --git a/sumcheck/src/utils/univariate.rs b/sumcheck/src/utils/univariate.rs index 8cd56e683..082a4daf9 100644 --- a/sumcheck/src/utils/univariate.rs +++ b/sumcheck/src/utils/univariate.rs @@ -12,7 +12,12 @@ use utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serial // CONSTANTS // ================================================================================================ -/// Maximum expected size of the round polynomials. This is needed for `SmallVec`. +/// Maximum expected size of the round polynomials. This is needed for `SmallVec`. The size of +/// the round polynomials is dictated by the degree of the non-linearity in the sum-check statement +/// which is direcly influenced by the maximal degrees of the numerators and denominators appearing +/// in the LogUp-GKR relation and equal to one plus the maximal degree of the numerators and +/// maximal degree of denominators. +/// The following value assumes that this degree is at most 10. 
const MAX_POLY_SIZE: usize = 10; // COMPRESSED UNIVARIATE POLYNOMIAL From 7e24f8f3ba9cb4054ffb4cdfc725e0d97ab020b7 Mon Sep 17 00:00:00 2001 From: Al-Kindi-0 <82364884+Al-Kindi-0@users.noreply.github.com> Date: Fri, 9 Aug 2024 10:17:44 +0200 Subject: [PATCH 09/28] chore: remove utils mod --- sumcheck/src/lib.rs | 7 +- sumcheck/src/prover/mod.rs | 1 - sumcheck/src/utils/mod.rs | 10 - sumcheck/src/utils/multilinear.rs | 278 ---------------------------- sumcheck/src/utils/univariate.rs | 295 ------------------------------ 5 files changed, 5 insertions(+), 586 deletions(-) delete mode 100644 sumcheck/src/utils/mod.rs delete mode 100644 sumcheck/src/utils/multilinear.rs delete mode 100644 sumcheck/src/utils/univariate.rs diff --git a/sumcheck/src/lib.rs b/sumcheck/src/lib.rs index f3b36d392..7c32abe63 100644 --- a/sumcheck/src/lib.rs +++ b/sumcheck/src/lib.rs @@ -19,8 +19,11 @@ pub use prover::*; mod verifier; pub use verifier::*; -mod utils; -pub use utils::*; +mod univariate; +pub use univariate::{CompressedUnivariatePoly, CompressedUnivariatePolyEvals}; + +mod multilinear; +pub use multilinear::{EqFunction, MultiLinearPoly}; /// Represents an opening claim at an evaluation point against a batch of oracles. /// diff --git a/sumcheck/src/prover/mod.rs b/sumcheck/src/prover/mod.rs index bdf1aebb9..13d35e551 100644 --- a/sumcheck/src/prover/mod.rs +++ b/sumcheck/src/prover/mod.rs @@ -11,4 +11,3 @@ pub use plain::sumcheck_prove_plain; mod error; pub use error::SumCheckProverError; - diff --git a/sumcheck/src/utils/mod.rs b/sumcheck/src/utils/mod.rs deleted file mode 100644 index d57e05677..000000000 --- a/sumcheck/src/utils/mod.rs +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. -// -// This source code is licensed under the MIT license found in the -// LICENSE file in the root directory of this source tree. - -mod univariate; -pub use univariate::{CompressedUnivariatePoly, CompressedUnivariatePolyEvals}; - -mod multilinear; -pub use multilinear::{EqFunction, MultiLinearPoly}; diff --git a/sumcheck/src/utils/multilinear.rs b/sumcheck/src/utils/multilinear.rs deleted file mode 100644 index 0ad5f6a18..000000000 --- a/sumcheck/src/utils/multilinear.rs +++ /dev/null @@ -1,278 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. -// -// This source code is licensed under the MIT license found in the -// LICENSE file in the root directory of this source tree. - -use alloc::vec::Vec; -use core::ops::Index; - -use math::FieldElement; -#[cfg(feature = "concurrent")] -pub use rayon::prelude::*; -use smallvec::SmallVec; - -// MULTI-LINEAR POLYNOMIAL -// ================================================================================================ - -/// Represents a multi-linear polynomial. -/// -/// The representation stores the evaluations of the polynomial over the boolean hyper-cube -/// ${0 , 1}^ν$. -#[derive(Clone, Debug, PartialEq)] -pub struct MultiLinearPoly { - evaluations: Vec, -} - -impl MultiLinearPoly { - /// Constructs a [`MultiLinearPoly`] from its evaluations over the boolean hyper-cube ${0 , 1}^ν$. - pub fn from_evaluations(evaluations: Vec) -> Self { - assert!(evaluations.len().is_power_of_two(), "A multi-linear polynomial should have a power of 2 number of evaluations over the Boolean hyper-cube"); - Self { evaluations } - } - - /// Returns the number of variables of the multi-linear polynomial. 
- pub fn num_variables(&self) -> usize { - self.evaluations.len().trailing_zeros() as usize - } - - /// Returns the evaluations over the boolean hyper-cube. - pub fn evaluations(&self) -> &[E] { - &self.evaluations - } - - /// Returns the number of evaluations. This is equal to the size of the boolean hyper-cube. - pub fn num_evaluations(&self) -> usize { - self.evaluations.len() - } - - /// Evaluate the multi-linear at some query $(r_0, ..., r_{ν - 1}) ∈ 𝔽^ν$. - /// - /// It first computes the evaluations of the Lagrange basis polynomials over the interpolating - /// set ${0 , 1}^ν$ at $(r_0, ..., r_{ν - 1})$ i.e., the Lagrange kernel at $(r_0, ..., r_{ν - 1})$. - /// The evaluation then is the inner product, indexed by ${0 , 1}^ν$, of the vector of - /// evaluations times the Lagrange kernel. - pub fn evaluate(&self, query: &[E]) -> E { - let tensored_query = compute_lagrange_basis_evals_at(query); - inner_product(&self.evaluations, &tensored_query) - } - - /// Similar to [`Self::evaluate`], except that the query was already turned into the Lagrange - /// kernel (i.e. the [`lagrange_ker::EqFunction`] evaluated at every point in the set - /// `${0 , 1}^ν$`). - /// - /// This is more efficient than [`Self::evaluate`] when multiple different [`MultiLinearPoly`] - /// need to be evaluated at the same query point. - pub fn evaluate_with_lagrange_kernel(&self, lagrange_kernel: &[E]) -> E { - inner_product(&self.evaluations, lagrange_kernel) - } - - /// Computes $f(r_0, y_1, ..., y_{ν - 1})$ using the linear interpolation formula - /// $(1 - r_0) * f(0, y_1, ..., y_{ν - 1}) + r_0 * f(1, y_1, ..., y_{ν - 1})$ and assigns - /// the resulting multi-linear, defined over a domain of half the size, to `self`. - pub fn bind_least_significant_variable(&mut self, round_challenge: E) { - let num_evals = self.evaluations.len() >> 1; - for i in 0..num_evals { - self.evaluations[i] = self.evaluations[i << 1] - + round_challenge * (self.evaluations[(i << 1) + 1] - self.evaluations[i << 1]); - } - self.evaluations.truncate(num_evals) - } - - /// Given the multilinear polynomial $f(y_0, y_1, ..., y_{ν - 1})$, returns two polynomials: - /// $f(0, y_1, ..., y_{ν - 1})$ and $f(1, y_1, ..., y_{ν - 1})$. - pub fn project_least_significant_variable(&self) -> (Self, Self) { - let mut p0 = Vec::with_capacity(self.num_evaluations() / 2); - let mut p1 = Vec::with_capacity(self.num_evaluations() / 2); - for chunk in self.evaluations.chunks_exact(2) { - p0.push(chunk[0]); - p1.push(chunk[1]); - } - - (MultiLinearPoly::from_evaluations(p0), MultiLinearPoly::from_evaluations(p1)) - } -} - -impl Index for MultiLinearPoly { - type Output = E; - - fn index(&self, index: usize) -> &E { - &(self.evaluations[index]) - } -} - -// EQ FUNCTION -// ================================================================================================ - -/// Maximal expected size of the point of a given Lagrange kernel. -const MAX_EQ_SIZE: usize = 25; - -/// The EQ (equality) function is the binary function defined by -/// -/// $$ -/// EQ: {0 , 1}^ν ⛌ {0 , 1}^ν ⇾ {0 , 1} -/// ((x_0, ..., x_{ν - 1}), (y_0, ..., y_{ν - 1})) ↦ \prod_{i = 0}^{ν - 1} (x_i * y_i + (1 - x_i) -/// * (1 - y_i)) -/// $$ -/// -/// Taking its multi-linear extension $EQ^{~}$, we can define a basis for the set of multi-linear -/// polynomials in ν variables by -/// $${EQ^{~}(., (y_0, ..., y_{ν - 1})): (y_0, ..., y_{ν - 1}) ∈ {0 , 1}^ν}$$ -/// where each basis function is a function of its first argument. 
This is called the Lagrange or -/// evaluation basis for evaluation set ${0 , 1}^ν$. -/// -/// Given a function $(f: {0 , 1}^ν ⇾ 𝔽)$, its multi-linear extension (i.e., the unique -/// mult-linear polynomial extending `f` to $(f^{~}: 𝔽^ν ⇾ 𝔽)$ and agreeing with it on ${0 , 1}^ν$) is -/// defined as the summation of the evaluations of f against the Lagrange basis. -/// More specifically, given $(r_0, ..., r_{ν - 1}) ∈ 𝔽^ν$, then: -/// -/// $$ -/// f^{~}(r_0, ..., r_{ν - 1}) = \sum_{(y_0, ..., y_{ν - 1}) ∈ {0 , 1}^ν} -/// f(y_0, ..., y_{ν - 1}) EQ^{~}((r_0, ..., r_{ν - 1}), (y_0, ..., y_{ν - 1})) -/// $$ -/// -/// We call the Lagrange kernel the evaluation of the EQ^{~} function at -/// $((r_0, ..., r_{ν - 1}), (y_0, ..., y_{ν - 1}))$ for all $(y_0, ..., y_{ν - 1}) ∈ {0 , 1}^ν$ for -/// a fixed $(r_0, ..., r_{ν - 1}) ∈ 𝔽^ν$. -/// -/// [`EqFunction`] represents EQ^{~} the multi-linear extension of -/// -/// $((y_0, ..., y_{ν - 1}) ↦ EQ((r_0, ..., r_{ν - 1}), (y_0, ..., y_{ν - 1})))$ -/// -/// and contains a method to generate the Lagrange kernel for defining evaluations of multi-linear -/// extensions of arbitrary functions $(f: {0 , 1}^ν ⇾ 𝔽)$ at a given point $(r_0, ..., r_{ν - 1})$ -/// as well as a method to evaluate $EQ^{~}((r_0, ..., r_{ν - 1}), (t_0, ..., t_{ν - 1})))$ for -/// $(t_0, ..., t_{ν - 1}) ∈ 𝔽^ν$. -pub struct EqFunction { - r: SmallVec<[E; MAX_EQ_SIZE]>, -} - -impl EqFunction { - /// Creates a new [EqFunction]. - pub fn new(r: Vec) -> Self { - let tmp = r.into(); - EqFunction { r: tmp } - } - - /// Computes $EQ((r_0, ..., r_{ν - 1}), (t_0, ..., t_{ν - 1})))$. - pub fn evaluate(&self, t: &[E]) -> E { - assert_eq!(self.r.len(), t.len()); - - (0..self.r.len()) - .map(|i| self.r[i] * t[i] + (E::ONE - self.r[i]) * (E::ONE - t[i])) - .fold(E::ONE, |acc, term| acc * term) - } - - /// Computes $EQ((r_0, ..., r_{ν - 1}), (y_0, ..., y_{ν - 1}))$ for all - /// $(y_0, ..., y_{ν - 1}) ∈ {0 , 1}^ν$ i.e., the Lagrange kernel at $r = (r_0, ..., r_{ν - 1})$. - pub fn evaluations(&self) -> Vec { - compute_lagrange_basis_evals_at(&self.r) - } - - /// Returns the evaluations of - /// $((y_0, ..., y_{ν - 1}) ↦ EQ^{~}((r_0, ..., r_{ν - 1}), (y_0, ..., y_{ν - 1})))$ - /// over ${0 , 1}^ν$. - pub fn ml_at(evaluation_point: Vec) -> MultiLinearPoly { - let eq_evals = EqFunction::new(evaluation_point.clone()).evaluations(); - MultiLinearPoly::from_evaluations(eq_evals) - } -} - -// HELPER -// ================================================================================================ - -/// Computes the evaluations of the Lagrange basis polynomials over the interpolating -/// set ${0 , 1}^ν$ at $(r_0, ..., r_{ν - 1})$ i.e., the Lagrange kernel at $(r_0, ..., r_{ν - 1})$. -/// -/// TODO: This is a critical function and parallelizing would have a significant impact on -/// performance. -fn compute_lagrange_basis_evals_at(query: &[E]) -> Vec { - let nu = query.len(); - let n = 1 << nu; - - let mut evals: Vec = vec![E::ONE; n]; - let mut size = 1; - for r_i in query.iter().rev() { - size *= 2; - for i in (0..size).rev().step_by(2) { - let scalar = evals[i / 2]; - evals[i] = scalar * *r_i; - evals[i - 1] = scalar - evals[i]; - } - } - evals -} - -/// Computes the inner product in the extension field of two slices with the same number of items. -/// -/// If `concurrent` feature is enabled, this function can make use of multi-threading. 
-pub fn inner_product(x: &[E], y: &[E]) -> E { - #[cfg(not(feature = "concurrent"))] - return x.iter().zip(y.iter()).fold(E::ZERO, |acc, (x_i, y_i)| acc + *x_i * *y_i); - - #[cfg(feature = "concurrent")] - return x - .par_iter() - .zip(y.par_iter()) - .map(|(x_i, y_i)| *x_i * *y_i) - .reduce(|| E::ZERO, |a, b| a + b); -} - -// TESTS -// ================================================================================================ - -#[test] -fn multi_linear_sanity_checks() { - use math::fields::f64::BaseElement; - let nu = 3; - let n = 1 << nu; - - // the zero multi-linear should evaluate to zero - let p = MultiLinearPoly::from_evaluations(vec![BaseElement::ZERO; n]); - let challenge: Vec = rand_utils::rand_vector(nu); - - assert_eq!(BaseElement::ZERO, p.evaluate(&challenge)); - - // the constant multi-linear should be constant everywhere - let constant = rand_utils::rand_value(); - let p = MultiLinearPoly::from_evaluations(vec![constant; n]); - let challenge: Vec = rand_utils::rand_vector(nu); - - assert_eq!(constant, p.evaluate(&challenge)) -} - -#[test] -fn test_bind() { - use math::fields::f64::BaseElement; - let mut p = MultiLinearPoly::from_evaluations(vec![BaseElement::ONE; 8]); - let expected = MultiLinearPoly::from_evaluations(vec![BaseElement::ONE; 4]); - - let challenge = rand_utils::rand_value(); - p.bind_least_significant_variable(challenge); - assert_eq!(p, expected) -} - -#[test] -fn test_eq_function() { - use math::fields::f64::BaseElement; - use rand_utils::rand_value; - - let one = BaseElement::ONE; - - // Lagrange kernel is computed correctly - let r0 = rand_value(); - let r1 = rand_value(); - let eq_function = EqFunction::new(vec![r0, r1]); - - let expected = vec![(one - r0) * (one - r1), r0 * (one - r1), (one - r0) * r1, r0 * r1]; - - assert_eq!(expected, eq_function.evaluations()); - - // Lagrange kernel evaluation is correct - let q0 = rand_value(); - let q1 = rand_value(); - let tensored_query = vec![(one - q0) * (one - q1), q0 * (one - q1), (one - q0) * q1, q0 * q1]; - - let expected = inner_product(&tensored_query, &eq_function.evaluations()); - - assert_eq!(expected, eq_function.evaluate(&[q0, q1])) -} diff --git a/sumcheck/src/utils/univariate.rs b/sumcheck/src/utils/univariate.rs deleted file mode 100644 index 082a4daf9..000000000 --- a/sumcheck/src/utils/univariate.rs +++ /dev/null @@ -1,295 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. -// -// This source code is licensed under the MIT license found in the -// LICENSE file in the root directory of this source tree. - -use alloc::vec::Vec; - -use math::{batch_inversion, polynom, FieldElement}; -use smallvec::SmallVec; -use utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable}; - -// CONSTANTS -// ================================================================================================ - -/// Maximum expected size of the round polynomials. This is needed for `SmallVec`. The size of -/// the round polynomials is dictated by the degree of the non-linearity in the sum-check statement -/// which is direcly influenced by the maximal degrees of the numerators and denominators appearing -/// in the LogUp-GKR relation and equal to one plus the maximal degree of the numerators and -/// maximal degree of denominators. -/// The following value assumes that this degree is at most 10. 
-const MAX_POLY_SIZE: usize = 10; - -// COMPRESSED UNIVARIATE POLYNOMIAL -// ================================================================================================ - -/// The coefficients of a univariate polynomial of degree n with the linear term coefficient -/// omitted. -/// -/// This compressed representation is useful during the sum-check protocol as the full uncompressed -/// representation can be recovered from the compressed one and the current sum-check round claim. -#[derive(Clone, Debug, PartialEq)] -pub struct CompressedUnivariatePoly(pub(crate) SmallVec<[E; MAX_POLY_SIZE]>); - -impl CompressedUnivariatePoly { - /// Evaluates a polynomial at a challenge point using a round claim. - /// - /// The round claim is used to recover the coefficient of the linear term using the relation - /// 2 * c0 + c1 + ... c_{n - 1} = claim. Using the complete list of coefficients, the polynomial - /// is then evaluated using Horner's method. - pub fn evaluate_using_claim(&self, claim: &E, challenge: &E) -> E { - // recover the coefficient of the linear term - let c1 = *claim - self.0.iter().fold(E::ZERO, |acc, term| acc + *term) - self.0[0]; - - // construct the full coefficient list - let mut complete_coefficients = vec![self.0[0], c1]; - complete_coefficients.extend_from_slice(&self.0[1..]); - - // evaluate - polynom::eval(&complete_coefficients, *challenge) - } -} - -impl Serializable for CompressedUnivariatePoly { - fn write_into(&self, target: &mut W) { - let vector: Vec = self.0.clone().into_vec(); - vector.write_into(target); - } -} - -impl Deserializable for CompressedUnivariatePoly -where - E: FieldElement, -{ - fn read_from(source: &mut R) -> Result { - let vector: Vec = Vec::::read_from(source)?; - Ok(Self(vector.into())) - } -} - -/// The evaluations of a univariate polynomial of degree n at 0, 1, ..., n with the evaluation at 0 -/// omitted. -/// -/// This compressed representation is useful during the sum-check protocol as the full uncompressed -/// representation can be recovered from the compressed one and the current sum-check round claim. -#[derive(Clone, Debug)] -pub struct CompressedUnivariatePolyEvals(pub(crate) SmallVec<[E; MAX_POLY_SIZE]>); - -impl CompressedUnivariatePolyEvals { - /// Gives the coefficient representation of a polynomial represented in evaluation form. - /// - /// Since the evaluation at 0 is omitted, we need to use the round claim to recover - /// the evaluation at 0 using the identity $p(0) + p(1) = claim$. - /// Now, we have that for any polynomial $p(x) = c0 + c1 * x + ... + c_{n-1} * x^{n - 1}$: - /// - /// 1. $p(0) = c0$. - /// 2. $p(x) = c0 + x * q(x) where q(x) = c1 + ... + c_{n-1} * x^{n - 2}$. - /// - /// This means that we can compute the evaluations of q at 1, ..., n - 1 using the evaluations - /// of p and thus reduce by 1 the size of the interpolation problem. - /// Once the coefficient of q are recovered, the c0 coefficient is appended to these and this - /// is precisely the coefficient representation of the original polynomial q. - /// Note that the coefficient of the linear term is removed as this coefficient can be recovered - /// from the remaining coefficients, again, using the round claim using the relation - /// $2 * c0 + c1 + ... c_{n - 1} = claim$. - pub fn to_poly(&self, round_claim: E) -> CompressedUnivariatePoly { - // construct the vector of interpolation points 1, ..., n - let n_minus_1 = self.0.len(); - let points = (1..=n_minus_1 as u32).map(E::BaseField::from).collect::>(); - - // construct their inverses. 
These will be needed for computing the evaluations - // of the q polynomial as well as for doing the interpolation on q - let points_inv = batch_inversion(&points); - - // compute the zeroth coefficient - let c0 = round_claim - self.0[0]; - - // compute the evaluations of q - let q_evals: Vec = self - .0 - .iter() - .enumerate() - .map(|(i, evals)| (*evals - c0).mul_base(points_inv[i])) - .collect(); - - // interpolate q - let q_coefs = multiply_by_inverse_vandermonde(&q_evals, &points_inv); - - // append c0 to the coefficients of q to get the coefficients of p. The linear term - // coefficient is removed as this can be recovered from the other coefficients using - // the reduced claim. - let mut coefficients = SmallVec::with_capacity(self.0.len() + 1); - coefficients.push(c0); - coefficients.extend_from_slice(&q_coefs[1..]); - - CompressedUnivariatePoly(coefficients) - } -} - -// HELPER FUNCTIONS -// ================================================================================================ - -/// Given a (row) vector `v`, computes the vector-matrix product `v * V^{-1}` where `V` is -/// the Vandermonde matrix over the points `1, ..., n` where `n` is the length of `v`. -/// The resulting vector will then be the coefficients of the minimal interpolating polynomial -/// through the points `(i+1, v[i])` for `i` in `0, ..., n - 1` -/// -/// The naive way would be to invert the matrix `V` and then compute the vector-matrix product -/// this will cost `O(n^3)` operations and `O(n^2)` memory. We can also try Gaussian elimination -/// but this is also worst case `O(n^3)` operations and `O(n^2)` memory. -/// In the following implementation, we use the fact that the points over which we are interpolating -/// is a set of equidistant points and thus both the Vandermonde matrix and its inverse can be -/// described by sparse linear recurrence equations. -/// More specifically, we use the representation given in [1], where `V^{-1}` is represented as -/// `U * M` where: -/// -/// 1. `M` is a lower triangular matrix where its entries are given by M(i, j) = M(i - 1, j) - M(i - -/// 1, j - 1) / (i - 1) with boundary conditions M(i, 1) = 1 and M(i, j) = 0 when j > i. -/// -/// 2. `U` is an upper triangular (involutory) matrix where its entries are given by U(i, j) = U(i, -/// j - 1) - U(i - 1, j - 1) with boundary condition U(1, j) = 1 and U(i, j) = 0 when i > j. -/// -/// Note that the matrix indexing in the formulas above matches the one in the reference and starts -/// from 1. -/// -/// The above implies that we can do the vector-matrix multiplication in `O(n^2)` and using only -/// `O(n)` space. -/// -/// [1]: https://link.springer.com/article/10.1007/s002110050360 -fn multiply_by_inverse_vandermonde( - vector: &[E], - nodes_inv: &[E::BaseField], -) -> Vec { - let res = multiply_by_u(vector); - multiply_by_m(&res, nodes_inv) -} - -/// Multiplies a (row) vector `v` by an upper triangular matrix `U` to compute `v * U`. -/// -/// `U` is an upper triangular (involutory) matrix with its entries given by -/// U(i, j) = U(i, j - 1) - U(i - 1, j - 1) -/// with boundary condition U(1, j) = 1 and U(i, j) = 0 when i > j. 
-fn multiply_by_u(vector: &[E]) -> Vec { - let n = vector.len(); - let mut previous_u_col = vec![E::BaseField::ZERO; n]; - previous_u_col[0] = E::BaseField::ONE; - let mut current_u_col = vec![E::BaseField::ZERO; n]; - current_u_col[0] = E::BaseField::ONE; - - let mut result: Vec = vec![E::ZERO; n]; - for (i, res) in result.iter_mut().enumerate() { - *res = vector[0]; - - for (j, v) in vector.iter().enumerate().take(i + 1).skip(1) { - let u_entry: E::BaseField = - compute_u_entry::(j, &mut previous_u_col, &mut current_u_col); - *res += v.mul_base(u_entry); - } - previous_u_col.clone_from(¤t_u_col); - } - - result -} - -/// Multiplies a (row) vector `v` by a lower triangular matrix `M` to compute `v * M`. -/// -/// `M` is a lower triangular matrix with its entries given by -/// M(i, j) = M(i - 1, j) - M(i - 1, j - 1) / (i - 1) -/// with boundary conditions M(i, 1) = 1 and M(i, j) = 0 when j > i. -fn multiply_by_m(vector: &[E], nodes_inv: &[E::BaseField]) -> Vec { - let n = vector.len(); - let mut previous_m_col = vec![E::BaseField::ONE; n]; - let mut current_m_col = vec![E::BaseField::ZERO; n]; - current_m_col[0] = E::BaseField::ONE; - - let mut result: Vec = vec![E::ZERO; n]; - result[0] = vector.iter().fold(E::ZERO, |acc, term| acc + *term); - for (i, res) in result.iter_mut().enumerate().skip(1) { - current_m_col = vec![E::BaseField::ZERO; n]; - - for (j, v) in vector.iter().enumerate().skip(i) { - let m_entry: E::BaseField = - compute_m_entry::(j, &mut previous_m_col, &mut current_m_col, nodes_inv[j - 1]); - *res += v.mul_base(m_entry); - } - previous_m_col.clone_from(¤t_m_col); - } - - result -} - -/// Returns the j-th entry of the i-th column of matrix `U` given the values of the (i - 1)-th -/// column. The i-th column is also updated with the just computed `U(i, j)` entry. -/// -/// `U` is an upper triangular (involutory) matrix with its entries given by -/// U(i, j) = U(i, j - 1) - U(i - 1, j - 1) -/// with boundary condition U(1, j) = 1 and U(i, j) = 0 when i > j. -fn compute_u_entry( - j: usize, - col_prev: &mut [E::BaseField], - col_cur: &mut [E::BaseField], -) -> E::BaseField { - let value = col_prev[j] - col_prev[j - 1]; - col_cur[j] = value; - value -} - -/// Returns the j-th entry of the i-th column of matrix `M` given the values of the (i - 1)-th -/// and the i-th columns. The i-th column is also updated with the just computed `M(i, j)` entry. -/// -/// `M` is a lower triangular matrix with its entries given by -/// M(i, j) = M(i - 1, j) - M(i - 1, j - 1) / (i - 1) -/// with boundary conditions M(i, 1) = 1 and M(i, j) = 0 when j > i. 
-fn compute_m_entry( - j: usize, - col_previous: &mut [E::BaseField], - col_current: &mut [E::BaseField], - node_inv: E::BaseField, -) -> E::BaseField { - let value = col_current[j - 1] - node_inv * col_previous[j - 1]; - col_current[j] = value; - value -} - -// TESTS -// ================================================================================================ - -#[test] -fn test_poly_partial() { - use math::fields::f64::BaseElement; - - let degree = 1000; - let mut points: Vec = vec![BaseElement::ZERO; degree]; - points - .iter_mut() - .enumerate() - .for_each(|(i, node)| *node = BaseElement::from(i as u32)); - - let p: Vec = rand_utils::rand_vector(degree); - let evals = polynom::eval_many(&p, &points); - - let mut partial_evals = evals.clone(); - partial_evals.remove(0); - - let partial_poly = CompressedUnivariatePolyEvals(partial_evals.into()); - let claim = evals[0] + evals[1]; - let poly_coeff = partial_poly.to_poly(claim); - - let r = rand_utils::rand_vector(1); - - assert_eq!(polynom::eval(&p, r[0]), poly_coeff.evaluate_using_claim(&claim, &r[0])) -} - -#[test] -fn test_serialization() { - use math::fields::f64::BaseElement; - - let original_poly = - CompressedUnivariatePoly(rand_utils::rand_array::().into()); - let poly_bytes = original_poly.to_bytes(); - - let deserialized_poly = - CompressedUnivariatePoly::::read_from_bytes(&poly_bytes).unwrap(); - - assert_eq!(original_poly, deserialized_poly) -} From 23044e8a7640d47d5098ed6233c9a3964abbc4bf Mon Sep 17 00:00:00 2001 From: Al-Kindi-0 <82364884+Al-Kindi-0@users.noreply.github.com> Date: Fri, 9 Aug 2024 10:21:51 +0200 Subject: [PATCH 10/28] chore: remove utils mod --- sumcheck/src/lib.rs | 4 +- sumcheck/src/multilinear.rs | 278 +++++++++++++++++++++ sumcheck/src/prover/high_degree.rs | 382 +++++++++++++++++++++++++++++ sumcheck/src/prover/plain.rs | 140 +++++++++++ sumcheck/src/univariate.rs | 295 ++++++++++++++++++++++ 5 files changed, 1097 insertions(+), 2 deletions(-) create mode 100644 sumcheck/src/multilinear.rs create mode 100644 sumcheck/src/prover/high_degree.rs create mode 100644 sumcheck/src/prover/plain.rs create mode 100644 sumcheck/src/univariate.rs diff --git a/sumcheck/src/lib.rs b/sumcheck/src/lib.rs index 7c32abe63..86ef044a6 100644 --- a/sumcheck/src/lib.rs +++ b/sumcheck/src/lib.rs @@ -161,8 +161,8 @@ pub struct SumCheckRoundClaim { /// /// This is the result of batching the `p_k` and `q_k` of section 3.2 in /// https://eprint.iacr.org/2023/1284.pdf. -fn comb_func(p0: &E, p1: &E, q0: &E, q1: &E, eq: &E, r_batch: &E) -> E { - (*p0 * *q1 + *p1 * *q0 + *r_batch * *q0 * *q1) * *eq +fn comb_func(p0: E, p1: E, q0: E, q1: E, eq: E, r_batch: E) -> E { + (p0 * q1 + p1 * q0 + r_batch * q0 * q1) * eq } /// The non-linear composition polynomial of the LogUp-GKR protocol specific to the input layer. diff --git a/sumcheck/src/multilinear.rs b/sumcheck/src/multilinear.rs new file mode 100644 index 000000000..0ad5f6a18 --- /dev/null +++ b/sumcheck/src/multilinear.rs @@ -0,0 +1,278 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree. + +use alloc::vec::Vec; +use core::ops::Index; + +use math::FieldElement; +#[cfg(feature = "concurrent")] +pub use rayon::prelude::*; +use smallvec::SmallVec; + +// MULTI-LINEAR POLYNOMIAL +// ================================================================================================ + +/// Represents a multi-linear polynomial. 
+/// +/// The representation stores the evaluations of the polynomial over the boolean hyper-cube +/// ${0 , 1}^ν$. +#[derive(Clone, Debug, PartialEq)] +pub struct MultiLinearPoly { + evaluations: Vec, +} + +impl MultiLinearPoly { + /// Constructs a [`MultiLinearPoly`] from its evaluations over the boolean hyper-cube ${0 , 1}^ν$. + pub fn from_evaluations(evaluations: Vec) -> Self { + assert!(evaluations.len().is_power_of_two(), "A multi-linear polynomial should have a power of 2 number of evaluations over the Boolean hyper-cube"); + Self { evaluations } + } + + /// Returns the number of variables of the multi-linear polynomial. + pub fn num_variables(&self) -> usize { + self.evaluations.len().trailing_zeros() as usize + } + + /// Returns the evaluations over the boolean hyper-cube. + pub fn evaluations(&self) -> &[E] { + &self.evaluations + } + + /// Returns the number of evaluations. This is equal to the size of the boolean hyper-cube. + pub fn num_evaluations(&self) -> usize { + self.evaluations.len() + } + + /// Evaluate the multi-linear at some query $(r_0, ..., r_{ν - 1}) ∈ 𝔽^ν$. + /// + /// It first computes the evaluations of the Lagrange basis polynomials over the interpolating + /// set ${0 , 1}^ν$ at $(r_0, ..., r_{ν - 1})$ i.e., the Lagrange kernel at $(r_0, ..., r_{ν - 1})$. + /// The evaluation then is the inner product, indexed by ${0 , 1}^ν$, of the vector of + /// evaluations times the Lagrange kernel. + pub fn evaluate(&self, query: &[E]) -> E { + let tensored_query = compute_lagrange_basis_evals_at(query); + inner_product(&self.evaluations, &tensored_query) + } + + /// Similar to [`Self::evaluate`], except that the query was already turned into the Lagrange + /// kernel (i.e. the [`lagrange_ker::EqFunction`] evaluated at every point in the set + /// `${0 , 1}^ν$`). + /// + /// This is more efficient than [`Self::evaluate`] when multiple different [`MultiLinearPoly`] + /// need to be evaluated at the same query point. + pub fn evaluate_with_lagrange_kernel(&self, lagrange_kernel: &[E]) -> E { + inner_product(&self.evaluations, lagrange_kernel) + } + + /// Computes $f(r_0, y_1, ..., y_{ν - 1})$ using the linear interpolation formula + /// $(1 - r_0) * f(0, y_1, ..., y_{ν - 1}) + r_0 * f(1, y_1, ..., y_{ν - 1})$ and assigns + /// the resulting multi-linear, defined over a domain of half the size, to `self`. + pub fn bind_least_significant_variable(&mut self, round_challenge: E) { + let num_evals = self.evaluations.len() >> 1; + for i in 0..num_evals { + self.evaluations[i] = self.evaluations[i << 1] + + round_challenge * (self.evaluations[(i << 1) + 1] - self.evaluations[i << 1]); + } + self.evaluations.truncate(num_evals) + } + + /// Given the multilinear polynomial $f(y_0, y_1, ..., y_{ν - 1})$, returns two polynomials: + /// $f(0, y_1, ..., y_{ν - 1})$ and $f(1, y_1, ..., y_{ν - 1})$. 
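+    ///
+    /// For illustration, for $ν = 2$ the evaluations are stored as
+    /// $[f(0, 0), f(1, 0), f(0, 1), f(1, 1)]$, so the two returned multi-linears have the
+    /// evaluations $[f(0, 0), f(0, 1)]$ and $[f(1, 0), f(1, 1)]$, respectively.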
+ pub fn project_least_significant_variable(&self) -> (Self, Self) { + let mut p0 = Vec::with_capacity(self.num_evaluations() / 2); + let mut p1 = Vec::with_capacity(self.num_evaluations() / 2); + for chunk in self.evaluations.chunks_exact(2) { + p0.push(chunk[0]); + p1.push(chunk[1]); + } + + (MultiLinearPoly::from_evaluations(p0), MultiLinearPoly::from_evaluations(p1)) + } +} + +impl Index for MultiLinearPoly { + type Output = E; + + fn index(&self, index: usize) -> &E { + &(self.evaluations[index]) + } +} + +// EQ FUNCTION +// ================================================================================================ + +/// Maximal expected size of the point of a given Lagrange kernel. +const MAX_EQ_SIZE: usize = 25; + +/// The EQ (equality) function is the binary function defined by +/// +/// $$ +/// EQ: {0 , 1}^ν ⛌ {0 , 1}^ν ⇾ {0 , 1} +/// ((x_0, ..., x_{ν - 1}), (y_0, ..., y_{ν - 1})) ↦ \prod_{i = 0}^{ν - 1} (x_i * y_i + (1 - x_i) +/// * (1 - y_i)) +/// $$ +/// +/// Taking its multi-linear extension $EQ^{~}$, we can define a basis for the set of multi-linear +/// polynomials in ν variables by +/// $${EQ^{~}(., (y_0, ..., y_{ν - 1})): (y_0, ..., y_{ν - 1}) ∈ {0 , 1}^ν}$$ +/// where each basis function is a function of its first argument. This is called the Lagrange or +/// evaluation basis for evaluation set ${0 , 1}^ν$. +/// +/// Given a function $(f: {0 , 1}^ν ⇾ 𝔽)$, its multi-linear extension (i.e., the unique +/// mult-linear polynomial extending `f` to $(f^{~}: 𝔽^ν ⇾ 𝔽)$ and agreeing with it on ${0 , 1}^ν$) is +/// defined as the summation of the evaluations of f against the Lagrange basis. +/// More specifically, given $(r_0, ..., r_{ν - 1}) ∈ 𝔽^ν$, then: +/// +/// $$ +/// f^{~}(r_0, ..., r_{ν - 1}) = \sum_{(y_0, ..., y_{ν - 1}) ∈ {0 , 1}^ν} +/// f(y_0, ..., y_{ν - 1}) EQ^{~}((r_0, ..., r_{ν - 1}), (y_0, ..., y_{ν - 1})) +/// $$ +/// +/// We call the Lagrange kernel the evaluation of the EQ^{~} function at +/// $((r_0, ..., r_{ν - 1}), (y_0, ..., y_{ν - 1}))$ for all $(y_0, ..., y_{ν - 1}) ∈ {0 , 1}^ν$ for +/// a fixed $(r_0, ..., r_{ν - 1}) ∈ 𝔽^ν$. +/// +/// [`EqFunction`] represents EQ^{~} the multi-linear extension of +/// +/// $((y_0, ..., y_{ν - 1}) ↦ EQ((r_0, ..., r_{ν - 1}), (y_0, ..., y_{ν - 1})))$ +/// +/// and contains a method to generate the Lagrange kernel for defining evaluations of multi-linear +/// extensions of arbitrary functions $(f: {0 , 1}^ν ⇾ 𝔽)$ at a given point $(r_0, ..., r_{ν - 1})$ +/// as well as a method to evaluate $EQ^{~}((r_0, ..., r_{ν - 1}), (t_0, ..., t_{ν - 1})))$ for +/// $(t_0, ..., t_{ν - 1}) ∈ 𝔽^ν$. +pub struct EqFunction { + r: SmallVec<[E; MAX_EQ_SIZE]>, +} + +impl EqFunction { + /// Creates a new [EqFunction]. + pub fn new(r: Vec) -> Self { + let tmp = r.into(); + EqFunction { r: tmp } + } + + /// Computes $EQ((r_0, ..., r_{ν - 1}), (t_0, ..., t_{ν - 1})))$. + pub fn evaluate(&self, t: &[E]) -> E { + assert_eq!(self.r.len(), t.len()); + + (0..self.r.len()) + .map(|i| self.r[i] * t[i] + (E::ONE - self.r[i]) * (E::ONE - t[i])) + .fold(E::ONE, |acc, term| acc * term) + } + + /// Computes $EQ((r_0, ..., r_{ν - 1}), (y_0, ..., y_{ν - 1}))$ for all + /// $(y_0, ..., y_{ν - 1}) ∈ {0 , 1}^ν$ i.e., the Lagrange kernel at $r = (r_0, ..., r_{ν - 1})$. + pub fn evaluations(&self) -> Vec { + compute_lagrange_basis_evals_at(&self.r) + } + + /// Returns the evaluations of + /// $((y_0, ..., y_{ν - 1}) ↦ EQ^{~}((r_0, ..., r_{ν - 1}), (y_0, ..., y_{ν - 1})))$ + /// over ${0 , 1}^ν$. 
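+    ///
+    /// For example, for $ν = 2$ and $r = (r_0, r_1)$ the returned multi-linear has the
+    /// evaluations $[(1 - r_0)(1 - r_1), r_0(1 - r_1), (1 - r_0)r_1, r_0 r_1]$.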
+ pub fn ml_at(evaluation_point: Vec) -> MultiLinearPoly { + let eq_evals = EqFunction::new(evaluation_point.clone()).evaluations(); + MultiLinearPoly::from_evaluations(eq_evals) + } +} + +// HELPER +// ================================================================================================ + +/// Computes the evaluations of the Lagrange basis polynomials over the interpolating +/// set ${0 , 1}^ν$ at $(r_0, ..., r_{ν - 1})$ i.e., the Lagrange kernel at $(r_0, ..., r_{ν - 1})$. +/// +/// TODO: This is a critical function and parallelizing would have a significant impact on +/// performance. +fn compute_lagrange_basis_evals_at(query: &[E]) -> Vec { + let nu = query.len(); + let n = 1 << nu; + + let mut evals: Vec = vec![E::ONE; n]; + let mut size = 1; + for r_i in query.iter().rev() { + size *= 2; + for i in (0..size).rev().step_by(2) { + let scalar = evals[i / 2]; + evals[i] = scalar * *r_i; + evals[i - 1] = scalar - evals[i]; + } + } + evals +} + +/// Computes the inner product in the extension field of two slices with the same number of items. +/// +/// If `concurrent` feature is enabled, this function can make use of multi-threading. +pub fn inner_product(x: &[E], y: &[E]) -> E { + #[cfg(not(feature = "concurrent"))] + return x.iter().zip(y.iter()).fold(E::ZERO, |acc, (x_i, y_i)| acc + *x_i * *y_i); + + #[cfg(feature = "concurrent")] + return x + .par_iter() + .zip(y.par_iter()) + .map(|(x_i, y_i)| *x_i * *y_i) + .reduce(|| E::ZERO, |a, b| a + b); +} + +// TESTS +// ================================================================================================ + +#[test] +fn multi_linear_sanity_checks() { + use math::fields::f64::BaseElement; + let nu = 3; + let n = 1 << nu; + + // the zero multi-linear should evaluate to zero + let p = MultiLinearPoly::from_evaluations(vec![BaseElement::ZERO; n]); + let challenge: Vec = rand_utils::rand_vector(nu); + + assert_eq!(BaseElement::ZERO, p.evaluate(&challenge)); + + // the constant multi-linear should be constant everywhere + let constant = rand_utils::rand_value(); + let p = MultiLinearPoly::from_evaluations(vec![constant; n]); + let challenge: Vec = rand_utils::rand_vector(nu); + + assert_eq!(constant, p.evaluate(&challenge)) +} + +#[test] +fn test_bind() { + use math::fields::f64::BaseElement; + let mut p = MultiLinearPoly::from_evaluations(vec![BaseElement::ONE; 8]); + let expected = MultiLinearPoly::from_evaluations(vec![BaseElement::ONE; 4]); + + let challenge = rand_utils::rand_value(); + p.bind_least_significant_variable(challenge); + assert_eq!(p, expected) +} + +#[test] +fn test_eq_function() { + use math::fields::f64::BaseElement; + use rand_utils::rand_value; + + let one = BaseElement::ONE; + + // Lagrange kernel is computed correctly + let r0 = rand_value(); + let r1 = rand_value(); + let eq_function = EqFunction::new(vec![r0, r1]); + + let expected = vec![(one - r0) * (one - r1), r0 * (one - r1), (one - r0) * r1, r0 * r1]; + + assert_eq!(expected, eq_function.evaluations()); + + // Lagrange kernel evaluation is correct + let q0 = rand_value(); + let q1 = rand_value(); + let tensored_query = vec![(one - q0) * (one - q1), q0 * (one - q1), (one - q0) * q1, q0 * q1]; + + let expected = inner_product(&tensored_query, &eq_function.evaluations()); + + assert_eq!(expected, eq_function.evaluate(&[q0, q1])) +} diff --git a/sumcheck/src/prover/high_degree.rs b/sumcheck/src/prover/high_degree.rs new file mode 100644 index 000000000..731c09ce6 --- /dev/null +++ b/sumcheck/src/prover/high_degree.rs @@ -0,0 +1,382 @@ +// Copyright 
(c) Facebook, Inc. and its affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree. + +use alloc::vec::Vec; + +use air::LogUpGkrEvaluator; +use crypto::{ElementHasher, RandomCoin}; +use math::FieldElement; + +use super::SumCheckProverError; +use crate::{ + comb_func, evaluate_composition_poly, CompressedUnivariatePolyEvals, EqFunction, + FinalOpeningClaim, MultiLinearPoly, RoundProof, SumCheckProof, SumCheckRoundClaim, +}; + +/// A sum-check prover for the input layer which can accommodate non-linear expressions in +/// the numerators of the LogUp relation. +/// +/// The LogUp-GKR protocol in [1] is an IOP for the following statement +/// +/// $$ +/// \sum_{v_i, x_i} \frac{p_n\left(v_1, \cdots, v_{\mu}, x_1, \cdots, x_{\nu}\right)} +/// {q_n\left(v_1, \cdots, v_{\mu}, x_1, \cdots, x_{\nu}\right)} = C +/// $$ +/// +/// where: +/// +/// $$ +/// p_n\left(v_1, \cdots, v_{\mu}, x_1, \cdots, x_{\nu}\right) = +/// \sum_{w\in\{0, 1\}^\mu} EQ\left(\left(v_1, \cdots, v_{\mu}\right), +/// \left(w_1, \cdots, w_{\mu}\right)\right) +/// g_{[w]}\left(f_1\left(x_1, \cdots, x_{\nu}\right), +/// \cdots, f_l\left(x_1, \cdots, x_{\nu}\right)\right) +/// $$ +/// +/// and +/// +/// $$ +/// q_n\left(v_1, \cdots, v_{\mu}, x_1, \cdots, x_{\nu}\right) = +/// \sum_{w\in\{0, 1\}^\mu} EQ\left(\left(v_1, \cdots, v_{\mu}\right), +/// \left(w_1, \cdots, w_{\mu}\right)\right) +/// h_{[w]}\left(f_1\left(x_1, \cdots, x_{\nu}\right), +/// \cdots, f_l\left(x_1, \cdots, x_{\nu}\right)\right) +/// $$ +/// +/// and +/// +/// 1. $f_i$ are multi-linears. +/// 2. ${[w]} := \sum_i w_i \cdot 2^i$ and $w := (w_1, \cdots, w_{\mu})$. +/// 3. $h_{j}$ and $g_{j}$ are multi-variate polynomials for $j = 0, \cdots, 2^{\mu} - 1$. +/// 4. $n := \nu + \mu$ +/// +/// The sum above is evaluated using a layered circuit with the equation linking the input layer +/// values $p_n$ to the next layer values $p_{n-1}$ given by the following relations +/// +/// $$ +/// p_{n-1}\left(v_2, \cdots, v_{\mu}, x_1, \cdots, x_{\nu}\right) = \sum_{w_i, y_i} +/// EQ\left(\left(v_2, \cdots, v_{\mu}, x_1, \cdots, x_{\nu}\right), +/// \left(w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right)\right) +/// \cdot \left( p_n\left(1, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right) +/// \cdot q_n\left(0, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right) + +/// p_n\left(0, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right) \cdot +/// q_n\left(1, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right)\right) +/// $$ +/// +/// and +/// +/// $$ +/// q_{n-1}\left(v_2, \cdots, v_{\mu}, x_1, \cdots, x_{\nu}\right) = \sum_{w_i, y_i} +/// EQ\left(\left(v_2, \cdots, v_{\mu}, x_1, \cdots, x_{\nu}\right), +/// \left(w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right)\right) +/// \cdot \left( q_n\left(1, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right) +/// \cdot q_n\left(0, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right)\right) +/// $$ +/// +/// and similarly for all subsequent layers. +/// +/// These expressions are nothing but the equations in Section 3.2 in [1] but with the projection +/// happening at the first argument instead of the last. +/// +/// We can now note a few things about the above: +/// +/// 1. During the evaluation phase of the circuit, the prover needs to compute every tuple +/// $\left(p_k, q_k\right)$ for $k = n, \cdots, 1$ over the boolean hyper-cubes of +/// the appropriate sizes. 
In particular, the prover will have the evaluations
+/// $\left(p_n, q_n\right)$ over $\{0, 1\}^{\mu + \nu}$ stored.
+/// 2. Since $p_n$ and $q_n$ are linear in the first $\mu$ variables, we can directly use
+/// the stored evaluations of $p_n$ and $q_n$ during the final sum-check, the one linking
+/// the input layer to its next layer, for the first $\mu - 1$ rounds. This means that for
+/// the first $\mu - 1$ rounds, the last sum-check protocol can be treated like the sum-checks
+/// for the other layers, i.e., the original degree $3$ sum-check of the LogUp-GKR paper.
+/// 3. For the last $\nu$ rounds of the final sum-check, we can still use the evaluations of
+/// $\left(p_k, q_k\right)$, or more precisely the result of their binding with the $\mu - 1$
+/// round challenges from point 2 above, in order to optimize the computation of the sum-check
+/// round polynomials, but due to the non-linearity of $\left(p_n, q_n\right)$ in the last $\nu$
+/// variables, we will have to work with
+///
+/// $$
+/// p_n\left(v_1, r_1, \cdots, r_{\mu - 1}, x_1, \cdots, x_{\nu}\right) = \sum_{w\in\{0, 1\}^{\mu}}
+/// EQ\left(\left(v_1, r_1, \cdots, r_{\mu - 1}\right), \left(w_1, \cdots, w_{\mu}\right)\right)
+/// g_{[w]}\left(f_1\left(x_1, \cdots, x_{\nu}\right), \cdots,
+/// f_l\left(x_1, \cdots, x_{\nu}\right)\right)
+/// $$
+///
+/// and
+///
+/// $$
+/// q_n\left(v_1, r_1, \cdots, r_{\mu - 1}, x_1, \cdots, x_{\nu}\right) = \sum_{w\in\{0, 1\}^{\mu}}
+/// EQ\left(\left(v_1, r_1, \cdots, r_{\mu - 1}\right), \left(w_1, \cdots, w_{\mu}\right)\right)
+/// h_{[w]}\left(f_1\left(x_1, \cdots, x_{\nu}\right), \cdots,
+/// f_l\left(x_1, \cdots, x_{\nu}\right)\right)
+/// $$
+///
+/// where the $r_i$ are the sum-check round challenges of the first $\mu - 1$ rounds.
+///
+/// The current function executes the last $\nu$ rounds of the sum-check and uses
+/// the [`LogUpGkrEvaluator`] to evaluate $g_i$ and $h_i$ during the computation of the evaluations
+/// of the round polynomials.
+///
+/// As an optimization, the function uses the five polynomials, referred to as [`merged_mls`]:
+///
+/// 1. $p_n\left(0, r_1, \cdots, r_{\mu - 1}, x_1, \cdots, x_{\nu}\right)$
+/// 2. $p_n\left(1, r_1, \cdots, r_{\mu - 1}, x_1, \cdots, x_{\nu}\right)$
+/// 3. $q_n\left(0, r_1, \cdots, r_{\mu - 1}, x_1, \cdots, x_{\nu}\right)$
+/// 4. $q_n\left(1, r_1, \cdots, r_{\mu - 1}, x_1, \cdots, x_{\nu}\right)$
+/// 5. $$\left(y_1, \cdots, y_{\nu}\right) \longrightarrow
+/// EQ\left(\left(t_1, \cdots, t_{\mu + \nu - 1}\right),
+/// \left(r_1, \cdots, r_{\mu - 1}, y_1, \cdots, y_{\nu}\right)\right)
+/// $$
+/// where $t_i$ denotes the sum-check randomness from the previous layer.
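+///
+/// Note on ordering (matching how these polynomials are used in the implementation below):
+/// `merged_mls[0]` through `merged_mls[3]` are expected to hold the multi-linears (1)-(4)
+/// above, in that order, while `merged_mls[4]` holds the EQ multi-linear (5); the factor
+/// $EQ\left(\left(r_1, \cdots, r_{\mu - 1}\right), \left(w_1, \cdots, w_{\mu - 1}\right)\right)$
+/// is computed once up front as `tensored_merge_randomness`.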
+/// +/// +/// [1]: https://eprint.iacr.org/2023/1284 +#[allow(clippy::too_many_arguments)] +pub fn sum_check_prove_higher_degree< + E: FieldElement, + C: RandomCoin, + H: ElementHasher, +>( + evaluator: &impl LogUpGkrEvaluator::BaseField>, + claim: E, + r_sum_check: E, + rand_merge: Vec, + log_up_randomness: Vec, + merged_mls: &mut [MultiLinearPoly], + mls: &mut [MultiLinearPoly], + coin: &mut C, +) -> Result, SumCheckProverError> { + let num_rounds = mls[0].num_variables(); + + let mut round_proofs = vec![]; + + // setup first round claim + let mut current_round_claim = SumCheckRoundClaim { eval_point: vec![], claim }; + + // compute, for all (w_1, \cdots, w_{\mu - 1}) in {0, 1}^{\mu - 1}: + // EQ\left(\left(r_1, \cdots, r_{\mu - 1}\right), \left(w_1, \cdots, w_{\mu - 1}\right)\right) + let tensored_merge_randomness = EqFunction::ml_at(rand_merge.to_vec()).evaluations().to_vec(); + + // run the first round of the protocol + let round_poly_evals = sumcheck_round( + evaluator, + mls, + merged_mls, + &log_up_randomness, + r_sum_check, + &tensored_merge_randomness, + ); + let round_poly_coefs = round_poly_evals.to_poly(current_round_claim.claim); + + // reseed with the s_0 polynomial + coin.reseed(H::hash_elements(&round_poly_coefs.0)); + round_proofs.push(RoundProof { round_poly_coefs }); + + for i in 1..num_rounds { + // generate random challenge r_i for the i-th round + let round_challenge = + coin.draw().map_err(|_| SumCheckProverError::FailedToGenerateChallenge)?; + + // compute the new reduced round claim + let new_round_claim = + reduce_claim(&round_proofs[i - 1], current_round_claim, round_challenge); + + // fold each multi-linear using the round challenge + mls.iter_mut() + .for_each(|ml| ml.bind_least_significant_variable(round_challenge)); + + // fold each merged multi-linear using the round challenge + merged_mls + .iter_mut() + .for_each(|ml| ml.bind_least_significant_variable(round_challenge)); + + // run the i-th round of the protocol using the folded multi-linears for the new reduced + // claim. This basically computes the s_i polynomial. + let round_poly_evals = sumcheck_round( + evaluator, + mls, + merged_mls, + &log_up_randomness, + r_sum_check, + &tensored_merge_randomness, + ); + + // update the claim + current_round_claim = new_round_claim; + + let round_poly_coefs = round_poly_evals.to_poly(current_round_claim.claim); + + // reseed with the s_i polynomial + coin.reseed(H::hash_elements(&round_poly_coefs.0)); + let round_proof = RoundProof { round_poly_coefs }; + round_proofs.push(round_proof); + } + + // generate the last random challenge + let round_challenge = + coin.draw().map_err(|_| SumCheckProverError::FailedToGenerateChallenge)?; + + // fold each multi-linear using the last random round challenge + mls.iter_mut() + .for_each(|ml| ml.bind_least_significant_variable(round_challenge)); + // fold each merged multi-linear using the last random round challenge + merged_mls + .iter_mut() + .for_each(|ml| ml.bind_least_significant_variable(round_challenge)); + + let SumCheckRoundClaim { eval_point, claim: _claim } = + reduce_claim(&round_proofs[num_rounds - 1], current_round_claim, round_challenge); + + let openings = mls.iter_mut().map(|ml| ml.evaluations()[0]).collect(); + + Ok(SumCheckProof { + openings_claim: FinalOpeningClaim { eval_point, openings }, + round_proofs, + }) +} + +/// Computes the polynomial +/// +/// $$ +/// s_i(X_i) := \sum_{(x_{i + 1},\cdots, x_{\nu - 1}) +/// w(r_0,\cdots, r_{i - 1}, X_i, x_{i + 1}, \cdots, x_{\nu - 1}). 
+/// $$ +/// +/// where +/// +/// $$ +/// w(x_0,\cdots, x_{\nu - 1}) := g(f_0((x_0,\cdots, x_{\nu - 1})), +/// \cdots , f_c((x_0,\cdots, x_{\nu - 1}))). +/// $$ +/// +/// where `g` is the expression defined in the documentation of [`sum_check_prove_higher_degree`] +/// +/// Given a degree bound `d_max` for all variables, it suffices to compute the evaluations of `s_i` +/// at `d_max + 1` points. Given that $s_{i}(0) = s_{i}(1) - s_{i - 1}(r_{i - 1})$ it is sufficient +/// to compute the evaluations on only `d_max` points. +/// +/// The algorithm works by iterating over the variables $(x_{i + 1}, \cdots, x_{\nu - 1})$ in +/// ${0, 1}^{\nu - 1 - i}$. For each such tuple, we store the evaluations of the (folded) +/// multi-linears at $(0, x_{i + 1}, \cdots, x_{\nu - 1})$ and +/// $(1, x_{i + 1}, \cdots, x_{\nu - 1})$ in two arrays, `evals_zero` and `evals_one`. +/// Using `evals_one`, remember that we optimize evaluating at 0 away, we get the first evaluation +/// i.e., $s_i(1)$. +/// +/// For the remaining evaluations, we use the fact that the folded `f_i` is multi-linear and hence +/// we can write +/// +/// $$ +/// f_i(X_i, x_{i + 1}, \cdots, x_{\nu - 1}) = +/// (1 - X_i) . f_i(0, x_{i + 1}, \cdots, x_{\nu - 1}) + +/// X_i . f_i(1, x_{i + 1}, \cdots, x_{\nu - 1}) +/// $$ +/// +/// Note that we omitted writing the folding randomness for readability. +/// Since the evaluation domain is $\{0, 1, ... , d_max\}$, we can compute the evaluations based on +/// the previous one using only additions. This is the purpose of `deltas`, to hold the increments +/// added to each multi-linear to compute the evaluation at the next point, and `evals_x` to hold +/// the current evaluation at $x$ in $\{2, ... , d_max\}$. +fn sumcheck_round( + evaluator: &impl LogUpGkrEvaluator::BaseField>, + mls: &[MultiLinearPoly], + merged_mls: &[MultiLinearPoly], + log_up_randomness: &[E], + r_sum_check: E, + tensored_merge_randomness: &[E], +) -> CompressedUnivariatePolyEvals { + let num_ml = mls.len(); + let num_vars = mls[0].num_variables(); + let num_rounds = num_vars - 1; + let mut evals_one = vec![E::ZERO; num_ml]; + let mut evals_zero = vec![E::ZERO; num_ml]; + let mut evals_x = vec![E::ZERO; num_ml]; + + let mut deltas = vec![E::ZERO; num_ml]; + + let mut numerators = vec![E::ZERO; evaluator.get_num_fractions()]; + let mut denominators = vec![E::ZERO; evaluator.get_num_fractions()]; + + let total_evals = (0..1 << num_rounds).map(|i| { + let mut total_evals = vec![E::ZERO; evaluator.max_degree()]; + + for (j, ml) in mls.iter().enumerate() { + evals_zero[j] = ml.evaluations()[2 * i]; + + evals_one[j] = ml.evaluations()[2 * i + 1]; + } + + let eq_at_zero = merged_mls[4].evaluations()[2 * i]; + let eq_at_one = merged_mls[4].evaluations()[2 * i + 1]; + + let p0 = merged_mls[0][2 * i + 1]; + let p1 = merged_mls[1][2 * i + 1]; + let q0 = merged_mls[2][2 * i + 1]; + let q1 = merged_mls[3][2 * i + 1]; + + total_evals[0] = comb_func(p0, p1, q0, q1, eq_at_one, r_sum_check); + + evals_zero + .iter() + .zip(evals_one.iter().zip(deltas.iter_mut().zip(evals_x.iter_mut()))) + .for_each(|(a0, (a1, (delta, evx)))| { + *delta = *a1 - *a0; + *evx = *a1; + }); + let eq_delta = eq_at_one - eq_at_zero; + let mut eq_x = eq_at_one; + + for e in total_evals.iter_mut().skip(1) { + evals_x.iter_mut().zip(deltas.iter()).for_each(|(evx, delta)| { + *evx += *delta; + }); + eq_x += eq_delta; + + evaluator.evaluate_query( + &evals_x, + log_up_randomness, + &mut numerators, + &mut denominators, + ); + + *e = evaluate_composition_poly( + 
&numerators, + &denominators, + eq_x, + r_sum_check, + tensored_merge_randomness, + ); + } + + total_evals + }); + + let evaluations = total_evals.fold(vec![E::ZERO; evaluator.max_degree()], |mut acc, evals| { + acc.iter_mut().zip(evals.iter()).for_each(|(a, ev)| *a += *ev); + acc + }); + + CompressedUnivariatePolyEvals(evaluations.into()) +} + +/// Reduces an old claim to a new claim using the round challenge. +pub fn reduce_claim( + current_poly: &RoundProof, + current_round_claim: SumCheckRoundClaim, + round_challenge: E, +) -> SumCheckRoundClaim { + // evaluate the round polynomial at the round challenge to obtain the new claim + let new_claim = current_poly + .round_poly_coefs + .evaluate_using_claim(¤t_round_claim.claim, &round_challenge); + + // update the evaluation point using the round challenge + let mut new_partial_eval_point = current_round_claim.eval_point; + new_partial_eval_point.push(round_challenge); + + SumCheckRoundClaim { + eval_point: new_partial_eval_point, + claim: new_claim, + } +} diff --git a/sumcheck/src/prover/plain.rs b/sumcheck/src/prover/plain.rs new file mode 100644 index 000000000..8e53c17c0 --- /dev/null +++ b/sumcheck/src/prover/plain.rs @@ -0,0 +1,140 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree. + +use crypto::{ElementHasher, RandomCoin}; +use math::FieldElement; + +use super::SumCheckProverError; +use crate::{ + comb_func, CompressedUnivariatePolyEvals, FinalOpeningClaim, MultiLinearPoly, RoundProof, + SumCheckProof, +}; + +/// Sum-check prover for non-linear multivariate polynomial of the simple LogUp-GKR. +/// +/// More specifically, the following function implements the logic of the sum-check prover as +/// described in Section 3.2 in [1], that is, given verifier challenges , the following implements +/// the sum-check prover for the following two statements +/// $$ +/// p_{\nu - \kappa}\left(v_{\kappa+1}, \cdots, v_{\nu}\right) = \sum_{w_i} +/// EQ\left(\left(v_{\kappa+1}, \cdots, v_{\nu}\right), \left(w_{\kappa+1}, \cdots, +/// w_{\nu}\right)\right) \cdot +/// \left( p_{\nu-\kappa+1}\left(1, w_{\kappa+1}, \cdots, w_{\nu}\right) \cdot +/// q_{\nu-\kappa+1}\left(0, w_{\kappa+1}, \cdots, w_{\nu}\right) + +/// p_{\nu-\kappa+1}\left(0, w_{\kappa+1}, \cdots, w_{\nu}\right) \cdot +/// q_{\nu-\kappa+1}\left(1, w_{\kappa+1}, \cdots, w_{\nu}\right)\right) +/// $$ +/// +/// and +/// +/// $$ +/// q_{\nu -k}\left(v_{\kappa+1}, \cdots, v_{\nu}\right) = \sum_{w_i}EQ\left(\left(v_{\kappa+1}, +/// \cdots, v_{\nu}\right), \left(w_{\kappa+1}, \cdots, w_{\nu }\right)\right) \cdot +/// \left( q_{\nu-\kappa+1}\left(1, w_{\kappa+1}, \cdots, w_{\nu}\right) \cdot +/// q_{\nu-\kappa+1}\left(0, w_{\kappa+1}, \cdots, w_{\nu}\right)\right) +/// $$ +/// +/// for $k = 1, \cdots, \nu - 1$ +/// +/// Instead of executing two runs of the sum-check protocol, a batching randomness `r_batch` is +/// sent by the verifier at the outset in order to batch the two statments. 
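+///
+/// Concretely, writing $w$ for $\left(w_{\kappa+1}, \cdots, w_{\nu}\right)$, the batched statement
+/// handled by the single sum-check is
+///
+/// $$
+/// \sum_{w_i} EQ\left(\left(v_{\kappa+1}, \cdots, v_{\nu}\right), w\right) \cdot
+/// \left( p_{\nu-\kappa+1}\left(1, w\right) \cdot q_{\nu-\kappa+1}\left(0, w\right) +
+/// p_{\nu-\kappa+1}\left(0, w\right) \cdot q_{\nu-\kappa+1}\left(1, w\right) +
+/// r_{batch} \cdot q_{\nu-\kappa+1}\left(1, w\right) \cdot q_{\nu-\kappa+1}\left(0, w\right)\right)
+/// $$
+///
+/// which is the term-wise combination computed by `comb_func`.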
+/// +/// [1]: https://eprint.iacr.org/2023/1284 +#[allow(clippy::too_many_arguments)] +pub fn sumcheck_prove_plain< + E: FieldElement, + C: RandomCoin, + H: ElementHasher, +>( + num_rounds: usize, + claim: E, + r_batch: E, + p0: &mut MultiLinearPoly, + p1: &mut MultiLinearPoly, + q0: &mut MultiLinearPoly, + q1: &mut MultiLinearPoly, + eq: &mut MultiLinearPoly, + transcript: &mut C, +) -> Result<(SumCheckProof, E), SumCheckProverError> { + let mut round_proofs = vec![]; + + let mut claim = claim; + let mut challenges = vec![]; + for _ in 0..num_rounds { + let mut eval_point_0 = E::ZERO; + let mut eval_point_2 = E::ZERO; + let mut eval_point_3 = E::ZERO; + + let len = p0.num_evaluations() / 2; + for i in 0..len { + eval_point_0 += + comb_func(p0[2 * i], p1[2 * i], q0[2 * i], q1[2 * i], eq[2 * i], r_batch); + + let p0_delta = p0[2 * i + 1] - p0[2 * i]; + let p1_delta = p1[2 * i + 1] - p1[2 * i]; + let q0_delta = q0[2 * i + 1] - q0[2 * i]; + let q1_delta = q1[2 * i + 1] - q1[2 * i]; + let eq_delta = eq[2 * i + 1] - eq[2 * i]; + + let mut p0_evx = p0[2 * i + 1] + p0_delta; + let mut p1_evx = p1[2 * i + 1] + p1_delta; + let mut q0_evx = q0[2 * i + 1] + q0_delta; + let mut q1_evx = q1[2 * i + 1] + q1_delta; + let mut eq_evx = eq[2 * i + 1] + eq_delta; + eval_point_2 += comb_func(p0_evx, p1_evx, q0_evx, q1_evx, eq_evx, r_batch); + + p0_evx += p0_delta; + p1_evx += p1_delta; + q0_evx += q0_delta; + q1_evx += q1_delta; + eq_evx += eq_delta; + eval_point_3 += comb_func(p0_evx, p1_evx, q0_evx, q1_evx, eq_evx, r_batch); + } + + let evals = vec![ + claim - eval_point_0, // Optimization applied using the claim to reduce the number of sums computed + eval_point_2, + eval_point_3, + ]; + let poly = CompressedUnivariatePolyEvals(evals.into()); + let round_poly_coefs = poly.to_poly(claim); + + // reseed with the s_i polynomial + transcript.reseed(H::hash_elements(&round_poly_coefs.0)); + let round_proof = RoundProof { + round_poly_coefs: round_poly_coefs.clone(), + }; + + round_proofs.push(round_proof); + + let round_challenge = + transcript.draw().map_err(|_| SumCheckProverError::FailedToGenerateChallenge)?; + + // compute the new reduced round claim + let new_claim = round_poly_coefs.evaluate_using_claim(&claim, &round_challenge); + + // fold each multi-linear using the round challenge + p0.bind_least_significant_variable(round_challenge); + p1.bind_least_significant_variable(round_challenge); + q0.bind_least_significant_variable(round_challenge); + q1.bind_least_significant_variable(round_challenge); + eq.bind_least_significant_variable(round_challenge); + + challenges.push(round_challenge); + + claim = new_claim; + } + + Ok(( + SumCheckProof { + openings_claim: FinalOpeningClaim { + eval_point: challenges, + openings: vec![p0[0], p1[0], q0[0], q1[0]], + }, + round_proofs, + }, + claim, + )) +} diff --git a/sumcheck/src/univariate.rs b/sumcheck/src/univariate.rs new file mode 100644 index 000000000..082a4daf9 --- /dev/null +++ b/sumcheck/src/univariate.rs @@ -0,0 +1,295 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree. 
+ +use alloc::vec::Vec; + +use math::{batch_inversion, polynom, FieldElement}; +use smallvec::SmallVec; +use utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable}; + +// CONSTANTS +// ================================================================================================ + +/// Maximum expected size of the round polynomials. This is needed for `SmallVec`. The size of +/// the round polynomials is dictated by the degree of the non-linearity in the sum-check statement +/// which is direcly influenced by the maximal degrees of the numerators and denominators appearing +/// in the LogUp-GKR relation and equal to one plus the maximal degree of the numerators and +/// maximal degree of denominators. +/// The following value assumes that this degree is at most 10. +const MAX_POLY_SIZE: usize = 10; + +// COMPRESSED UNIVARIATE POLYNOMIAL +// ================================================================================================ + +/// The coefficients of a univariate polynomial of degree n with the linear term coefficient +/// omitted. +/// +/// This compressed representation is useful during the sum-check protocol as the full uncompressed +/// representation can be recovered from the compressed one and the current sum-check round claim. +#[derive(Clone, Debug, PartialEq)] +pub struct CompressedUnivariatePoly(pub(crate) SmallVec<[E; MAX_POLY_SIZE]>); + +impl CompressedUnivariatePoly { + /// Evaluates a polynomial at a challenge point using a round claim. + /// + /// The round claim is used to recover the coefficient of the linear term using the relation + /// 2 * c0 + c1 + ... c_{n - 1} = claim. Using the complete list of coefficients, the polynomial + /// is then evaluated using Horner's method. + pub fn evaluate_using_claim(&self, claim: &E, challenge: &E) -> E { + // recover the coefficient of the linear term + let c1 = *claim - self.0.iter().fold(E::ZERO, |acc, term| acc + *term) - self.0[0]; + + // construct the full coefficient list + let mut complete_coefficients = vec![self.0[0], c1]; + complete_coefficients.extend_from_slice(&self.0[1..]); + + // evaluate + polynom::eval(&complete_coefficients, *challenge) + } +} + +impl Serializable for CompressedUnivariatePoly { + fn write_into(&self, target: &mut W) { + let vector: Vec = self.0.clone().into_vec(); + vector.write_into(target); + } +} + +impl Deserializable for CompressedUnivariatePoly +where + E: FieldElement, +{ + fn read_from(source: &mut R) -> Result { + let vector: Vec = Vec::::read_from(source)?; + Ok(Self(vector.into())) + } +} + +/// The evaluations of a univariate polynomial of degree n at 0, 1, ..., n with the evaluation at 0 +/// omitted. +/// +/// This compressed representation is useful during the sum-check protocol as the full uncompressed +/// representation can be recovered from the compressed one and the current sum-check round claim. +#[derive(Clone, Debug)] +pub struct CompressedUnivariatePolyEvals(pub(crate) SmallVec<[E; MAX_POLY_SIZE]>); + +impl CompressedUnivariatePolyEvals { + /// Gives the coefficient representation of a polynomial represented in evaluation form. + /// + /// Since the evaluation at 0 is omitted, we need to use the round claim to recover + /// the evaluation at 0 using the identity $p(0) + p(1) = claim$. + /// Now, we have that for any polynomial $p(x) = c0 + c1 * x + ... + c_{n-1} * x^{n - 1}$: + /// + /// 1. $p(0) = c0$. + /// 2. $p(x) = c0 + x * q(x) where q(x) = c1 + ... + c_{n-1} * x^{n - 2}$. 
+ /// + /// This means that we can compute the evaluations of q at 1, ..., n - 1 using the evaluations + /// of p and thus reduce by 1 the size of the interpolation problem. + /// Once the coefficient of q are recovered, the c0 coefficient is appended to these and this + /// is precisely the coefficient representation of the original polynomial q. + /// Note that the coefficient of the linear term is removed as this coefficient can be recovered + /// from the remaining coefficients, again, using the round claim using the relation + /// $2 * c0 + c1 + ... c_{n - 1} = claim$. + pub fn to_poly(&self, round_claim: E) -> CompressedUnivariatePoly { + // construct the vector of interpolation points 1, ..., n + let n_minus_1 = self.0.len(); + let points = (1..=n_minus_1 as u32).map(E::BaseField::from).collect::>(); + + // construct their inverses. These will be needed for computing the evaluations + // of the q polynomial as well as for doing the interpolation on q + let points_inv = batch_inversion(&points); + + // compute the zeroth coefficient + let c0 = round_claim - self.0[0]; + + // compute the evaluations of q + let q_evals: Vec = self + .0 + .iter() + .enumerate() + .map(|(i, evals)| (*evals - c0).mul_base(points_inv[i])) + .collect(); + + // interpolate q + let q_coefs = multiply_by_inverse_vandermonde(&q_evals, &points_inv); + + // append c0 to the coefficients of q to get the coefficients of p. The linear term + // coefficient is removed as this can be recovered from the other coefficients using + // the reduced claim. + let mut coefficients = SmallVec::with_capacity(self.0.len() + 1); + coefficients.push(c0); + coefficients.extend_from_slice(&q_coefs[1..]); + + CompressedUnivariatePoly(coefficients) + } +} + +// HELPER FUNCTIONS +// ================================================================================================ + +/// Given a (row) vector `v`, computes the vector-matrix product `v * V^{-1}` where `V` is +/// the Vandermonde matrix over the points `1, ..., n` where `n` is the length of `v`. +/// The resulting vector will then be the coefficients of the minimal interpolating polynomial +/// through the points `(i+1, v[i])` for `i` in `0, ..., n - 1` +/// +/// The naive way would be to invert the matrix `V` and then compute the vector-matrix product +/// this will cost `O(n^3)` operations and `O(n^2)` memory. We can also try Gaussian elimination +/// but this is also worst case `O(n^3)` operations and `O(n^2)` memory. +/// In the following implementation, we use the fact that the points over which we are interpolating +/// is a set of equidistant points and thus both the Vandermonde matrix and its inverse can be +/// described by sparse linear recurrence equations. +/// More specifically, we use the representation given in [1], where `V^{-1}` is represented as +/// `U * M` where: +/// +/// 1. `M` is a lower triangular matrix where its entries are given by M(i, j) = M(i - 1, j) - M(i - +/// 1, j - 1) / (i - 1) with boundary conditions M(i, 1) = 1 and M(i, j) = 0 when j > i. +/// +/// 2. `U` is an upper triangular (involutory) matrix where its entries are given by U(i, j) = U(i, +/// j - 1) - U(i - 1, j - 1) with boundary condition U(1, j) = 1 and U(i, j) = 0 when i > j. +/// +/// Note that the matrix indexing in the formulas above matches the one in the reference and starts +/// from 1. +/// +/// The above implies that we can do the vector-matrix multiplication in `O(n^2)` and using only +/// `O(n)` space. 
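+///
+/// As a small sanity check (assuming coefficients in ascending-degree order, as consumed by
+/// `polynom::eval`): for `n = 2` and `v = [v_0, v_1]`, the minimal polynomial through
+/// `(1, v_0)` and `(2, v_1)` is $p(x) = (2 v_0 - v_1) + (v_1 - v_0) x$, so the expected result
+/// is the coefficient vector $[2 v_0 - v_1, v_1 - v_0]$.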
+/// +/// [1]: https://link.springer.com/article/10.1007/s002110050360 +fn multiply_by_inverse_vandermonde( + vector: &[E], + nodes_inv: &[E::BaseField], +) -> Vec { + let res = multiply_by_u(vector); + multiply_by_m(&res, nodes_inv) +} + +/// Multiplies a (row) vector `v` by an upper triangular matrix `U` to compute `v * U`. +/// +/// `U` is an upper triangular (involutory) matrix with its entries given by +/// U(i, j) = U(i, j - 1) - U(i - 1, j - 1) +/// with boundary condition U(1, j) = 1 and U(i, j) = 0 when i > j. +fn multiply_by_u(vector: &[E]) -> Vec { + let n = vector.len(); + let mut previous_u_col = vec![E::BaseField::ZERO; n]; + previous_u_col[0] = E::BaseField::ONE; + let mut current_u_col = vec![E::BaseField::ZERO; n]; + current_u_col[0] = E::BaseField::ONE; + + let mut result: Vec = vec![E::ZERO; n]; + for (i, res) in result.iter_mut().enumerate() { + *res = vector[0]; + + for (j, v) in vector.iter().enumerate().take(i + 1).skip(1) { + let u_entry: E::BaseField = + compute_u_entry::(j, &mut previous_u_col, &mut current_u_col); + *res += v.mul_base(u_entry); + } + previous_u_col.clone_from(¤t_u_col); + } + + result +} + +/// Multiplies a (row) vector `v` by a lower triangular matrix `M` to compute `v * M`. +/// +/// `M` is a lower triangular matrix with its entries given by +/// M(i, j) = M(i - 1, j) - M(i - 1, j - 1) / (i - 1) +/// with boundary conditions M(i, 1) = 1 and M(i, j) = 0 when j > i. +fn multiply_by_m(vector: &[E], nodes_inv: &[E::BaseField]) -> Vec { + let n = vector.len(); + let mut previous_m_col = vec![E::BaseField::ONE; n]; + let mut current_m_col = vec![E::BaseField::ZERO; n]; + current_m_col[0] = E::BaseField::ONE; + + let mut result: Vec = vec![E::ZERO; n]; + result[0] = vector.iter().fold(E::ZERO, |acc, term| acc + *term); + for (i, res) in result.iter_mut().enumerate().skip(1) { + current_m_col = vec![E::BaseField::ZERO; n]; + + for (j, v) in vector.iter().enumerate().skip(i) { + let m_entry: E::BaseField = + compute_m_entry::(j, &mut previous_m_col, &mut current_m_col, nodes_inv[j - 1]); + *res += v.mul_base(m_entry); + } + previous_m_col.clone_from(¤t_m_col); + } + + result +} + +/// Returns the j-th entry of the i-th column of matrix `U` given the values of the (i - 1)-th +/// column. The i-th column is also updated with the just computed `U(i, j)` entry. +/// +/// `U` is an upper triangular (involutory) matrix with its entries given by +/// U(i, j) = U(i, j - 1) - U(i - 1, j - 1) +/// with boundary condition U(1, j) = 1 and U(i, j) = 0 when i > j. +fn compute_u_entry( + j: usize, + col_prev: &mut [E::BaseField], + col_cur: &mut [E::BaseField], +) -> E::BaseField { + let value = col_prev[j] - col_prev[j - 1]; + col_cur[j] = value; + value +} + +/// Returns the j-th entry of the i-th column of matrix `M` given the values of the (i - 1)-th +/// and the i-th columns. The i-th column is also updated with the just computed `M(i, j)` entry. +/// +/// `M` is a lower triangular matrix with its entries given by +/// M(i, j) = M(i - 1, j) - M(i - 1, j - 1) / (i - 1) +/// with boundary conditions M(i, 1) = 1 and M(i, j) = 0 when j > i. 
+fn compute_m_entry( + j: usize, + col_previous: &mut [E::BaseField], + col_current: &mut [E::BaseField], + node_inv: E::BaseField, +) -> E::BaseField { + let value = col_current[j - 1] - node_inv * col_previous[j - 1]; + col_current[j] = value; + value +} + +// TESTS +// ================================================================================================ + +#[test] +fn test_poly_partial() { + use math::fields::f64::BaseElement; + + let degree = 1000; + let mut points: Vec = vec![BaseElement::ZERO; degree]; + points + .iter_mut() + .enumerate() + .for_each(|(i, node)| *node = BaseElement::from(i as u32)); + + let p: Vec = rand_utils::rand_vector(degree); + let evals = polynom::eval_many(&p, &points); + + let mut partial_evals = evals.clone(); + partial_evals.remove(0); + + let partial_poly = CompressedUnivariatePolyEvals(partial_evals.into()); + let claim = evals[0] + evals[1]; + let poly_coeff = partial_poly.to_poly(claim); + + let r = rand_utils::rand_vector(1); + + assert_eq!(polynom::eval(&p, r[0]), poly_coeff.evaluate_using_claim(&claim, &r[0])) +} + +#[test] +fn test_serialization() { + use math::fields::f64::BaseElement; + + let original_poly = + CompressedUnivariatePoly(rand_utils::rand_array::().into()); + let poly_bytes = original_poly.to_bytes(); + + let deserialized_poly = + CompressedUnivariatePoly::::read_from_bytes(&poly_bytes).unwrap(); + + assert_eq!(original_poly, deserialized_poly) +} From ad0497d8e7cf936de7b23b3b5368a5ed86789358 Mon Sep 17 00:00:00 2001 From: Al-Kindi-0 <82364884+Al-Kindi-0@users.noreply.github.com> Date: Fri, 9 Aug 2024 10:27:12 +0200 Subject: [PATCH 11/28] chore: move logup evaluator trait to separate file --- air/src/air/logup_gkr.rs | 74 ++++++++++++++++++++++++++++++++++++++++ air/src/air/mod.rs | 68 ++---------------------------------- 2 files changed, 77 insertions(+), 65 deletions(-) create mode 100644 air/src/air/logup_gkr.rs diff --git a/air/src/air/logup_gkr.rs b/air/src/air/logup_gkr.rs new file mode 100644 index 000000000..22ece038d --- /dev/null +++ b/air/src/air/logup_gkr.rs @@ -0,0 +1,74 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree. + +use alloc::vec::Vec; + +use super::EvaluationFrame; +use math::{ExtensionOf, FieldElement, StarkField, ToElements}; + +pub trait LogUpGkrEvaluator: Clone { + /// Defines the base field of the evaluator. + type BaseField: StarkField; + + /// Public inputs need to compute the final claim. + type PublicInputs: ToElements + Send; + + /// Gets a list of all oracles involved in LogUp-GKR; this is intended to be used in construction of + /// MLEs. + fn get_oracles(&self) -> Vec>; + + /// Returns the number of random values needed to evaluate a query. + fn get_num_rand_values(&self) -> usize; + + /// Returns the number of fractions in the LogUp-GKR statement. + fn get_num_fractions(&self) -> usize; + + /// Returns the maximal degree of the multi-variate associated to the input layer. + fn max_degree(&self) -> usize; + + /// Builds a query from the provided main trace frame and periodic values. + /// + /// Note: it should be possible to provide an implementation of this method based on the + /// information returned from `get_oracles()`. However, this implementation is likely to be + /// expensive compared to the hand-written implementation. However, we could provide a test + /// which verifies that `get_oracles()` and `build_query()` methods are consistent. 
+ fn build_query(&self, frame: &EvaluationFrame, periodic_values: &[E]) -> Vec + where + E: FieldElement; + + /// Evaluates the provided query and writes the results into the numerators and denominators. + /// + /// Note: it is also possible to combine `build_query()` and `evaluate_query()` into a single + /// method to avoid the need to first build the query struct and then evaluate it. However: + /// - We assume that the compiler will be able to optimize this away. + /// - Merging the methods will make it more difficult avoid inconsistencies between + /// `evaluate_query()` and `get_oracles()` methods. + fn evaluate_query( + &self, + query: &[F], + rand_values: &[E], + numerator: &mut [E], + denominator: &mut [E], + ) where + F: FieldElement, + E: FieldElement + ExtensionOf; + + /// Computes the final claim for the LogUp-GKR circuit. + /// + /// The default implementation of this method returns E::ZERO as it is expected that the + /// fractional sums will cancel out. However, in cases when some boundary conditions need to + /// be imposed on the LogUp-GKR relations, this method can be overridden to compute the final + /// expected claim. + fn compute_claim(&self, inputs: &Self::PublicInputs, rand_values: &[E]) -> E + where + E: FieldElement; +} + +#[derive(Clone, Debug, PartialEq, PartialOrd, Eq, Ord)] +pub enum LogUpGkrOracle { + CurrentRow(usize), + NextRow(usize), + PeriodicValue(Vec), +} diff --git a/air/src/air/mod.rs b/air/src/air/mod.rs index 1ae1981d5..07f38cce1 100644 --- a/air/src/air/mod.rs +++ b/air/src/air/mod.rs @@ -34,6 +34,9 @@ pub use lagrange::{ LagrangeKernelRandElements, LagrangeKernelTransitionConstraints, }; +mod logup_gkr; +pub use logup_gkr::{LogUpGkrEvaluator, LogUpGkrOracle}; + mod coefficients; pub use coefficients::{ ConstraintCompositionCoefficients, DeepCompositionCoefficients, @@ -601,68 +604,3 @@ pub trait Air: Send + Sync { }) } } - -pub trait LogUpGkrEvaluator: Clone { - /// Defines the base field of the evaluator. - type BaseField: StarkField; - - /// Public inputs need to compute the final claim. - type PublicInputs: ToElements + Send; - - /// Gets a list of all oracles involved in LogUp-GKR; this is intended to be used in construction of - /// MLEs. - fn get_oracles(&self) -> Vec>; - - /// Returns the number of random values needed to evaluate a query. - fn get_num_rand_values(&self) -> usize; - - /// Returns the number of fractions in the LogUp-GKR statement. - fn get_num_fractions(&self) -> usize; - - /// Returns the maximal degree of the multi-variate associated to the input layer. - fn max_degree(&self) -> usize; - - /// Builds a query from the provided main trace frame and periodic values. - /// - /// Note: it should be possible to provide an implementation of this method based on the - /// information returned from `get_oracles()`. However, this implementation is likely to be - /// expensive compared to the hand-written implementation. However, we could provide a test - /// which verifies that `get_oracles()` and `build_query()` methods are consistent. - fn build_query(&self, frame: &EvaluationFrame, periodic_values: &[E]) -> Vec - where - E: FieldElement; - - /// Evaluates the provided query and writes the results into the numerators and denominators. - /// - /// Note: it is also possible to combine `build_query()` and `evaluate_query()` into a single - /// method to avoid the need to first build the query struct and then evaluate it. However: - /// - We assume that the compiler will be able to optimize this away. 
- /// - Merging the methods will make it more difficult avoid inconsistencies between - /// `evaluate_query()` and `get_oracles()` methods. - fn evaluate_query( - &self, - query: &[F], - rand_values: &[E], - numerator: &mut [E], - denominator: &mut [E], - ) where - F: FieldElement, - E: FieldElement + ExtensionOf; - - /// Computes the final claim for the LogUp-GKR circuit. - /// - /// The default implementation of this method returns E::ZERO as it is expected that the - /// fractional sums will cancel out. However, in cases when some boundary conditions need to - /// be imposed on the LogUp-GKR relations, this method can be overridden to compute the final - /// expected claim. - fn compute_claim(&self, inputs: &Self::PublicInputs, rand_values: &[E]) -> E - where - E: FieldElement; -} - -#[derive(Clone, Debug, PartialEq, PartialOrd, Eq, Ord)] -pub enum LogUpGkrOracle { - CurrentRow(usize), - NextRow(usize), - PeriodicValue(Vec), -} From d721bc2a221a95b8352f77fc32366baefdbc1915 Mon Sep 17 00:00:00 2001 From: Al-Kindi-0 <82364884+Al-Kindi-0@users.noreply.github.com> Date: Thu, 15 Aug 2024 15:36:34 +0200 Subject: [PATCH 12/28] feat: add multi-threading support and simplify input sum-check --- sumcheck/src/lib.rs | 143 ++++++++-- sumcheck/src/multilinear.rs | 194 ++++++++++---- sumcheck/src/prover/high_degree.rs | 412 ++++++++++++++++++----------- sumcheck/src/prover/plain.rs | 129 ++++++--- sumcheck/src/verifier/mod.rs | 106 +++----- 5 files changed, 646 insertions(+), 338 deletions(-) diff --git a/sumcheck/src/lib.rs b/sumcheck/src/lib.rs index 86ef044a6..933fe35c7 100644 --- a/sumcheck/src/lib.rs +++ b/sumcheck/src/lib.rs @@ -13,6 +13,9 @@ use math::FieldElement; #[macro_use] extern crate alloc; +#[cfg(feature = "concurrent")] +pub use rayon::prelude::*; + mod prover; pub use prover::*; @@ -122,8 +125,7 @@ where /// A proof for the input circuit layer i.e., the final layer in the GKR protocol. #[derive(Debug, Clone)] pub struct FinalLayerProof { - pub before_merge_proof: Vec>, - pub after_merge_proof: SumCheckProof, + pub proof: SumCheckProof, } impl Serializable for FinalLayerProof @@ -131,9 +133,8 @@ where E: FieldElement, { fn write_into(&self, target: &mut W) { - let Self { before_merge_proof, after_merge_proof } = self; - before_merge_proof.write_into(target); - after_merge_proof.write_into(target); + let Self { proof } = self; + proof.write_into(target); } } @@ -143,8 +144,7 @@ where { fn read_from(source: &mut R) -> Result { Ok(Self { - before_merge_proof: Deserializable::read_from(source)?, - after_merge_proof: Deserializable::read_from(source)?, + proof: Deserializable::read_from(source)?, }) } } @@ -157,6 +157,104 @@ pub struct SumCheckRoundClaim { pub claim: E, } +// GKR CIRCUIT PROOF +// =============================================================================================== + +/// A GKR proof for the correct evaluation of the sum of fractions circuit. 
+#[derive(Debug, Clone)] +pub struct GkrCircuitProof { + pub circuit_outputs: CircuitOutput, + pub before_final_layer_proofs: BeforeFinalLayerProof, + pub final_layer_proof: FinalLayerProof, +} + +impl GkrCircuitProof { + pub fn get_final_opening_claim(&self) -> FinalOpeningClaim { + self.final_layer_proof.proof.openings_claim.clone() + } +} + +impl Serializable for GkrCircuitProof +where + E: FieldElement, +{ + fn write_into(&self, target: &mut W) { + self.circuit_outputs.write_into(target); + self.before_final_layer_proofs.write_into(target); + self.final_layer_proof.proof.write_into(target); + } +} + +impl Deserializable for GkrCircuitProof +where + E: FieldElement, +{ + fn read_from(source: &mut R) -> Result { + Ok(Self { + circuit_outputs: CircuitOutput::read_from(source)?, + before_final_layer_proofs: BeforeFinalLayerProof::read_from(source)?, + final_layer_proof: FinalLayerProof::read_from(source)?, + }) + } +} + +/// A set of sum-check proofs for all GKR layers but for the input circuit layer. +#[derive(Debug, Clone)] +pub struct BeforeFinalLayerProof { + pub proof: Vec>, +} + +impl Serializable for BeforeFinalLayerProof +where + E: FieldElement, +{ + fn write_into(&self, target: &mut W) { + let Self { proof } = self; + proof.write_into(target); + } +} + +impl Deserializable for BeforeFinalLayerProof +where + E: FieldElement, +{ + fn read_from(source: &mut R) -> Result { + Ok(Self { + proof: Deserializable::read_from(source)?, + }) + } +} + +/// Holds the output layer of an [`EvaluatedCircuit`]. +#[derive(Clone, Debug)] +pub struct CircuitOutput { + pub numerators: MultiLinearPoly, + pub denominators: MultiLinearPoly, +} + +impl Serializable for CircuitOutput +where + E: FieldElement, +{ + fn write_into(&self, target: &mut W) { + let Self { numerators, denominators } = self; + numerators.write_into(target); + denominators.write_into(target); + } +} + +impl Deserializable for CircuitOutput +where + E: FieldElement, +{ + fn read_from(source: &mut R) -> Result { + Ok(Self { + numerators: MultiLinearPoly::read_from(source)?, + denominators: MultiLinearPoly::read_from(source)?, + }) + } +} + /// The non-linear composition polynomial of the LogUp-GKR protocol. /// /// This is the result of batching the `p_k` and `q_k` of section 3.2 in @@ -167,11 +265,11 @@ fn comb_func(p0: E, p1: E, q0: E, q1: E, eq: E, r_batch: E) -> /// The non-linear composition polynomial of the LogUp-GKR protocol specific to the input layer. 
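The body of `comb_func` falls outside this hunk, but its formula can be read off the verifier's final evaluation check further down in this patch, where the reduced claim is compared against `(p0 * q1 + p1 * q0 + r_batch * q0 * q1) * eq`. A minimal generic sketch consistent with that check (an illustration, not taken verbatim from the crate) is:

    use math::FieldElement;

    /// Degree-3 combination batching the LogUp-GKR numerator and denominator relations:
    /// eq * (p0 * q1 + p1 * q0 + r_batch * q0 * q1).
    fn comb_func<E: FieldElement>(p0: E, p1: E, q0: E, q1: E, eq: E, r_batch: E) -> E {
        (p0 * q1 + p1 * q0 + r_batch * q0 * q1) * eq
    }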
pub fn evaluate_composition_poly( + eq_at_mu: &[E], numerators: &[E], denominators: &[E], eq_eval: E, r_sum_check: E, - tensored_merge_randomness: &[E], ) -> E { let numerators = MultiLinearPoly::from_evaluations(numerators.to_vec()); let denominators = MultiLinearPoly::from_evaluations(denominators.to_vec()); @@ -179,18 +277,19 @@ pub fn evaluate_composition_poly( let (left_numerators, right_numerators) = numerators.project_least_significant_variable(); let (left_denominators, right_denominators) = denominators.project_least_significant_variable(); - let eval_left_numerators = - left_numerators.evaluate_with_lagrange_kernel(tensored_merge_randomness); - let eval_right_numerators = - right_numerators.evaluate_with_lagrange_kernel(tensored_merge_randomness); - - let eval_left_denominators = - left_denominators.evaluate_with_lagrange_kernel(tensored_merge_randomness); - let eval_right_denominators = - right_denominators.evaluate_with_lagrange_kernel(tensored_merge_randomness); - - eq_eval - * ((eval_left_numerators * eval_right_denominators - + eval_right_numerators * eval_left_denominators) - + eval_left_denominators * eval_right_denominators * r_sum_check) + left_numerators + .evaluations() + .iter() + .zip( + right_numerators.evaluations().iter().zip( + left_denominators + .evaluations() + .iter() + .zip(right_denominators.evaluations().iter().zip(eq_at_mu.iter())), + ), + ) + .map(|(p0, (p1, (q0, (q1, eq_w))))| { + *eq_w * comb_func(*p0, *p1, *q0, *q1, eq_eval, r_sum_check) + }) + .fold(E::ZERO, |acc, x| acc + x) } diff --git a/sumcheck/src/multilinear.rs b/sumcheck/src/multilinear.rs index 0ad5f6a18..ede55b7ca 100644 --- a/sumcheck/src/multilinear.rs +++ b/sumcheck/src/multilinear.rs @@ -10,6 +10,7 @@ use math::FieldElement; #[cfg(feature = "concurrent")] pub use rayon::prelude::*; use smallvec::SmallVec; +use utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable}; // MULTI-LINEAR POLYNOMIAL // ================================================================================================ @@ -17,14 +18,14 @@ use smallvec::SmallVec; /// Represents a multi-linear polynomial. /// /// The representation stores the evaluations of the polynomial over the boolean hyper-cube -/// ${0 , 1}^ν$. +/// ${0 , 1}^{\nu}$. #[derive(Clone, Debug, PartialEq)] pub struct MultiLinearPoly { evaluations: Vec, } impl MultiLinearPoly { - /// Constructs a [`MultiLinearPoly`] from its evaluations over the boolean hyper-cube ${0 , 1}^ν$. + /// Constructs a [`MultiLinearPoly`] from its evaluations over the boolean hyper-cube ${0 , 1}^{\nu}$. pub fn from_evaluations(evaluations: Vec) -> Self { assert!(evaluations.len().is_power_of_two(), "A multi-linear polynomial should have a power of 2 number of evaluations over the Boolean hyper-cube"); Self { evaluations } @@ -45,11 +46,11 @@ impl MultiLinearPoly { self.evaluations.len() } - /// Evaluate the multi-linear at some query $(r_0, ..., r_{ν - 1}) ∈ 𝔽^ν$. + /// Evaluate the multi-linear at some query $(r_0, ..., r_{{\nu} - 1}) \in \mathbb{F}^{\nu}$. /// /// It first computes the evaluations of the Lagrange basis polynomials over the interpolating - /// set ${0 , 1}^ν$ at $(r_0, ..., r_{ν - 1})$ i.e., the Lagrange kernel at $(r_0, ..., r_{ν - 1})$. - /// The evaluation then is the inner product, indexed by ${0 , 1}^ν$, of the vector of + /// set ${0 , 1}^{\nu}$ at $(r_0, ..., r_{{\nu} - 1})$ i.e., the Lagrange kernel at $(r_0, ..., r_{{\nu} - 1})$. 
+ /// The evaluation then is the inner product, indexed by ${0 , 1}^{\nu}$, of the vector of /// evaluations times the Lagrange kernel. pub fn evaluate(&self, query: &[E]) -> E { let tensored_query = compute_lagrange_basis_evals_at(query); @@ -58,7 +59,7 @@ impl MultiLinearPoly { /// Similar to [`Self::evaluate`], except that the query was already turned into the Lagrange /// kernel (i.e. the [`lagrange_ker::EqFunction`] evaluated at every point in the set - /// `${0 , 1}^ν$`). + /// `${0 , 1}^{\nu}$`). /// /// This is more efficient than [`Self::evaluate`] when multiple different [`MultiLinearPoly`] /// need to be evaluated at the same query point. @@ -66,20 +67,33 @@ impl MultiLinearPoly { inner_product(&self.evaluations, lagrange_kernel) } - /// Computes $f(r_0, y_1, ..., y_{ν - 1})$ using the linear interpolation formula - /// $(1 - r_0) * f(0, y_1, ..., y_{ν - 1}) + r_0 * f(1, y_1, ..., y_{ν - 1})$ and assigns + /// Computes $f(r_0, y_1, ..., y_{{\nu} - 1})$ using the linear interpolation formula + /// $(1 - r_0) * f(0, y_1, ..., y_{{\nu} - 1}) + r_0 * f(1, y_1, ..., y_{{\nu} - 1})$ and assigns /// the resulting multi-linear, defined over a domain of half the size, to `self`. pub fn bind_least_significant_variable(&mut self, round_challenge: E) { let num_evals = self.evaluations.len() >> 1; - for i in 0..num_evals { - self.evaluations[i] = self.evaluations[i << 1] - + round_challenge * (self.evaluations[(i << 1) + 1] - self.evaluations[i << 1]); + #[cfg(not(feature = "concurrent"))] + { + for i in 0..num_evals { + self.evaluations[i] = self.evaluations[i << 1] + + round_challenge * (self.evaluations[(i << 1) + 1] - self.evaluations[i << 1]); + } + self.evaluations.truncate(num_evals) + } + + #[cfg(feature = "concurrent")] + { + let mut result = unsafe { utils::uninit_vector(num_evals) }; + result.par_iter_mut().enumerate().for_each(|(i, ev)| { + *ev = self.evaluations[i << 1] + + round_challenge * (self.evaluations[(i << 1) + 1] - self.evaluations[i << 1]) + }); + self.evaluations = result } - self.evaluations.truncate(num_evals) } - /// Given the multilinear polynomial $f(y_0, y_1, ..., y_{ν - 1})$, returns two polynomials: - /// $f(0, y_1, ..., y_{ν - 1})$ and $f(1, y_1, ..., y_{ν - 1})$. + /// Given the multilinear polynomial $f(y_0, y_1, ..., y_{{\nu} - 1})$, returns two polynomials: + /// $f(0, y_1, ..., y_{{\nu} - 1})$ and $f(1, y_1, ..., y_{{\nu} - 1})$. 
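A useful way to sanity-check the binding formula above is to compare it against `evaluate`: folding the least significant variable at `r_0` and then evaluating the smaller multi-linear at the remaining coordinates must agree with evaluating the original polynomial at the full query. A test-style sketch, assuming `MultiLinearPoly` and `rand_utils` are in scope as in the crate's other tests:

    #[test]
    fn bind_then_evaluate_matches_full_evaluation() {
        use math::fields::f64::BaseElement;

        // a random multi-linear in 3 variables, i.e., 8 evaluations over {0, 1}^3
        let evaluations: Vec<BaseElement> = rand_utils::rand_vector(8);
        let mut ml = MultiLinearPoly::from_evaluations(evaluations);

        // a random query (r_0, r_1, r_2)
        let query: Vec<BaseElement> = rand_utils::rand_vector(3);
        let full_eval = ml.evaluate(&query);

        // bind the least significant variable to r_0 and evaluate the
        // resulting 2-variable multi-linear at (r_1, r_2)
        ml.bind_least_significant_variable(query[0]);
        assert_eq!(full_eval, ml.evaluate(&query[1..]));
    }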
pub fn project_least_significant_variable(&self) -> (Self, Self) { let mut p0 = Vec::with_capacity(self.num_evaluations() / 2); let mut p1 = Vec::with_capacity(self.num_evaluations() / 2); @@ -100,6 +114,27 @@ impl Index for MultiLinearPoly { } } +impl Serializable for MultiLinearPoly +where + E: FieldElement, +{ + fn write_into(&self, target: &mut W) { + let Self { evaluations } = self; + evaluations.write_into(target); + } +} + +impl Deserializable for MultiLinearPoly +where + E: FieldElement, +{ + fn read_from(source: &mut R) -> Result { + Ok(Self { + evaluations: Deserializable::read_from(source)?, + }) + } +} + // EQ FUNCTION // ================================================================================================ @@ -109,39 +144,39 @@ const MAX_EQ_SIZE: usize = 25; /// The EQ (equality) function is the binary function defined by /// /// $$ -/// EQ: {0 , 1}^ν ⛌ {0 , 1}^ν ⇾ {0 , 1} -/// ((x_0, ..., x_{ν - 1}), (y_0, ..., y_{ν - 1})) ↦ \prod_{i = 0}^{ν - 1} (x_i * y_i + (1 - x_i) -/// * (1 - y_i)) +/// EQ: {0 , 1}^{\nu} ⛌ {0 , 1}^{\nu} \longrightarrow {0 , 1} +/// ((x_0, ..., x_{{\nu} - 1}), (y_0, ..., y_{{\nu} - 1})) \mapsto \prod_{i = 0}^{{\nu} - 1} (x_i \cdot y_i + (1 - x_i) +/// \cdot (1 - y_i)) /// $$ /// -/// Taking its multi-linear extension $EQ^{~}$, we can define a basis for the set of multi-linear -/// polynomials in ν variables by -/// $${EQ^{~}(., (y_0, ..., y_{ν - 1})): (y_0, ..., y_{ν - 1}) ∈ {0 , 1}^ν}$$ +/// Taking its multi-linear extension $\tilde{EQ}$, we can define a basis for the set of multi-linear +/// polynomials in {\nu} variables by +/// $${\tilde{EQ}(., (y_0, ..., y_{{\nu} - 1})): (y_0, ..., y_{{\nu} - 1}) \in {0 , 1}^{\nu}}$$ /// where each basis function is a function of its first argument. This is called the Lagrange or -/// evaluation basis for evaluation set ${0 , 1}^ν$. +/// evaluation basis for evaluation set ${0 , 1}^{\nu}$. /// -/// Given a function $(f: {0 , 1}^ν ⇾ 𝔽)$, its multi-linear extension (i.e., the unique -/// mult-linear polynomial extending `f` to $(f^{~}: 𝔽^ν ⇾ 𝔽)$ and agreeing with it on ${0 , 1}^ν$) is +/// Given a function $(f: {0 , 1}^{\nu} \longrightarrow \mathbb{F})$, its multi-linear extension (i.e., the unique +/// mult-linear polynomial extending `f` to $(\tilde{f}: \mathbb{F}^{\nu} \longrightarrow \mathbb{F})$ and agreeing with it on ${0 , 1}^{\nu}$) is /// defined as the summation of the evaluations of f against the Lagrange basis. -/// More specifically, given $(r_0, ..., r_{ν - 1}) ∈ 𝔽^ν$, then: +/// More specifically, given $(r_0, ..., r_{{\nu} - 1}) \in \mathbb{F}^{\nu}$, then: /// /// $$ -/// f^{~}(r_0, ..., r_{ν - 1}) = \sum_{(y_0, ..., y_{ν - 1}) ∈ {0 , 1}^ν} -/// f(y_0, ..., y_{ν - 1}) EQ^{~}((r_0, ..., r_{ν - 1}), (y_0, ..., y_{ν - 1})) +/// \tilde{f}(r_0, ..., r_{{\nu} - 1}) = \sum_{(y_0, ..., y_{{\nu} - 1}) \in {0 , 1}^{\nu}} +/// f(y_0, ..., y_{{\nu} - 1}) \tilde{EQ}((r_0, ..., r_{{\nu} - 1}), (y_0, ..., y_{{\nu} - 1})) /// $$ /// -/// We call the Lagrange kernel the evaluation of the EQ^{~} function at -/// $((r_0, ..., r_{ν - 1}), (y_0, ..., y_{ν - 1}))$ for all $(y_0, ..., y_{ν - 1}) ∈ {0 , 1}^ν$ for -/// a fixed $(r_0, ..., r_{ν - 1}) ∈ 𝔽^ν$. +/// We call the Lagrange kernel the evaluation of the $\tilde{EQ}$ function at +/// $((r_0, ..., r_{{\nu} - 1}), (y_0, ..., y_{{\nu} - 1}))$ for all $(y_0, ..., y_{{\nu} - 1}) \in {0 , 1}^{\nu}$ for +/// a fixed $(r_0, ..., r_{{\nu} - 1}) \in \mathbb{F}^{\nu}$. 
/// -/// [`EqFunction`] represents EQ^{~} the multi-linear extension of +/// [`EqFunction`] represents $\tilde{EQ}$ the multi-linear extension of /// -/// $((y_0, ..., y_{ν - 1}) ↦ EQ((r_0, ..., r_{ν - 1}), (y_0, ..., y_{ν - 1})))$ +/// $((y_0, ..., y_{{\nu} - 1}) \mapsto EQ((r_0, ..., r_{{\nu} - 1}), (y_0, ..., y_{{\nu} - 1})))$ /// /// and contains a method to generate the Lagrange kernel for defining evaluations of multi-linear -/// extensions of arbitrary functions $(f: {0 , 1}^ν ⇾ 𝔽)$ at a given point $(r_0, ..., r_{ν - 1})$ -/// as well as a method to evaluate $EQ^{~}((r_0, ..., r_{ν - 1}), (t_0, ..., t_{ν - 1})))$ for -/// $(t_0, ..., t_{ν - 1}) ∈ 𝔽^ν$. +/// extensions of arbitrary functions $(f: {0 , 1}^{\nu} \longrightarrow \mathbb{F})$ at a given point $(r_0, ..., r_{{\nu} - 1})$ +/// as well as a method to evaluate $\tilde{EQ}((r_0, ..., r_{{\nu} - 1}), (t_0, ..., t_{{\nu} - 1})))$ for +/// $(t_0, ..., t_{{\nu} - 1}) \in \mathbb{F}^{\nu}$. pub struct EqFunction { r: SmallVec<[E; MAX_EQ_SIZE]>, } @@ -153,7 +188,7 @@ impl EqFunction { EqFunction { r: tmp } } - /// Computes $EQ((r_0, ..., r_{ν - 1}), (t_0, ..., t_{ν - 1})))$. + /// Computes $\tilde{EQ}((r_0, ..., r_{{\nu} - 1}), (t_0, ..., t_{{\nu} - 1})))$. pub fn evaluate(&self, t: &[E]) -> E { assert_eq!(self.r.len(), t.len()); @@ -162,15 +197,15 @@ impl EqFunction { .fold(E::ONE, |acc, term| acc * term) } - /// Computes $EQ((r_0, ..., r_{ν - 1}), (y_0, ..., y_{ν - 1}))$ for all - /// $(y_0, ..., y_{ν - 1}) ∈ {0 , 1}^ν$ i.e., the Lagrange kernel at $r = (r_0, ..., r_{ν - 1})$. + /// Computes $\tilde{EQ}((r_0, ..., r_{{\nu} - 1}), (y_0, ..., y_{{\nu} - 1}))$ for all + /// $(y_0, ..., y_{{\nu} - 1}) \in {0 , 1}^{\nu}$ i.e., the Lagrange kernel at $r = (r_0, ..., r_{{\nu} - 1})$. pub fn evaluations(&self) -> Vec { compute_lagrange_basis_evals_at(&self.r) } /// Returns the evaluations of - /// $((y_0, ..., y_{ν - 1}) ↦ EQ^{~}((r_0, ..., r_{ν - 1}), (y_0, ..., y_{ν - 1})))$ - /// over ${0 , 1}^ν$. + /// $((y_0, ..., y_{{\nu} - 1}) \mapsto \tilde{EQ}((r_0, ..., r_{{\nu} - 1}), (y_0, ..., y_{{\nu} - 1})))$ + /// over ${0 , 1}^{\nu}$. pub fn ml_at(evaluation_point: Vec) -> MultiLinearPoly { let eq_evals = EqFunction::new(evaluation_point.clone()).evaluations(); MultiLinearPoly::from_evaluations(eq_evals) @@ -181,24 +216,79 @@ impl EqFunction { // ================================================================================================ /// Computes the evaluations of the Lagrange basis polynomials over the interpolating -/// set ${0 , 1}^ν$ at $(r_0, ..., r_{ν - 1})$ i.e., the Lagrange kernel at $(r_0, ..., r_{ν - 1})$. +/// set ${0 , 1}^{\nu}$ at $(r_0, ..., r_{{\nu} - 1})$ i.e., the Lagrange kernel at $(r_0, ..., r_{{\nu} - 1})$. +/// +/// If `concurrent` feature is enabled, this function can make use of multi-threading. +/// +/// The implementation uses the memoization technique in Lemma 3.8 in [1]. More precisely, we can +/// build a table $A^{(\nu)}$ in $\nu$ steps using the following master equation: +/// +/// $$ +/// A^{(j)}\left[\left(w_{1}, \dots, w_{j} \right)\right] = +/// A^{(j - 1)}\left[\left(w_{1}, \dots, w_{j - 1} \right)\right] \times +/// \left(w_{j}\cdot r_{j} + (1 - w_{j})\cdot( 1 - r_{j}) \right) +/// $$ +/// +/// if we interpret $\left(w_{1}, \dots, w_{j} \right)$ in little endian i.e., +/// $\left(w_{1}, \dots, w_{j} \right) = \sum_{i=1}^{\nu} 2^{i - 1}\cdot w_{i}$. 
+/// +/// We thus have the following algorithm: /// -/// TODO: This is a critical function and parallelizing would have a significant impact on -/// performance. +/// 1. Split current table, stored as a vector, $A^{(j)}\left[\left(w_{1}, \dots, w_{j} \right)\right]$ +/// into two tables $A^{(j)}\left[\left(w_{1}, \dots, w_{j-1}, 0 \right)\right]$ and +/// $A^{(j)}\left[\left(w_{1}, \dots, w_{j-1}, 1 \right)\right]$, +/// with the first part initialized to $A^{(j - 1)}\left[\left(w_{1}, \dots, w_{j-1} \right)\right]$. +/// 2. Iterating over $\left(w_{1}, \dots, w_{j-1} \right)$, do: +/// 1. Let $factor = A^{(j - 1)}\left[\left(w_{1}, \dots, w_{j-1} \right)\right]$, which is equal +/// by the above to $A^{(j)}\left[\left(w_{1}, \dots, w_{j-1}, 0 \right)\right]$. +/// 2. $A^{(j)}\left[\left(w_{1}, \dots, w_{j-1}, 1 \right)\right] = factor \cdot r_j$ +/// 3. $A^{(j)}\left[\left(w_{1}, \dots, w_{j-1}, 0 \right)\right] = +/// A^{(j)}\left[\left(w_{1}, \dots, w_{j-1}, 0 \right)\right] - +/// A^{(j)}\left[\left(w_{1}, \dots, w_{j-1}, 1 \right)\right]$ +/// +/// Note that we can allocate from the start a vector of size $2^{\nu}$ in order to hold the final +/// as well as the intermediate tables. +/// +/// [1]: https://people.cs.georgetown.edu/jthaler/ProofsArgsAndZK.pdf fn compute_lagrange_basis_evals_at(query: &[E]) -> Vec { - let nu = query.len(); - let n = 1 << nu; + let n = 1 << query.len(); + let mut evals = unsafe { utils::uninit_vector(n) }; - let mut evals: Vec = vec![E::ONE; n]; let mut size = 1; - for r_i in query.iter().rev() { - size *= 2; - for i in (0..size).rev().step_by(2) { - let scalar = evals[i / 2]; - evals[i] = scalar * *r_i; - evals[i - 1] = scalar - evals[i]; + evals[0] = E::ONE; + #[cfg(not(feature = "concurrent"))] + let evals = { + for r_i in query.iter() { + let (left_evals, right_evals) = evals.split_at_mut(size); + left_evals.iter_mut().zip(right_evals.iter_mut()).for_each(|(left, right)| { + let factor = *left; + *right = factor * *r_i; + *left -= *right; + }); + + size <<= 1; } - } + evals + }; + + #[cfg(feature = "concurrent")] + let evals = { + for r_i in query.iter() { + let (left_evals, right_evals) = evals.split_at_mut(size); + left_evals + .par_iter_mut() + .zip(right_evals.par_iter_mut()) + .for_each(|(left, right)| { + let factor = *left; + *right = factor * *r_i; + *left -= *right; + }); + + size <<= 1; + } + evals + }; + evals } diff --git a/sumcheck/src/prover/high_degree.rs b/sumcheck/src/prover/high_degree.rs index 731c09ce6..441f7d7bd 100644 --- a/sumcheck/src/prover/high_degree.rs +++ b/sumcheck/src/prover/high_degree.rs @@ -8,11 +8,13 @@ use alloc::vec::Vec; use air::LogUpGkrEvaluator; use crypto::{ElementHasher, RandomCoin}; use math::FieldElement; +#[cfg(feature = "concurrent")] +pub use rayon::prelude::*; use super::SumCheckProverError; use crate::{ - comb_func, evaluate_composition_poly, CompressedUnivariatePolyEvals, EqFunction, - FinalOpeningClaim, MultiLinearPoly, RoundProof, SumCheckProof, SumCheckRoundClaim, + evaluate_composition_poly, CompressedUnivariatePolyEvals, EqFunction, FinalOpeningClaim, + MultiLinearPoly, RoundProof, SumCheckProof, SumCheckRoundClaim, }; /// A sum-check prover for the input layer which can accommodate non-linear expressions in @@ -51,86 +53,100 @@ use crate::{ /// 2. ${[w]} := \sum_i w_i \cdot 2^i$ and $w := (w_1, \cdots, w_{\mu})$. /// 3. $h_{j}$ and $g_{j}$ are multi-variate polynomials for $j = 0, \cdots, 2^{\mu} - 1$. /// 4. $n := \nu + \mu$ +/// 5. 
$\mathbb{B}_{\gamma} := \{0, 1\}^{\gamma}$ for positive integer $\gamma$. /// /// The sum above is evaluated using a layered circuit with the equation linking the input layer /// values $p_n$ to the next layer values $p_{n-1}$ given by the following relations /// /// $$ /// p_{n-1}\left(v_2, \cdots, v_{\mu}, x_1, \cdots, x_{\nu}\right) = \sum_{w_i, y_i} -/// EQ\left(\left(v_2, \cdots, v_{\mu}, x_1, \cdots, x_{\nu}\right), -/// \left(w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right)\right) -/// \cdot \left( p_n\left(1, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right) -/// \cdot q_n\left(0, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right) + -/// p_n\left(0, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right) \cdot -/// q_n\left(1, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right)\right) +/// EQ\left(\left(v_2, \cdots, v_{\mu}, x_1, \cdots, x_{\nu}\right), +/// \left(w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right)\right) +/// \cdot \left( p_n\left(1, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right) +/// \cdot q_n\left(0, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right) + +/// p_n\left(0, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right) \cdot +/// q_n\left(1, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right)\right) /// $$ /// /// and /// /// $$ /// q_{n-1}\left(v_2, \cdots, v_{\mu}, x_1, \cdots, x_{\nu}\right) = \sum_{w_i, y_i} -/// EQ\left(\left(v_2, \cdots, v_{\mu}, x_1, \cdots, x_{\nu}\right), -/// \left(w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right)\right) -/// \cdot \left( q_n\left(1, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right) -/// \cdot q_n\left(0, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right)\right) +/// EQ\left(\left(v_2, \cdots, v_{\mu}, x_1, \cdots, x_{\nu}\right), +/// \left(w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right)\right) +/// \cdot \left( q_n\left(1, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right) +/// \cdot q_n\left(0, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right)\right) /// $$ /// /// and similarly for all subsequent layers. /// -/// These expressions are nothing but the equations in Section 3.2 in [1] but with the projection -/// happening at the first argument instead of the last. -/// -/// We can now note a few things about the above: -/// -/// 1. During the evaluation phase of the circuit, the prover needs to compute every tuple -/// $\left(p_k, q_k\right)$ for $k = n, \cdots, 1$ over the boolean hyper-cubes of -/// the appropriate sizes. In particular, the prover will have the evaluations -/// $\left(p_n, q_n\right)$ over $\{0, 1\}^{\mu + \nu}$ stored. -/// 2. Since $p_n$ and $q_n$ are linear in the first $\mu$ variables, we can directly use -/// the stored evaluations of $p_n$ and $q_n$ during the final sum-check, the one linking -/// the input layer to its next layer, for the first $\mu - 1$ rounds. This means that for -/// the first $\mu - 1$ rounds, the last sum-check protocol can be treated like the sum-checks -/// for the other layers i.e., the original degree $3$ sum-check of the LogUp-GKR paper. -/// 3. 
For the last $\nu$ rounds of the final sum-check, we can still use the evaluations of -/// $\left(p_k, q_k\right)$, or more precisely the result of their binding with the $\mu -1$ -/// round challenges from point 2 above, in order to optimize the computation of the sum-check -/// round polynomials but due to the non-linearity of $\left(p_n, q_n\right)$ in the last $\nu$ -/// variables, we will have to work with +/// By the properties of the $EQ$ function, we can write the above as follows: /// /// $$ -/// p_n\left(v_1, r_1, \cdots, r_{\mu - 1}, x_1, \cdots, x_{\nu}\right) = \sum_{w\in\{0, 1\}^{\mu}} -/// EQ\left(\left(v_1, r_1, \cdots, r_{\mu - 1}\right), \left(w_1, \cdots, w_{\mu}\right)\right) -/// g_{[w]}\left(f_1\left(x_1, \cdots, x_{\nu}\right), \cdots, -/// f_l\left(x_1, \cdots, x_{\nu}\right)\right) +/// p_{n-1}\left(v_2, \cdots, v_{\mu}, x_1, \cdots, x_{\nu}\right) = \sum_{y_i} +/// EQ\left(\left(x_1, \cdots, x_{\nu}\right), +/// \left(y_1, \cdots, y_{\nu}\right)\right) +/// \left( \sum_{w_i} EQ\left(\left(v_2, \cdots, v_{\mu}\right), +/// \left(w_2, \cdots, w_{\mu}\right)\right) +/// \cdot \left( p_n\left(1, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right) +/// \cdot q_n\left(0, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right) + +/// p_n\left(0, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right) \cdot +/// q_n\left(1, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right)\right) \right) /// $$ /// /// and /// /// $$ -/// q_n\left(v_1, r_1, \cdots, r_{\mu - 1}, x_1, \cdots, x_{\nu}\right) = \sum_{w\in\{0, 1\}^{\mu}} -/// EQ\left(\left(v_1, r_1, \cdots, r_{\mu - 1}\right), \left(w_1, \cdots, w_{\mu}\right)\right) -/// h_{[w]}\left(f_1\left(x_1, \cdots, x_{\nu}\right), \cdots, -/// f_l\left(x_1, \cdots, x_{\nu}\right)\right) +/// q_{n-1}\left(v_2, \cdots, v_{\mu}, x_1, \cdots, x_{\nu}\right) = \sum_{y_i} +/// EQ\left(\left(x_1, \cdots, x_{\nu}\right), +/// \left(y_1, \cdots, y_{\nu}\right)\right) +/// \left( \sum_{w_i} EQ\left(\left(v_2, \cdots, v_{\mu}\right)\right) +/// \cdot q_n\left(1, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right) +/// \cdot q_n\left(0, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right) \right) +/// $$ +/// +/// These expressions are nothing but the equations in Section 3.2 in [1] but with the projection +/// happening in the first argument instead of the last one. +/// The current function is then tasked with running a batched sum-check protocol for +/// +/// $$ +/// p_{n-1}\left(v_2, \cdots, v_{\mu}, x_1, \cdots, x_{\nu}\right) = +/// \sum_{y\in\mathbb{B}_{\nu}} G(y_{1}, ..., y_{\nu}) /// $$ /// -/// where $r_i$ is the sum-check round challenges of the first $\mu - 1$ rounds. +/// and +/// +/// $$ +/// q_{n-1}\left(v_2, \cdots, v_{\mu}, x_1, \cdots, x_{\nu}\right) = +/// \sum_{y\in\mathbb{B}_{\nu}} H\left(y_1, \cdots, y_{\nu} \right) +/// $$ /// -/// The current function executes the last $\nu$ parts of the sum-check and uses -/// the [`LogUpGkrEvaluator`] to evaluate $g_i$ and $h_i$ during the computation of the evaluations -/// of the round polynomials. 
+/// where /// -/// As an optimization, the function uses the five polynomials, refered to as [`merged_mls`]: +/// $$ +/// G := \left( \left(y_1, \cdots, y_{\nu}\right) \longrightarrow +/// EQ\left(\left(x_1, \cdots, x_{\nu}\right), +/// \left(y_1, \cdots, y_{\nu}\right)\right) +/// \left( \sum_{w_i} EQ\left(\left(v_2, \cdots, v_{\mu}\right), +/// \left(w_2, \cdots, w_{\mu}\right)\right) +/// \cdot \left( p_n\left(1, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right) +/// \cdot q_n\left(0, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right) + +/// p_n\left(0, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right) \cdot +/// q_n\left(1, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right)\right) \right) +/// \right) +/// $$ /// -/// 1. $p_n\left(0, r_1, \cdots, r_{\mu - 1}, x_1, \cdots, x_{\nu}\right)$ -/// 2. $p_n\left(1, r_1, \cdots, r_{\mu - 1}, x_1, \cdots, x_{\nu}\right)$ -/// 3. $q_n\left(0, r_1, \cdots, r_{\mu - 1}, x_1, \cdots, x_{\nu}\right)$ -/// 4. $q_n\left(1, r_1, \cdots, r_{\mu - 1}, x_1, \cdots, x_{\nu}\right)$ -/// 5. $$\left(y_1, \cdots, y_{\nu}\right) \longrightarrow -/// EQ\left(\left(t_1, \cdots, t_{\mu + \nu - 1}\right), -/// \left(r_1, \cdots, r_{\mu - 1}, y_1, \cdots, y_{\nu}\right)\right) -/// $$ -/// where $t_i$ is the sum-check randomness from the previous layer. +/// and /// +/// $$ +/// H := \left( \left(y_1, \cdots, y_{\nu}\right) \longrightarrow +/// EQ\left(\left(x_1, \cdots, x_{\nu}\right), +/// \left(y_1, \cdots, y_{\nu}\right)\right) +/// \left( \sum_{w_i} EQ\left(\left(v_2, \cdots, v_{\mu}\right)\right) +/// \cdot q_n\left(1, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right) +/// \cdot q_n\left(0, w_2, \cdots, w_{\mu}, y_1, \cdots, y_{\nu}\right) \right) +/// \right) +/// $$ /// /// [1]: https://eprint.iacr.org/2023/1284 #[allow(clippy::too_many_arguments)] @@ -140,11 +156,10 @@ pub fn sum_check_prove_higher_degree< H: ElementHasher, >( evaluator: &impl LogUpGkrEvaluator::BaseField>, + evaluation_point: Vec, claim: E, r_sum_check: E, - rand_merge: Vec, log_up_randomness: Vec, - merged_mls: &mut [MultiLinearPoly], mls: &mut [MultiLinearPoly], coin: &mut C, ) -> Result, SumCheckProverError> { @@ -152,22 +167,18 @@ pub fn sum_check_prove_higher_degree< let mut round_proofs = vec![]; + // split the evaluation point into two points of dimension mu and nu, respectively + let mu = evaluator.get_num_fractions().trailing_zeros() - 1; + let (evaluation_point_mu, evaluation_point_nu) = evaluation_point.split_at(mu as usize); + let eq_mu = EqFunction::ml_at(evaluation_point_mu.to_vec()).evaluations().to_vec(); + let mut eq_nu = EqFunction::ml_at(evaluation_point_nu.to_vec()); + // setup first round claim let mut current_round_claim = SumCheckRoundClaim { eval_point: vec![], claim }; - // compute, for all (w_1, \cdots, w_{\mu - 1}) in {0, 1}^{\mu - 1}: - // EQ\left(\left(r_1, \cdots, r_{\mu - 1}\right), \left(w_1, \cdots, w_{\mu - 1}\right)\right) - let tensored_merge_randomness = EqFunction::ml_at(rand_merge.to_vec()).evaluations().to_vec(); - // run the first round of the protocol - let round_poly_evals = sumcheck_round( - evaluator, - mls, - merged_mls, - &log_up_randomness, - r_sum_check, - &tensored_merge_randomness, - ); + let round_poly_evals = + sumcheck_round(&eq_mu, evaluator, &eq_nu, mls, &log_up_randomness, r_sum_check); let round_poly_coefs = round_poly_evals.to_poly(current_round_claim.claim); // reseed with the s_0 polynomial @@ -186,22 +197,12 @@ pub fn sum_check_prove_higher_degree< // fold each multi-linear using the round challenge mls.iter_mut() .for_each(|ml| 
ml.bind_least_significant_variable(round_challenge)); - - // fold each merged multi-linear using the round challenge - merged_mls - .iter_mut() - .for_each(|ml| ml.bind_least_significant_variable(round_challenge)); + eq_nu.bind_least_significant_variable(round_challenge); // run the i-th round of the protocol using the folded multi-linears for the new reduced // claim. This basically computes the s_i polynomial. - let round_poly_evals = sumcheck_round( - evaluator, - mls, - merged_mls, - &log_up_randomness, - r_sum_check, - &tensored_merge_randomness, - ); + let round_poly_evals = + sumcheck_round(&eq_mu, evaluator, &eq_nu, mls, &log_up_randomness, r_sum_check); // update the claim current_round_claim = new_round_claim; @@ -221,10 +222,7 @@ pub fn sum_check_prove_higher_degree< // fold each multi-linear using the last random round challenge mls.iter_mut() .for_each(|ml| ml.bind_least_significant_variable(round_challenge)); - // fold each merged multi-linear using the last random round challenge - merged_mls - .iter_mut() - .for_each(|ml| ml.bind_least_significant_variable(round_challenge)); + eq_nu.bind_least_significant_variable(round_challenge); let SumCheckRoundClaim { eval_point, claim: _claim } = reduce_claim(&round_proofs[num_rounds - 1], current_round_claim, round_challenge); @@ -279,83 +277,191 @@ pub fn sum_check_prove_higher_degree< /// added to each multi-linear to compute the evaluation at the next point, and `evals_x` to hold /// the current evaluation at $x$ in $\{2, ... , d_max\}$. fn sumcheck_round( + eq_mu: &[E], evaluator: &impl LogUpGkrEvaluator::BaseField>, + eq_ml: &MultiLinearPoly, mls: &[MultiLinearPoly], - merged_mls: &[MultiLinearPoly], log_up_randomness: &[E], r_sum_check: E, - tensored_merge_randomness: &[E], ) -> CompressedUnivariatePolyEvals { let num_ml = mls.len(); let num_vars = mls[0].num_variables(); let num_rounds = num_vars - 1; - let mut evals_one = vec![E::ZERO; num_ml]; - let mut evals_zero = vec![E::ZERO; num_ml]; - let mut evals_x = vec![E::ZERO; num_ml]; - - let mut deltas = vec![E::ZERO; num_ml]; - - let mut numerators = vec![E::ZERO; evaluator.get_num_fractions()]; - let mut denominators = vec![E::ZERO; evaluator.get_num_fractions()]; - - let total_evals = (0..1 << num_rounds).map(|i| { - let mut total_evals = vec![E::ZERO; evaluator.max_degree()]; - - for (j, ml) in mls.iter().enumerate() { - evals_zero[j] = ml.evaluations()[2 * i]; - - evals_one[j] = ml.evaluations()[2 * i + 1]; - } - - let eq_at_zero = merged_mls[4].evaluations()[2 * i]; - let eq_at_one = merged_mls[4].evaluations()[2 * i + 1]; - - let p0 = merged_mls[0][2 * i + 1]; - let p1 = merged_mls[1][2 * i + 1]; - let q0 = merged_mls[2][2 * i + 1]; - let q1 = merged_mls[3][2 * i + 1]; - - total_evals[0] = comb_func(p0, p1, q0, q1, eq_at_one, r_sum_check); - - evals_zero - .iter() - .zip(evals_one.iter().zip(deltas.iter_mut().zip(evals_x.iter_mut()))) - .for_each(|(a0, (a1, (delta, evx)))| { - *delta = *a1 - *a0; - *evx = *a1; - }); - let eq_delta = eq_at_one - eq_at_zero; - let mut eq_x = eq_at_one; - - for e in total_evals.iter_mut().skip(1) { - evals_x.iter_mut().zip(deltas.iter()).for_each(|(evx, delta)| { - *evx += *delta; - }); - eq_x += eq_delta; - - evaluator.evaluate_query( - &evals_x, - log_up_randomness, - &mut numerators, - &mut denominators, - ); - - *e = evaluate_composition_poly( - &numerators, - &denominators, - eq_x, - r_sum_check, - tensored_merge_randomness, - ); - } - - total_evals - }); - - let evaluations = total_evals.fold(vec![E::ZERO; evaluator.max_degree()], 
|mut acc, evals| { - acc.iter_mut().zip(evals.iter()).for_each(|(a, ev)| *a += *ev); - acc - }); + + #[cfg(not(feature = "concurrent"))] + let evaluations = { + let mut evals_one = vec![E::ZERO; num_ml]; + let mut evals_zero = vec![E::ZERO; num_ml]; + let mut evals_x = vec![E::ZERO; num_ml]; + let mut eq_x = E::ZERO; + + let mut deltas = vec![E::ZERO; num_ml]; + let mut eq_delta = E::ZERO; + + let mut numerators = vec![E::ZERO; evaluator.get_num_fractions()]; + let mut denominators = vec![E::ZERO; evaluator.get_num_fractions()]; + (0..1 << num_rounds) + .map(|i| { + let mut total_evals = vec![E::ZERO; evaluator.max_degree()]; + + for (j, ml) in mls.iter().enumerate() { + evals_zero[j] = ml.evaluations()[2 * i]; + evals_one[j] = ml.evaluations()[2 * i + 1]; + } + + let eq_at_zero = eq_ml.evaluations()[2 * i]; + let eq_at_one = eq_ml.evaluations()[2 * i + 1]; + + // compute the evaluation at 1 + evaluator.evaluate_query( + &evals_one, + log_up_randomness, + &mut numerators, + &mut denominators, + ); + total_evals[0] = evaluate_composition_poly( + eq_mu, + &numerators, + &denominators, + eq_at_one, + r_sum_check, + ); + + // compute the evaluations at 2, ..., d_max points + evals_zero + .iter() + .zip(evals_one.iter().zip(deltas.iter_mut().zip(evals_x.iter_mut()))) + .for_each(|(a0, (a1, (delta, evx)))| { + *delta = *a1 - *a0; + *evx = *a1; + }); + eq_delta = eq_at_one - eq_at_zero; + eq_x = eq_at_one; + + for e in total_evals.iter_mut().skip(1) { + evals_x.iter_mut().zip(deltas.iter()).for_each(|(evx, delta)| { + *evx += *delta; + }); + eq_x += eq_delta; + + evaluator.evaluate_query( + &evals_x, + log_up_randomness, + &mut numerators, + &mut denominators, + ); + *e = evaluate_composition_poly( + eq_mu, + &numerators, + &denominators, + eq_x, + r_sum_check, + ); + } + + total_evals + }) + .fold(vec![E::ZERO; evaluator.max_degree()], |mut acc, poly_eval| { + acc.iter_mut().zip(poly_eval.iter()).for_each(|(a, b)| { + *a += *b; + }); + acc + }) + }; + + #[cfg(feature = "concurrent")] + let evaluations = (0..1 << num_rounds) + .into_par_iter() + .fold( + || { + ( + vec![E::ZERO; num_ml], + vec![E::ZERO; num_ml], + vec![E::ZERO; num_ml], + vec![E::ZERO; evaluator.max_degree()], + vec![E::ZERO; evaluator.get_num_fractions()], + vec![E::ZERO; evaluator.get_num_fractions()], + vec![E::ZERO; num_ml], + ) + }, + |( + mut evals_zero, + mut evals_one, + mut evals_x, + mut poly_evals, + mut numerators, + mut denominators, + mut deltas, + ), + i| { + for (j, ml) in mls.iter().enumerate() { + evals_zero[j] = ml.evaluations()[2 * i]; + evals_one[j] = ml.evaluations()[2 * i + 1]; + } + + let eq_at_zero = eq_ml.evaluations()[2 * i]; + let eq_at_one = eq_ml.evaluations()[2 * i + 1]; + + // compute the evaluation at 1 + evaluator.evaluate_query( + &evals_one, + log_up_randomness, + &mut numerators, + &mut denominators, + ); + poly_evals[0] = evaluate_composition_poly( + eq_mu, + &numerators, + &denominators, + eq_at_one, + r_sum_check, + ); + + // compute the evaluations at 2, ..., d_max points + evals_zero + .iter() + .zip(evals_one.iter().zip(deltas.iter_mut().zip(evals_x.iter_mut()))) + .for_each(|(a0, (a1, (delta, evx)))| { + *delta = *a1 - *a0; + *evx = *a1; + }); + let eq_delta = eq_at_one - eq_at_zero; + let mut eq_x = eq_at_one; + + for e in poly_evals.iter_mut().skip(1) { + evals_x.iter_mut().zip(deltas.iter()).for_each(|(evx, delta)| { + *evx += *delta; + }); + eq_x += eq_delta; + + evaluator.evaluate_query( + &evals_x, + log_up_randomness, + &mut numerators, + &mut denominators, + ); + *e = 
evaluate_composition_poly( + eq_mu, + &numerators, + &denominators, + eq_x, + r_sum_check, + ); + } + + (evals_zero, evals_one, evals_x, poly_evals, numerators, denominators, deltas) + }, + ) + .map(|(_, _, _, poly_evals, ..)| poly_evals) + .reduce( + || vec![E::ZERO; evaluator.max_degree()], + |mut acc, poly_eval| { + acc.iter_mut().zip(poly_eval.iter()).for_each(|(a, b)| { + *a += *b; + }); + acc + }, + ); CompressedUnivariatePolyEvals(evaluations.into()) } diff --git a/sumcheck/src/prover/plain.rs b/sumcheck/src/prover/plain.rs index 8e53c17c0..718229f58 100644 --- a/sumcheck/src/prover/plain.rs +++ b/sumcheck/src/prover/plain.rs @@ -5,6 +5,8 @@ use crypto::{ElementHasher, RandomCoin}; use math::FieldElement; +#[cfg(feature = "concurrent")] +pub use rayon::prelude::*; use super::SumCheckProverError; use crate::{ @@ -48,7 +50,6 @@ pub fn sumcheck_prove_plain< C: RandomCoin, H: ElementHasher, >( - num_rounds: usize, claim: E, r_batch: E, p0: &mut MultiLinearPoly, @@ -57,44 +58,95 @@ pub fn sumcheck_prove_plain< q1: &mut MultiLinearPoly, eq: &mut MultiLinearPoly, transcript: &mut C, -) -> Result<(SumCheckProof, E), SumCheckProverError> { +) -> Result, SumCheckProverError> { let mut round_proofs = vec![]; let mut claim = claim; let mut challenges = vec![]; - for _ in 0..num_rounds { - let mut eval_point_0 = E::ZERO; - let mut eval_point_2 = E::ZERO; - let mut eval_point_3 = E::ZERO; + for _ in 0..p0.num_variables() { let len = p0.num_evaluations() / 2; - for i in 0..len { - eval_point_0 += - comb_func(p0[2 * i], p1[2 * i], q0[2 * i], q1[2 * i], eq[2 * i], r_batch); - - let p0_delta = p0[2 * i + 1] - p0[2 * i]; - let p1_delta = p1[2 * i + 1] - p1[2 * i]; - let q0_delta = q0[2 * i + 1] - q0[2 * i]; - let q1_delta = q1[2 * i + 1] - q1[2 * i]; - let eq_delta = eq[2 * i + 1] - eq[2 * i]; - - let mut p0_evx = p0[2 * i + 1] + p0_delta; - let mut p1_evx = p1[2 * i + 1] + p1_delta; - let mut q0_evx = q0[2 * i + 1] + q0_delta; - let mut q1_evx = q1[2 * i + 1] + q1_delta; - let mut eq_evx = eq[2 * i + 1] + eq_delta; - eval_point_2 += comb_func(p0_evx, p1_evx, q0_evx, q1_evx, eq_evx, r_batch); - - p0_evx += p0_delta; - p1_evx += p1_delta; - q0_evx += q0_delta; - q1_evx += q1_delta; - eq_evx += eq_delta; - eval_point_3 += comb_func(p0_evx, p1_evx, q0_evx, q1_evx, eq_evx, r_batch); - } + + #[cfg(not(feature = "concurrent"))] + let (eval_point_1, eval_point_2, eval_point_3) = + (0..len).fold((E::ZERO, E::ZERO, E::ZERO), |(a, b, c), i| { + let eval_point_1 = comb_func( + p0[2 * i + 1], + p1[2 * i + 1], + q0[2 * i + 1], + q1[2 * i + 1], + eq[2 * i + 1], + r_batch, + ); + + let p0_delta = p0[2 * i + 1] - p0[2 * i]; + let p1_delta = p1[2 * i + 1] - p1[2 * i]; + let q0_delta = q0[2 * i + 1] - q0[2 * i]; + let q1_delta = q1[2 * i + 1] - q1[2 * i]; + let eq_delta = eq[2 * i + 1] - eq[2 * i]; + + let mut p0_evx = p0[2 * i + 1] + p0_delta; + let mut p1_evx = p1[2 * i + 1] + p1_delta; + let mut q0_evx = q0[2 * i + 1] + q0_delta; + let mut q1_evx = q1[2 * i + 1] + q1_delta; + let mut eq_evx = eq[2 * i + 1] + eq_delta; + let eval_point_2 = comb_func(p0_evx, p1_evx, q0_evx, q1_evx, eq_evx, r_batch); + + p0_evx += p0_delta; + p1_evx += p1_delta; + q0_evx += q0_delta; + q1_evx += q1_delta; + eq_evx += eq_delta; + let eval_point_3 = comb_func(p0_evx, p1_evx, q0_evx, q1_evx, eq_evx, r_batch); + + (eval_point_1 + a, eval_point_2 + b, eval_point_3 + c) + }); + + #[cfg(feature = "concurrent")] + let (eval_point_1, eval_point_2, eval_point_3) = (0..len) + .into_par_iter() + .fold( + || (E::ZERO, E::ZERO, E::ZERO), + |(a, b, 
c), i| { + let eval_point_1 = comb_func( + p0[2 * i + 1], + p1[2 * i + 1], + q0[2 * i + 1], + q1[2 * i + 1], + eq[2 * i + 1], + r_batch, + ); + + let p0_delta = p0[2 * i + 1] - p0[2 * i]; + let p1_delta = p1[2 * i + 1] - p1[2 * i]; + let q0_delta = q0[2 * i + 1] - q0[2 * i]; + let q1_delta = q1[2 * i + 1] - q1[2 * i]; + let eq_delta = eq[2 * i + 1] - eq[2 * i]; + + let mut p0_evx = p0[2 * i + 1] + p0_delta; + let mut p1_evx = p1[2 * i + 1] + p1_delta; + let mut q0_evx = q0[2 * i + 1] + q0_delta; + let mut q1_evx = q1[2 * i + 1] + q1_delta; + let mut eq_evx = eq[2 * i + 1] + eq_delta; + let eval_point_2 = comb_func(p0_evx, p1_evx, q0_evx, q1_evx, eq_evx, r_batch); + + p0_evx += p0_delta; + p1_evx += p1_delta; + q0_evx += q0_delta; + q1_evx += q1_delta; + eq_evx += eq_delta; + let eval_point_3 = comb_func(p0_evx, p1_evx, q0_evx, q1_evx, eq_evx, r_batch); + + (eval_point_1 + a, eval_point_2 + b, eval_point_3 + c) + }, + ) + .reduce( + || (E::ZERO, E::ZERO, E::ZERO), + |(a0, b0, c0), (a1, b1, c1)| (a0 + a1, b0 + b1, c0 + c1), + ); let evals = vec![ - claim - eval_point_0, // Optimization applied using the claim to reduce the number of sums computed + eval_point_1, // Optimization applied using the claim to reduce the number of sums computed eval_point_2, eval_point_3, ]; @@ -127,14 +179,11 @@ pub fn sumcheck_prove_plain< claim = new_claim; } - Ok(( - SumCheckProof { - openings_claim: FinalOpeningClaim { - eval_point: challenges, - openings: vec![p0[0], p1[0], q0[0], q1[0]], - }, - round_proofs, + Ok(SumCheckProof { + openings_claim: FinalOpeningClaim { + eval_point: challenges, + openings: vec![p0[0], p1[0], q0[0], q1[0]], }, - claim, - )) + round_proofs, + }) } diff --git a/sumcheck/src/verifier/mod.rs b/sumcheck/src/verifier/mod.rs index 010788f17..18bb374b9 100644 --- a/sumcheck/src/verifier/mod.rs +++ b/sumcheck/src/verifier/mod.rs @@ -19,7 +19,7 @@ pub fn verify_rounds( claim: E, round_proofs: &[RoundProof], coin: &mut C, -) -> Result, Error> +) -> Result, SumCheckVerifierError> where E: FieldElement, C: RandomCoin, @@ -31,7 +31,7 @@ where let round_poly_coefs = round_proof.round_poly_coefs.clone(); coin.reseed(H::hash_elements(&round_poly_coefs.0)); - let r = coin.draw().map_err(|_| Error::FailedToGenerateChallenge)?; + let r = coin.draw().map_err(|_| SumCheckVerifierError::FailedToGenerateChallenge)?; round_claim = round_proof.round_poly_coefs.evaluate_using_claim(&round_claim, &r); evaluation_point.push(r); @@ -54,10 +54,12 @@ pub fn verify_sum_check_intermediate_layers< gkr_eval_point: &[E], claim: (E, E), transcript: &mut C, -) -> Result, Error> { +) -> Result, SumCheckVerifierError> { // generate challenge to batch sum-checks transcript.reseed(H::hash_elements(&[claim.0, claim.1])); - let r_batch: E = transcript.draw().map_err(|_| Error::FailedToGenerateChallenge)?; + let r_batch: E = transcript + .draw() + .map_err(|_| SumCheckVerifierError::FailedToGenerateChallenge)?; // compute the claim for the batched sum-check let reduced_claim = claim.0 + claim.1 * r_batch; @@ -75,13 +77,15 @@ pub fn verify_sum_check_intermediate_layers< let eq = EqFunction::new(gkr_eval_point.to_vec()).evaluate(&openings_claim.eval_point); if (p0 * q1 + p1 * q0 + r_batch * q0 * q1) * eq != final_round_claim.claim { - return Err(Error::FinalEvaluationCheckFailed); + return Err(SumCheckVerifierError::FinalEvaluationCheckFailed); } Ok(openings_claim.clone()) } -/// Verifies the final sum-check proof of a GKR proof. 
+/// Verifies the final sum-check proof i.e., the one for the input layer, including the final check, +/// and returns a [`FinalOpeningClaim`] to the STARK verifier in order to verify the correctness of +/// the openings. pub fn verify_sum_check_input_layer< E: FieldElement, C: RandomCoin, @@ -93,95 +97,55 @@ pub fn verify_sum_check_input_layer< gkr_eval_point: &[E], claim: (E, E), transcript: &mut C, -) -> Result, Error> { - let FinalLayerProof { before_merge_proof, after_merge_proof } = proof; +) -> Result, SumCheckVerifierError> { + let FinalLayerProof { proof } = proof; // generate challenge to batch sum-checks transcript.reseed(H::hash_elements(&[claim.0, claim.1])); - let r_batch: E = transcript.draw().map_err(|_| Error::FailedToGenerateChallenge)?; + let r_batch: E = transcript + .draw() + .map_err(|_| SumCheckVerifierError::FailedToGenerateChallenge)?; // compute the claim for the batched sum-check let reduced_claim = claim.0 + claim.1 * r_batch; - // verify the first half of the sum-check proof i.e., `before_merge_proof` - let SumCheckRoundClaim { eval_point: rand_merge, claim } = - verify_rounds(reduced_claim, before_merge_proof, transcript)?; - - // verify the second half of the sum-check proof i.e., `after_merge_proof` - verify_sum_check_final( - claim, - after_merge_proof, - rand_merge, - r_batch, - log_up_randomness, - gkr_eval_point, - evaluator, - transcript, - ) -} + // verify the sum-check proof + let SumCheckRoundClaim { eval_point, claim } = + verify_rounds(reduced_claim, &proof.round_proofs, transcript)?; -/// Verifies the second sum-check proof for the input layer, including the final check, and returns -/// a [`FinalOpeningClaim`] to the STARK verifier in order to verify the correctness of -/// the openings. -#[allow(clippy::too_many_arguments)] -fn verify_sum_check_final< - E: FieldElement, - C: RandomCoin, - H: ElementHasher, ->( - claim: E, - after_merge_proof: &SumCheckProof, - rand_merge: Vec, - r_batch: E, - log_up_randomness: Vec, - gkr_eval_point: &[E], - evaluator: &impl LogUpGkrEvaluator, - transcript: &mut C, -) -> Result, Error> { - let SumCheckProof { openings_claim, round_proofs } = after_merge_proof; - - let SumCheckRoundClaim { - eval_point: evaluation_point, - claim: claimed_evaluation, - } = verify_rounds(claim, round_proofs, transcript)?; - - if openings_claim.eval_point != evaluation_point { - return Err(Error::WrongOpeningPoint); + // execute the final evaluation check + if proof.openings_claim.eval_point != eval_point { + return Err(SumCheckVerifierError::WrongOpeningPoint); } let mut numerators = vec![E::ZERO; evaluator.get_num_fractions()]; let mut denominators = vec![E::ZERO; evaluator.get_num_fractions()]; - evaluator.evaluate_query( - &openings_claim.openings, + &proof.openings_claim.openings, &log_up_randomness, &mut numerators, &mut denominators, ); - let lagrange_ker = EqFunction::new(gkr_eval_point.to_vec()); - let mut gkr_point = rand_merge.clone(); - - gkr_point.extend_from_slice(&openings_claim.eval_point.clone()); - let eq_eval = lagrange_ker.evaluate(&gkr_point); - let tensored_merge_randomness = EqFunction::ml_at(rand_merge.to_vec()).evaluations().to_vec(); - let expected_evaluation = evaluate_composition_poly( - &numerators, - &denominators, - eq_eval, - r_batch, - &tensored_merge_randomness, - ); + let mu = evaluator.get_num_fractions().trailing_zeros() - 1; + let (evaluation_point_mu, evaluation_point_nu) = gkr_eval_point.split_at(mu as usize); + + let eq_mu = EqFunction::new(evaluation_point_mu.to_vec()).evaluations(); + let 
eq_nu = EqFunction::new(evaluation_point_nu.to_vec()); + + let eq_nu_eval = eq_nu.evaluate(&proof.openings_claim.eval_point); + let expected_evaluation = + evaluate_composition_poly(&eq_mu, &numerators, &denominators, eq_nu_eval, r_batch); - if expected_evaluation != claimed_evaluation { - Err(Error::FinalEvaluationCheckFailed) + if expected_evaluation != claim { + Err(SumCheckVerifierError::FinalEvaluationCheckFailed) } else { - Ok(openings_claim.clone()) + Ok(proof.openings_claim.clone()) } } #[derive(Debug, thiserror::Error)] -pub enum Error { +pub enum SumCheckVerifierError { #[error("the final evaluation check of sum-check failed")] FinalEvaluationCheckFailed, #[error("failed to generate round challenge")] From 2e5a704499b9a6eb906974354d2c0f216c3608d8 Mon Sep 17 00:00:00 2001 From: Al-Kindi-0 <82364884+Al-Kindi-0@users.noreply.github.com> Date: Thu, 15 Aug 2024 15:39:54 +0200 Subject: [PATCH 13/28] chore: fix Sync issue --- air/src/air/logup_gkr.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/air/src/air/logup_gkr.rs b/air/src/air/logup_gkr.rs index 22ece038d..12ecac662 100644 --- a/air/src/air/logup_gkr.rs +++ b/air/src/air/logup_gkr.rs @@ -8,7 +8,7 @@ use alloc::vec::Vec; use super::EvaluationFrame; use math::{ExtensionOf, FieldElement, StarkField, ToElements}; -pub trait LogUpGkrEvaluator: Clone { +pub trait LogUpGkrEvaluator: Clone + Sync { /// Defines the base field of the evaluator. type BaseField: StarkField; From 06454d978f7dd9fc6d7597a74fa349edcab90f79 Mon Sep 17 00:00:00 2001 From: Al-Kindi-0 <82364884+Al-Kindi-0@users.noreply.github.com> Date: Thu, 15 Aug 2024 15:49:28 +0200 Subject: [PATCH 14/28] chore: pacify clippy --- sumcheck/src/multilinear.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/sumcheck/src/multilinear.rs b/sumcheck/src/multilinear.rs index ede55b7ca..eb9a3e504 100644 --- a/sumcheck/src/multilinear.rs +++ b/sumcheck/src/multilinear.rs @@ -235,12 +235,12 @@ impl EqFunction { /// We thus have the following algorithm: /// /// 1. Split current table, stored as a vector, $A^{(j)}\left[\left(w_{1}, \dots, w_{j} \right)\right]$ -/// into two tables $A^{(j)}\left[\left(w_{1}, \dots, w_{j-1}, 0 \right)\right]$ and -/// $A^{(j)}\left[\left(w_{1}, \dots, w_{j-1}, 1 \right)\right]$, -/// with the first part initialized to $A^{(j - 1)}\left[\left(w_{1}, \dots, w_{j-1} \right)\right]$. +/// into two tables $A^{(j)}\left[\left(w_{1}, \dots, w_{j-1}, 0 \right)\right]$ and +/// $A^{(j)}\left[\left(w_{1}, \dots, w_{j-1}, 1 \right)\right]$, +/// with the first part initialized to $A^{(j - 1)}\left[\left(w_{1}, \dots, w_{j-1} \right)\right]$. /// 2. Iterating over $\left(w_{1}, \dots, w_{j-1} \right)$, do: /// 1. Let $factor = A^{(j - 1)}\left[\left(w_{1}, \dots, w_{j-1} \right)\right]$, which is equal -/// by the above to $A^{(j)}\left[\left(w_{1}, \dots, w_{j-1}, 0 \right)\right]$. +/// by the above to $A^{(j)}\left[\left(w_{1}, \dots, w_{j-1}, 0 \right)\right]$. /// 2. $A^{(j)}\left[\left(w_{1}, \dots, w_{j-1}, 1 \right)\right] = factor \cdot r_j$ /// 3. 
$A^{(j)}\left[\left(w_{1}, \dots, w_{j-1}, 0 \right)\right] = /// A^{(j)}\left[\left(w_{1}, \dots, w_{j-1}, 0 \right)\right] - From 2c0785745a0e5c5dc8b13868eb9960cd9d998c5a Mon Sep 17 00:00:00 2001 From: Al-Kindi-0 <82364884+Al-Kindi-0@users.noreply.github.com> Date: Mon, 19 Aug 2024 11:23:55 +0200 Subject: [PATCH 15/28] chore: fix toml formatting --- Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 1b69d99bc..7a5f28ff7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,8 +10,8 @@ members = [ "prover", "verifier", "winterfell", - "examples" -, "sumcheck"] + "examples", + "sumcheck",] resolver = "2" [profile.release] From 2e02f1f9d21656e65e3ca6495d879b532ac17d48 Mon Sep 17 00:00:00 2001 From: Al-Kindi-0 <82364884+Al-Kindi-0@users.noreply.github.com> Date: Mon, 19 Aug 2024 12:52:44 +0200 Subject: [PATCH 16/28] feat: add benchmarks and address feedback --- air/src/air/logup_gkr.rs | 12 ++-- sumcheck/Cargo.toml | 10 +++ sumcheck/benches/bind_variable.rs | 90 +++++++++++++++++++++++++++ sumcheck/benches/eq_function.rs | 98 ++++++++++++++++++++++++++++++ sumcheck/src/multilinear.rs | 9 +-- sumcheck/src/prover/high_degree.rs | 7 +-- sumcheck/src/prover/plain.rs | 10 +-- sumcheck/src/verifier/mod.rs | 6 +- 8 files changed, 221 insertions(+), 21 deletions(-) create mode 100644 sumcheck/benches/bind_variable.rs create mode 100644 sumcheck/benches/eq_function.rs diff --git a/air/src/air/logup_gkr.rs b/air/src/air/logup_gkr.rs index 12ecac662..bbdd150b0 100644 --- a/air/src/air/logup_gkr.rs +++ b/air/src/air/logup_gkr.rs @@ -34,7 +34,7 @@ pub trait LogUpGkrEvaluator: Clone + Sync { /// information returned from `get_oracles()`. However, this implementation is likely to be /// expensive compared to the hand-written implementation. However, we could provide a test /// which verifies that `get_oracles()` and `build_query()` methods are consistent. - fn build_query(&self, frame: &EvaluationFrame, periodic_values: &[E]) -> Vec + fn build_query(&self, frame: &EvaluationFrame, periodic_values: &[E], query: &mut [E]) where E: FieldElement; @@ -49,8 +49,8 @@ pub trait LogUpGkrEvaluator: Clone + Sync { &self, query: &[F], rand_values: &[E], - numerator: &mut [E], - denominator: &mut [E], + numerators: &mut [E], + denominators: &mut [E], ) where F: FieldElement, E: FieldElement + ExtensionOf; @@ -61,9 +61,11 @@ pub trait LogUpGkrEvaluator: Clone + Sync { /// fractional sums will cancel out. However, in cases when some boundary conditions need to /// be imposed on the LogUp-GKR relations, this method can be overridden to compute the final /// expected claim. 
- fn compute_claim(&self, inputs: &Self::PublicInputs, rand_values: &[E]) -> E + fn compute_claim(&self, _inputs: &Self::PublicInputs, _rand_values: &[E]) -> E where - E: FieldElement; + E: FieldElement{ + E::ZERO + } } #[derive(Clone, Debug, PartialEq, PartialOrd, Eq, Ord)] diff --git a/sumcheck/Cargo.toml b/sumcheck/Cargo.toml index 865f0dbf8..db6b17574 100644 --- a/sumcheck/Cargo.toml +++ b/sumcheck/Cargo.toml @@ -12,6 +12,16 @@ keywords = ["crypto", "sumcheck", "iop"] edition = "2021" rust-version = "1.78" +[[bench]] +name = "eq_function" +harness = false +required-features = ["concurrent"] + +[[bench]] +name = "bind_variable" +harness = false +required-features = ["concurrent"] + [features] concurrent = ["utils/concurrent", "dep:rayon", "std"] default = ["std"] diff --git a/sumcheck/benches/bind_variable.rs b/sumcheck/benches/bind_variable.rs new file mode 100644 index 000000000..f7e82126c --- /dev/null +++ b/sumcheck/benches/bind_variable.rs @@ -0,0 +1,90 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree. + +use std::time::Duration; + +use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion}; +use math::{fields::f64::BaseElement, FieldElement}; +use rand_utils::{rand_value, rand_vector}; +#[cfg(feature = "concurrent")] +pub use rayon::prelude::*; + +const POLY_SIZE: [usize; 2] = [1 << 18, 1 << 20]; + +fn bind_variable_serial(c: &mut Criterion) { + let mut group = c.benchmark_group("Bind variable evaluations"); + group.sample_size(10); + group.measurement_time(Duration::from_secs(10)); + + for &poly_size in POLY_SIZE.iter() { + group.bench_function(BenchmarkId::new("serial", poly_size), |b| { + b.iter_batched( + || { + let random_challenge: BaseElement = rand_value(); + let poly_evals: Vec = rand_vector(poly_size); + (random_challenge, poly_evals) + }, + |(random_challenge, poly_evals)| { + let mut poly_evals = poly_evals; + bind_least_significant_variable_serial(&mut poly_evals, random_challenge) + }, + BatchSize::SmallInput, + ) + }); + } +} + +fn bind_variable_parallel(c: &mut Criterion) { + let mut group = c.benchmark_group("Bind variable function evaluations"); + group.sample_size(10); + group.measurement_time(Duration::from_secs(10)); + + for &poly_size in POLY_SIZE.iter() { + group.bench_function(BenchmarkId::new("parallel", poly_size), |b| { + b.iter_batched( + || { + let random_challenge: BaseElement = rand_value(); + let poly_evals: Vec = rand_vector(poly_size); + (random_challenge, poly_evals) + }, + |(random_challenge, poly_evals)| { + let mut poly_evals = poly_evals; + bind_least_significant_variable_parallel(&mut poly_evals, random_challenge) + }, + BatchSize::SmallInput, + ) + }); + } +} + +fn bind_least_significant_variable_serial( + evaluations: &mut Vec, + round_challenge: E, +) { + let num_evals = evaluations.len() >> 1; + + for i in 0..num_evals { + evaluations[i] = evaluations[i << 1] + + round_challenge * (evaluations[(i << 1) + 1] - evaluations[i << 1]); + } + evaluations.truncate(num_evals); +} + +fn bind_least_significant_variable_parallel( + evaluations: &mut Vec, + round_challenge: E, +) { + let num_evals = evaluations.len() >> 1; + + let mut result = unsafe { utils::uninit_vector(num_evals) }; + result.par_iter_mut().enumerate().for_each(|(i, ev)| { + *ev = evaluations[i << 1] + + round_challenge * (evaluations[(i << 1) + 1] - evaluations[i << 1]) + }); + *evaluations = result +} + 
+criterion_group!(group, bind_variable_serial, bind_variable_parallel); +criterion_main!(group); diff --git a/sumcheck/benches/eq_function.rs b/sumcheck/benches/eq_function.rs new file mode 100644 index 000000000..4aee5afa4 --- /dev/null +++ b/sumcheck/benches/eq_function.rs @@ -0,0 +1,98 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree. + +use std::time::Duration; + +use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion}; +use math::{fields::f64::BaseElement, FieldElement}; +use rand_utils::rand_vector; +#[cfg(feature = "concurrent")] +pub use rayon::prelude::*; + +const LOG_POLY_SIZE: [usize; 2] = [18, 20]; + +fn evaluate_eq_serial(c: &mut Criterion) { + let mut group = c.benchmark_group("EQ function evaluations"); + group.sample_size(10); + group.measurement_time(Duration::from_secs(10)); + + for &log_poly_size in LOG_POLY_SIZE.iter() { + group.bench_function(BenchmarkId::new("serial", log_poly_size), |b| { + b.iter_batched( + || { + let randomness: Vec = rand_vector(log_poly_size); + randomness + }, + |rand| eq_evaluations(&rand), + BatchSize::SmallInput, + ) + }); + } +} + +fn evaluate_eq_parallel(c: &mut Criterion) { + let mut group = c.benchmark_group("EQ function evaluations"); + group.sample_size(10); + group.measurement_time(Duration::from_secs(10)); + + for &log_poly_size in LOG_POLY_SIZE.iter() { + group.bench_function(BenchmarkId::new("parallel", log_poly_size), |b| { + b.iter_batched( + || { + let randomness: Vec = rand_vector(log_poly_size); + randomness + }, + |rand| eq_evaluations_par(&rand), + BatchSize::SmallInput, + ) + }); + } +} + + +fn eq_evaluations(query: &[E]) -> Vec { + let n = 1 << query.len(); + let mut evals = unsafe { utils::uninit_vector(n) }; + + let mut size = 1; + evals[0] = E::ONE; + for r_i in query.iter() { + let (left_evals, right_evals) = evals.split_at_mut(size); + left_evals.iter_mut().zip(right_evals.iter_mut()).for_each(|(left, right)| { + let factor = *left; + *right = factor * *r_i; + *left -= *right; + }); + + size *= 2; + } + evals +} + +fn eq_evaluations_par(query: &[E]) -> Vec { + let n = 1 << query.len(); + let mut evals = unsafe { utils::uninit_vector(n) }; + + let mut size = 1; + evals[0] = E::ONE; + for r_i in query.iter() { + let (left_evals, right_evals) = evals.split_at_mut(size); + left_evals + .par_iter_mut() + .zip(right_evals.par_iter_mut()) + .for_each(|(left, right)| { + let factor = *left; + *right = factor * *r_i; + *left -= *right; + }); + + size <<= 1; + } + evals +} + + +criterion_group!(group, evaluate_eq_serial, evaluate_eq_parallel); +criterion_main!(group); \ No newline at end of file diff --git a/sumcheck/src/multilinear.rs b/sumcheck/src/multilinear.rs index eb9a3e504..ad46c7b44 100644 --- a/sumcheck/src/multilinear.rs +++ b/sumcheck/src/multilinear.rs @@ -183,7 +183,7 @@ pub struct EqFunction { impl EqFunction { /// Creates a new [EqFunction]. - pub fn new(r: Vec) -> Self { + pub fn new(r: SmallVec<[E; MAX_EQ_SIZE]>) -> Self { let tmp = r.into(); EqFunction { r: tmp } } @@ -206,8 +206,8 @@ impl EqFunction { /// Returns the evaluations of /// $((y_0, ..., y_{{\nu} - 1}) \mapsto \tilde{EQ}((r_0, ..., r_{{\nu} - 1}), (y_0, ..., y_{{\nu} - 1})))$ /// over ${0 , 1}^{\nu}$. 
- pub fn ml_at(evaluation_point: Vec) -> MultiLinearPoly { - let eq_evals = EqFunction::new(evaluation_point.clone()).evaluations(); + pub fn ml_at(evaluation_point: SmallVec<[E; MAX_EQ_SIZE]>) -> MultiLinearPoly { + let eq_evals = EqFunction::new(evaluation_point).evaluations(); MultiLinearPoly::from_evaluations(eq_evals) } } @@ -345,13 +345,14 @@ fn test_bind() { fn test_eq_function() { use math::fields::f64::BaseElement; use rand_utils::rand_value; + use smallvec::smallvec; let one = BaseElement::ONE; // Lagrange kernel is computed correctly let r0 = rand_value(); let r1 = rand_value(); - let eq_function = EqFunction::new(vec![r0, r1]); + let eq_function = EqFunction::new(smallvec![r0, r1]); let expected = vec![(one - r0) * (one - r1), r0 * (one - r1), (one - r0) * r1, r0 * r1]; diff --git a/sumcheck/src/prover/high_degree.rs b/sumcheck/src/prover/high_degree.rs index 441f7d7bd..78e92de66 100644 --- a/sumcheck/src/prover/high_degree.rs +++ b/sumcheck/src/prover/high_degree.rs @@ -152,7 +152,6 @@ use crate::{ #[allow(clippy::too_many_arguments)] pub fn sum_check_prove_higher_degree< E: FieldElement, - C: RandomCoin, H: ElementHasher, >( evaluator: &impl LogUpGkrEvaluator::BaseField>, @@ -161,7 +160,7 @@ pub fn sum_check_prove_higher_degree< r_sum_check: E, log_up_randomness: Vec, mls: &mut [MultiLinearPoly], - coin: &mut C, + coin: &mut impl RandomCoin, ) -> Result, SumCheckProverError> { let num_rounds = mls[0].num_variables(); @@ -170,8 +169,8 @@ pub fn sum_check_prove_higher_degree< // split the evaluation point into two points of dimension mu and nu, respectively let mu = evaluator.get_num_fractions().trailing_zeros() - 1; let (evaluation_point_mu, evaluation_point_nu) = evaluation_point.split_at(mu as usize); - let eq_mu = EqFunction::ml_at(evaluation_point_mu.to_vec()).evaluations().to_vec(); - let mut eq_nu = EqFunction::ml_at(evaluation_point_nu.to_vec()); + let eq_mu = EqFunction::ml_at(evaluation_point_mu.into()).evaluations().to_vec(); + let mut eq_nu = EqFunction::ml_at(evaluation_point_nu.into()); // setup first round claim let mut current_round_claim = SumCheckRoundClaim { eval_point: vec![], claim }; diff --git a/sumcheck/src/prover/plain.rs b/sumcheck/src/prover/plain.rs index 718229f58..133bde11b 100644 --- a/sumcheck/src/prover/plain.rs +++ b/sumcheck/src/prover/plain.rs @@ -4,6 +4,7 @@ // LICENSE file in the root directory of this source tree. 
use crypto::{ElementHasher, RandomCoin}; +use smallvec::smallvec; use math::FieldElement; #[cfg(feature = "concurrent")] pub use rayon::prelude::*; @@ -47,7 +48,6 @@ use crate::{ #[allow(clippy::too_many_arguments)] pub fn sumcheck_prove_plain< E: FieldElement, - C: RandomCoin, H: ElementHasher, >( claim: E, @@ -57,7 +57,7 @@ pub fn sumcheck_prove_plain< q0: &mut MultiLinearPoly, q1: &mut MultiLinearPoly, eq: &mut MultiLinearPoly, - transcript: &mut C, + transcript: &mut impl RandomCoin, ) -> Result, SumCheckProverError> { let mut round_proofs = vec![]; @@ -145,12 +145,12 @@ pub fn sumcheck_prove_plain< |(a0, b0, c0), (a1, b1, c1)| (a0 + a1, b0 + b1, c0 + c1), ); - let evals = vec![ - eval_point_1, // Optimization applied using the claim to reduce the number of sums computed + let evals = smallvec![ + eval_point_1, eval_point_2, eval_point_3, ]; - let poly = CompressedUnivariatePolyEvals(evals.into()); + let poly = CompressedUnivariatePolyEvals(evals); let round_poly_coefs = poly.to_poly(claim); // reseed with the s_i polynomial diff --git a/sumcheck/src/verifier/mod.rs b/sumcheck/src/verifier/mod.rs index 18bb374b9..6b4ee2cd4 100644 --- a/sumcheck/src/verifier/mod.rs +++ b/sumcheck/src/verifier/mod.rs @@ -74,7 +74,7 @@ pub fn verify_sum_check_intermediate_layers< let q0 = openings_claim.openings[2]; let q1 = openings_claim.openings[3]; - let eq = EqFunction::new(gkr_eval_point.to_vec()).evaluate(&openings_claim.eval_point); + let eq = EqFunction::new(gkr_eval_point.into()).evaluate(&openings_claim.eval_point); if (p0 * q1 + p1 * q0 + r_batch * q0 * q1) * eq != final_round_claim.claim { return Err(SumCheckVerifierError::FinalEvaluationCheckFailed); @@ -130,8 +130,8 @@ pub fn verify_sum_check_input_layer< let mu = evaluator.get_num_fractions().trailing_zeros() - 1; let (evaluation_point_mu, evaluation_point_nu) = gkr_eval_point.split_at(mu as usize); - let eq_mu = EqFunction::new(evaluation_point_mu.to_vec()).evaluations(); - let eq_nu = EqFunction::new(evaluation_point_nu.to_vec()); + let eq_mu = EqFunction::new(evaluation_point_mu.into()).evaluations(); + let eq_nu = EqFunction::new(evaluation_point_nu.into()); let eq_nu_eval = eq_nu.evaluate(&proof.openings_claim.eval_point); let expected_evaluation = From e7a50c02fc14fe9f0e7512d941e50469d3133af3 Mon Sep 17 00:00:00 2001 From: Al-Kindi-0 <82364884+Al-Kindi-0@users.noreply.github.com> Date: Mon, 19 Aug 2024 16:14:26 +0200 Subject: [PATCH 17/28] feat: address feedback and add benchmarks --- sumcheck/Cargo.toml | 4 + sumcheck/README.md | 16 +++ sumcheck/benches/eq_function.rs | 4 +- sumcheck/benches/sum_check_plain.rs | 154 ++++++++++++++++++++++++++++ sumcheck/src/lib.rs | 24 +---- sumcheck/src/multilinear.rs | 3 +- sumcheck/src/prover/high_degree.rs | 24 ++--- sumcheck/src/prover/plain.rs | 13 +-- sumcheck/src/verifier/mod.rs | 74 ++++++------- 9 files changed, 226 insertions(+), 90 deletions(-) create mode 100644 sumcheck/README.md create mode 100644 sumcheck/benches/sum_check_plain.rs diff --git a/sumcheck/Cargo.toml b/sumcheck/Cargo.toml index db6b17574..c1cafba60 100644 --- a/sumcheck/Cargo.toml +++ b/sumcheck/Cargo.toml @@ -12,6 +12,10 @@ keywords = ["crypto", "sumcheck", "iop"] edition = "2021" rust-version = "1.78" +[[bench]] +name = "sum_check_plain" +harness = false + [[bench]] name = "eq_function" harness = false diff --git a/sumcheck/README.md b/sumcheck/README.md new file mode 100644 index 000000000..3a705b2e0 --- /dev/null +++ b/sumcheck/README.md @@ -0,0 +1,16 @@ +# Winter sum-check +This crate contains an implementation of 
the sum-check protocol intended to be used for LogUp-GKR by the Winterfell STARK prover and verifier. + +## Crate features +This crate can be compiled with the following features: + +* `std` - enabled by default and relies on the Rust standard library. +* `concurrent` - implies `std` and also re-exports `rayon` crate and enables multi-threaded execution for some of the crate functions. +* `no_std` - does not rely on Rust's standard library and enables compilation to WebAssembly. + +To compile with `no_std`, disable default features via `--no-default-features` flag. + +License +------- + +This project is [MIT licensed](../LICENSE). \ No newline at end of file diff --git a/sumcheck/benches/eq_function.rs b/sumcheck/benches/eq_function.rs index 4aee5afa4..86e6cad98 100644 --- a/sumcheck/benches/eq_function.rs +++ b/sumcheck/benches/eq_function.rs @@ -51,7 +51,6 @@ fn evaluate_eq_parallel(c: &mut Criterion) { } } - fn eq_evaluations(query: &[E]) -> Vec { let n = 1 << query.len(); let mut evals = unsafe { utils::uninit_vector(n) }; @@ -93,6 +92,5 @@ fn eq_evaluations_par(query: &[E]) -> Vec { evals } - criterion_group!(group, evaluate_eq_serial, evaluate_eq_parallel); -criterion_main!(group); \ No newline at end of file +criterion_main!(group); diff --git a/sumcheck/benches/sum_check_plain.rs b/sumcheck/benches/sum_check_plain.rs new file mode 100644 index 000000000..140ce9509 --- /dev/null +++ b/sumcheck/benches/sum_check_plain.rs @@ -0,0 +1,154 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree. + +use std::time::Duration; + +use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion}; +use crypto::{hashers::Blake3_192, DefaultRandomCoin, RandomCoin}; +use math::{fields::f64::BaseElement, FieldElement}; +use rand_utils::{rand_value, rand_vector}; +#[cfg(feature = "concurrent")] +pub use rayon::prelude::*; +use winter_sumcheck::{sumcheck_prove_plain, EqFunction, MultiLinearPoly}; + + +const LOG_POLY_SIZE: [usize; 2] = [18, 20]; + +fn sum_check_plain(c: &mut Criterion) { + let mut group = c.benchmark_group("Sum-check prover plain"); + group.sample_size(10); + group.measurement_time(Duration::from_secs(10)); + + for &log_poly_size in LOG_POLY_SIZE.iter() { + group.bench_function(BenchmarkId::new("", log_poly_size), |b| { + b.iter_batched( + || { + let transcript = DefaultRandomCoin::>::new(&vec![BaseElement::ZERO; 4]); + (setup_sum_check_2::(log_poly_size), transcript) + }, + |((claim, r_batch, p0, p1, q0, q1, eq), transcript)| { + let mut p0 = p0; + let mut p1 = p1; + let mut q0 = q0; + let mut q1 = q1; + let mut eq = eq; + let mut transcript = transcript; + + sumcheck_prove_plain(claim, r_batch,&mut p0,&mut p1, &mut q0,&mut q1,&mut eq,&mut transcript) + }, + BatchSize::SmallInput, + ) + }); + } +} + + +fn compute_next_layer( + p0: &[E], + p1: &[E], + q0: &[E], + q1: &[E], +) -> (Vec, Vec, Vec, Vec) { + let (p, q): (Vec, Vec) = p0 + .iter() + .zip(p1.iter().zip(q0.iter().zip(q1.iter()))) + .map(|(&p0, (&p1, (&q0, &q1)))| { + let p = p0 * q1 + p1 * q0; + let q = q0 * q1; + (p, q) + }) + .unzip(); + let mut p0 = Vec::with_capacity(p.len() / 2); + let mut p1 = Vec::with_capacity(p.len() / 2); + let mut q0 = Vec::with_capacity(q.len() / 2); + let mut q1 = Vec::with_capacity(q.len() / 2); + for chunk in p.chunks_exact(2) { + p0.push(chunk[0]); + p1.push(chunk[1]); + } + for chunk in q.chunks_exact(2) { + q0.push(chunk[0]); + 
q1.push(chunk[1]); + } + (p0, p1, q0, q1) +} + +fn setup_sum_check(log_size: usize) -> ( + E, + E, + MultiLinearPoly, + MultiLinearPoly, + MultiLinearPoly, + MultiLinearPoly, + MultiLinearPoly, +) { + let n = 1 << log_size; + let p0: Vec = rand_vector(n); + let p1: Vec = rand_vector(n); + let q0: Vec = rand_vector(n); + let q1: Vec = rand_vector(n); + + let (p0_nxt, p1_nxt, q0_nxt, q1_nxt) = compute_next_layer(&p0, &p1, &q0, &q1); + + let m = p0_nxt.len().trailing_zeros() as usize; + + let rand_pt = rand_vector(m + 1); + + let p0_nxt_eval_at_rand_pt = MultiLinearPoly::from_evaluations(p0_nxt).evaluate(&rand_pt[1..]); + let p1_nxt_eval_at_rand_pt = MultiLinearPoly::from_evaluations(p1_nxt).evaluate(&rand_pt[1..]); + let q0_nxt_eval_at_rand_pt = MultiLinearPoly::from_evaluations(q0_nxt).evaluate(&rand_pt[1..]); + let q1_nxt_eval_at_rand_pt = MultiLinearPoly::from_evaluations(q1_nxt).evaluate(&rand_pt[1..]); + + let p_nxt_eval_at_rand_pt = + (E::ONE - rand_pt[0]) * p0_nxt_eval_at_rand_pt + rand_pt[0] * p1_nxt_eval_at_rand_pt; + let q_nxt_eval_at_rand_pt = + (E::ONE - rand_pt[0]) * q0_nxt_eval_at_rand_pt + rand_pt[0] * q1_nxt_eval_at_rand_pt; + + let r_batch: E = rand_value(); + + let claim = p_nxt_eval_at_rand_pt + r_batch * q_nxt_eval_at_rand_pt; + + let p0 = MultiLinearPoly::from_evaluations(p0); + let p1 = MultiLinearPoly::from_evaluations(p1); + let q0 = MultiLinearPoly::from_evaluations(q0); + let q1 = MultiLinearPoly::from_evaluations(q1); + let eq = MultiLinearPoly::from_evaluations(EqFunction::new(rand_pt.into()).evaluations()); + + (claim, r_batch, p0, p1, q0, q1, eq) +} + +fn setup_sum_check_2(log_size: usize) -> ( + E, + E, + MultiLinearPoly, + MultiLinearPoly, + MultiLinearPoly, + MultiLinearPoly, + MultiLinearPoly, +) { + let n = 1 << log_size; + let p0: Vec = rand_vector(n); + let p1: Vec = rand_vector(n); + let q0: Vec = rand_vector(n); + let q1: Vec = rand_vector(n); + + // this will not generate the correct claim with overwhelming probability but should be fine + // for benchmarking + let rand_pt = rand_vector(log_size); + let r_batch: E = rand_value(); + let claim: E = rand_value(); + + let p0 = MultiLinearPoly::from_evaluations(p0); + let p1 = MultiLinearPoly::from_evaluations(p1); + let q0 = MultiLinearPoly::from_evaluations(q0); + let q1 = MultiLinearPoly::from_evaluations(q1); + let eq = MultiLinearPoly::from_evaluations(EqFunction::new(rand_pt.into()).evaluations()); + + (claim, r_batch, p0, p1, q0, q1, eq) +} + + +criterion_group!(group, sum_check_plain); +criterion_main!(group); diff --git a/sumcheck/src/lib.rs b/sumcheck/src/lib.rs index 933fe35c7..5e15bff4b 100644 --- a/sumcheck/src/lib.rs +++ b/sumcheck/src/lib.rs @@ -271,25 +271,9 @@ pub fn evaluate_composition_poly( eq_eval: E, r_sum_check: E, ) -> E { - let numerators = MultiLinearPoly::from_evaluations(numerators.to_vec()); - let denominators = MultiLinearPoly::from_evaluations(denominators.to_vec()); - - let (left_numerators, right_numerators) = numerators.project_least_significant_variable(); - let (left_denominators, right_denominators) = denominators.project_least_significant_variable(); - - left_numerators - .evaluations() - .iter() - .zip( - right_numerators.evaluations().iter().zip( - left_denominators - .evaluations() - .iter() - .zip(right_denominators.evaluations().iter().zip(eq_at_mu.iter())), - ), - ) - .map(|(p0, (p1, (q0, (q1, eq_w))))| { - *eq_w * comb_func(*p0, *p1, *q0, *q1, eq_eval, r_sum_check) - }) + numerators + .chunks(2) + .zip(denominators.chunks(2).zip(eq_at_mu.iter())) + .map(|(p, (q, 
eq_w))| *eq_w * comb_func(p[0], p[1], q[0], q[1], eq_eval, r_sum_check)) .fold(E::ZERO, |acc, x| acc + x) } diff --git a/sumcheck/src/multilinear.rs b/sumcheck/src/multilinear.rs index ad46c7b44..12e6a6803 100644 --- a/sumcheck/src/multilinear.rs +++ b/sumcheck/src/multilinear.rs @@ -184,8 +184,7 @@ pub struct EqFunction { impl EqFunction { /// Creates a new [EqFunction]. pub fn new(r: SmallVec<[E; MAX_EQ_SIZE]>) -> Self { - let tmp = r.into(); - EqFunction { r: tmp } + EqFunction { r } } /// Computes $\tilde{EQ}((r_0, ..., r_{{\nu} - 1}), (t_0, ..., t_{{\nu} - 1})))$. diff --git a/sumcheck/src/prover/high_degree.rs b/sumcheck/src/prover/high_degree.rs index 78e92de66..4e3b3cecb 100644 --- a/sumcheck/src/prover/high_degree.rs +++ b/sumcheck/src/prover/high_degree.rs @@ -327,13 +327,10 @@ fn sumcheck_round( ); // compute the evaluations at 2, ..., d_max points - evals_zero - .iter() - .zip(evals_one.iter().zip(deltas.iter_mut().zip(evals_x.iter_mut()))) - .for_each(|(a0, (a1, (delta, evx)))| { - *delta = *a1 - *a0; - *evx = *a1; - }); + for i in 0..num_ml { + deltas[i] = evals_one[i] - evals_zero[i]; + evals_x[i] = evals_one[i]; + } eq_delta = eq_at_one - eq_at_zero; eq_x = eq_at_one; @@ -417,13 +414,10 @@ fn sumcheck_round( ); // compute the evaluations at 2, ..., d_max points - evals_zero - .iter() - .zip(evals_one.iter().zip(deltas.iter_mut().zip(evals_x.iter_mut()))) - .for_each(|(a0, (a1, (delta, evx)))| { - *delta = *a1 - *a0; - *evx = *a1; - }); + for i in 0..num_ml { + deltas[i] = evals_one[i] - evals_zero[i]; + evals_x[i] = evals_one[i]; + } let eq_delta = eq_at_one - eq_at_zero; let mut eq_x = eq_at_one; @@ -466,7 +460,7 @@ fn sumcheck_round( } /// Reduces an old claim to a new claim using the round challenge. -pub fn reduce_claim( +fn reduce_claim( current_poly: &RoundProof, current_round_claim: SumCheckRoundClaim, round_challenge: E, diff --git a/sumcheck/src/prover/plain.rs b/sumcheck/src/prover/plain.rs index 133bde11b..5e0f9379b 100644 --- a/sumcheck/src/prover/plain.rs +++ b/sumcheck/src/prover/plain.rs @@ -4,10 +4,10 @@ // LICENSE file in the root directory of this source tree. 
use crypto::{ElementHasher, RandomCoin}; -use smallvec::smallvec; use math::FieldElement; #[cfg(feature = "concurrent")] pub use rayon::prelude::*; +use smallvec::smallvec; use super::SumCheckProverError; use crate::{ @@ -46,10 +46,7 @@ use crate::{ /// /// [1]: https://eprint.iacr.org/2023/1284 #[allow(clippy::too_many_arguments)] -pub fn sumcheck_prove_plain< - E: FieldElement, - H: ElementHasher, ->( +pub fn sumcheck_prove_plain>( claim: E, r_batch: E, p0: &mut MultiLinearPoly, @@ -145,11 +142,7 @@ pub fn sumcheck_prove_plain< |(a0, b0, c0), (a1, b1, c1)| (a0 + a1, b0 + b1, c0 + c1), ); - let evals = smallvec![ - eval_point_1, - eval_point_2, - eval_point_3, - ]; + let evals = smallvec![eval_point_1, eval_point_2, eval_point_3,]; let poly = CompressedUnivariatePolyEvals(evals); let round_poly_coefs = poly.to_poly(claim); diff --git a/sumcheck/src/verifier/mod.rs b/sumcheck/src/verifier/mod.rs index 6b4ee2cd4..d1cfae3a4 100644 --- a/sumcheck/src/verifier/mod.rs +++ b/sumcheck/src/verifier/mod.rs @@ -10,50 +10,20 @@ use crypto::{ElementHasher, RandomCoin}; use math::FieldElement; use crate::{ - evaluate_composition_poly, EqFunction, FinalLayerProof, FinalOpeningClaim, RoundProof, - SumCheckProof, SumCheckRoundClaim, + comb_func, evaluate_composition_poly, EqFunction, FinalLayerProof, FinalOpeningClaim, + RoundProof, SumCheckProof, SumCheckRoundClaim, }; -/// Verifies a round of the sum-check protocol without executing the final check. -pub fn verify_rounds( - claim: E, - round_proofs: &[RoundProof], - coin: &mut C, -) -> Result, SumCheckVerifierError> -where - E: FieldElement, - C: RandomCoin, - H: ElementHasher, -{ - let mut round_claim = claim; - let mut evaluation_point = vec![]; - for round_proof in round_proofs { - let round_poly_coefs = round_proof.round_poly_coefs.clone(); - coin.reseed(H::hash_elements(&round_poly_coefs.0)); - - let r = coin.draw().map_err(|_| SumCheckVerifierError::FailedToGenerateChallenge)?; - - round_claim = round_proof.round_poly_coefs.evaluate_using_claim(&round_claim, &r); - evaluation_point.push(r); - } - - Ok(SumCheckRoundClaim { - eval_point: evaluation_point, - claim: round_claim, - }) -} - /// Verifies sum-check proofs, as part of the GKR proof, for all GKR layers except for the last one /// i.e., the circuit input layer. pub fn verify_sum_check_intermediate_layers< E: FieldElement, - C: RandomCoin, H: ElementHasher, >( proof: &SumCheckProof, gkr_eval_point: &[E], claim: (E, E), - transcript: &mut C, + transcript: &mut impl RandomCoin, ) -> Result, SumCheckVerifierError> { // generate challenge to batch sum-checks transcript.reseed(H::hash_elements(&[claim.0, claim.1])); @@ -76,7 +46,7 @@ pub fn verify_sum_check_intermediate_layers< let eq = EqFunction::new(gkr_eval_point.into()).evaluate(&openings_claim.eval_point); - if (p0 * q1 + p1 * q0 + r_batch * q0 * q1) * eq != final_round_claim.claim { + if comb_func(p0, p1, q0, q1, eq, r_batch) != final_round_claim.claim { return Err(SumCheckVerifierError::FinalEvaluationCheckFailed); } @@ -86,17 +56,13 @@ pub fn verify_sum_check_intermediate_layers< /// Verifies the final sum-check proof i.e., the one for the input layer, including the final check, /// and returns a [`FinalOpeningClaim`] to the STARK verifier in order to verify the correctness of /// the openings. 
-pub fn verify_sum_check_input_layer< - E: FieldElement, - C: RandomCoin, - H: ElementHasher, ->( +pub fn verify_sum_check_input_layer>( evaluator: &impl LogUpGkrEvaluator, proof: &FinalLayerProof, log_up_randomness: Vec, gkr_eval_point: &[E], claim: (E, E), - transcript: &mut C, + transcript: &mut impl RandomCoin, ) -> Result, SumCheckVerifierError> { let FinalLayerProof { proof } = proof; @@ -144,6 +110,34 @@ pub fn verify_sum_check_input_layer< } } +/// Verifies a round of the sum-check protocol without executing the final check. +fn verify_rounds( + claim: E, + round_proofs: &[RoundProof], + coin: &mut impl RandomCoin, +) -> Result, SumCheckVerifierError> +where + E: FieldElement, + H: ElementHasher, +{ + let mut round_claim = claim; + let mut evaluation_point = vec![]; + for round_proof in round_proofs { + let round_poly_coefs = round_proof.round_poly_coefs.clone(); + coin.reseed(H::hash_elements(&round_poly_coefs.0)); + + let r = coin.draw().map_err(|_| SumCheckVerifierError::FailedToGenerateChallenge)?; + + round_claim = round_proof.round_poly_coefs.evaluate_using_claim(&round_claim, &r); + evaluation_point.push(r); + } + + Ok(SumCheckRoundClaim { + eval_point: evaluation_point, + claim: round_claim, + }) +} + #[derive(Debug, thiserror::Error)] pub enum SumCheckVerifierError { #[error("the final evaluation check of sum-check failed")] From 2b64dbff498b656fb20df7481ed0fa5c596ed3da Mon Sep 17 00:00:00 2001 From: Al-Kindi-0 <82364884+Al-Kindi-0@users.noreply.github.com> Date: Mon, 19 Aug 2024 16:15:09 +0200 Subject: [PATCH 18/28] feat: simplify sum-check bench --- sumcheck/benches/sum_check_plain.rs | 77 +---------------------------- 1 file changed, 1 insertion(+), 76 deletions(-) diff --git a/sumcheck/benches/sum_check_plain.rs b/sumcheck/benches/sum_check_plain.rs index 140ce9509..f3b72fcfd 100644 --- a/sumcheck/benches/sum_check_plain.rs +++ b/sumcheck/benches/sum_check_plain.rs @@ -26,7 +26,7 @@ fn sum_check_plain(c: &mut Criterion) { b.iter_batched( || { let transcript = DefaultRandomCoin::>::new(&vec![BaseElement::ZERO; 4]); - (setup_sum_check_2::(log_poly_size), transcript) + (setup_sum_check::(log_poly_size), transcript) }, |((claim, r_batch, p0, p1, q0, q1, eq), transcript)| { let mut p0 = p0; @@ -44,37 +44,6 @@ fn sum_check_plain(c: &mut Criterion) { } } - -fn compute_next_layer( - p0: &[E], - p1: &[E], - q0: &[E], - q1: &[E], -) -> (Vec, Vec, Vec, Vec) { - let (p, q): (Vec, Vec) = p0 - .iter() - .zip(p1.iter().zip(q0.iter().zip(q1.iter()))) - .map(|(&p0, (&p1, (&q0, &q1)))| { - let p = p0 * q1 + p1 * q0; - let q = q0 * q1; - (p, q) - }) - .unzip(); - let mut p0 = Vec::with_capacity(p.len() / 2); - let mut p1 = Vec::with_capacity(p.len() / 2); - let mut q0 = Vec::with_capacity(q.len() / 2); - let mut q1 = Vec::with_capacity(q.len() / 2); - for chunk in p.chunks_exact(2) { - p0.push(chunk[0]); - p1.push(chunk[1]); - } - for chunk in q.chunks_exact(2) { - q0.push(chunk[0]); - q1.push(chunk[1]); - } - (p0, p1, q0, q1) -} - fn setup_sum_check(log_size: usize) -> ( E, E, @@ -90,50 +59,6 @@ fn setup_sum_check(log_size: usize) -> ( let q0: Vec = rand_vector(n); let q1: Vec = rand_vector(n); - let (p0_nxt, p1_nxt, q0_nxt, q1_nxt) = compute_next_layer(&p0, &p1, &q0, &q1); - - let m = p0_nxt.len().trailing_zeros() as usize; - - let rand_pt = rand_vector(m + 1); - - let p0_nxt_eval_at_rand_pt = MultiLinearPoly::from_evaluations(p0_nxt).evaluate(&rand_pt[1..]); - let p1_nxt_eval_at_rand_pt = MultiLinearPoly::from_evaluations(p1_nxt).evaluate(&rand_pt[1..]); - let q0_nxt_eval_at_rand_pt = 
MultiLinearPoly::from_evaluations(q0_nxt).evaluate(&rand_pt[1..]); - let q1_nxt_eval_at_rand_pt = MultiLinearPoly::from_evaluations(q1_nxt).evaluate(&rand_pt[1..]); - - let p_nxt_eval_at_rand_pt = - (E::ONE - rand_pt[0]) * p0_nxt_eval_at_rand_pt + rand_pt[0] * p1_nxt_eval_at_rand_pt; - let q_nxt_eval_at_rand_pt = - (E::ONE - rand_pt[0]) * q0_nxt_eval_at_rand_pt + rand_pt[0] * q1_nxt_eval_at_rand_pt; - - let r_batch: E = rand_value(); - - let claim = p_nxt_eval_at_rand_pt + r_batch * q_nxt_eval_at_rand_pt; - - let p0 = MultiLinearPoly::from_evaluations(p0); - let p1 = MultiLinearPoly::from_evaluations(p1); - let q0 = MultiLinearPoly::from_evaluations(q0); - let q1 = MultiLinearPoly::from_evaluations(q1); - let eq = MultiLinearPoly::from_evaluations(EqFunction::new(rand_pt.into()).evaluations()); - - (claim, r_batch, p0, p1, q0, q1, eq) -} - -fn setup_sum_check_2(log_size: usize) -> ( - E, - E, - MultiLinearPoly, - MultiLinearPoly, - MultiLinearPoly, - MultiLinearPoly, - MultiLinearPoly, -) { - let n = 1 << log_size; - let p0: Vec = rand_vector(n); - let p1: Vec = rand_vector(n); - let q0: Vec = rand_vector(n); - let q1: Vec = rand_vector(n); - // this will not generate the correct claim with overwhelming probability but should be fine // for benchmarking let rand_pt = rand_vector(log_size); From ea7cb84ccc9b0e3aa0a430626d4249666624a33a Mon Sep 17 00:00:00 2001 From: Al-Kindi-0 <82364884+Al-Kindi-0@users.noreply.github.com> Date: Mon, 19 Aug 2024 17:15:14 +0200 Subject: [PATCH 19/28] feat: add sum-check benchmarks --- sumcheck/Cargo.toml | 4 + sumcheck/benches/sum_check_high_degree.rs | 160 ++++++++++++++++++++++ sumcheck/benches/sum_check_plain.rs | 23 +++- 3 files changed, 182 insertions(+), 5 deletions(-) create mode 100644 sumcheck/benches/sum_check_high_degree.rs diff --git a/sumcheck/Cargo.toml b/sumcheck/Cargo.toml index c1cafba60..97dc79f3a 100644 --- a/sumcheck/Cargo.toml +++ b/sumcheck/Cargo.toml @@ -16,6 +16,10 @@ rust-version = "1.78" name = "sum_check_plain" harness = false +[[bench]] +name = "sum_check_high_degree" +harness = false + [[bench]] name = "eq_function" harness = false diff --git a/sumcheck/benches/sum_check_high_degree.rs b/sumcheck/benches/sum_check_high_degree.rs new file mode 100644 index 000000000..19c8feb99 --- /dev/null +++ b/sumcheck/benches/sum_check_high_degree.rs @@ -0,0 +1,160 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree. 
+ +use std::{marker::PhantomData, time::Duration}; + +use air::{EvaluationFrame, LogUpGkrEvaluator, LogUpGkrOracle}; +use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion}; +use crypto::{hashers::Blake3_192, DefaultRandomCoin, RandomCoin}; +use math::{fields::f64::BaseElement, ExtensionOf, FieldElement}; +use rand_utils::{rand_value, rand_vector}; +#[cfg(feature = "concurrent")] +pub use rayon::prelude::*; +use winter_sumcheck::{sum_check_prove_higher_degree, MultiLinearPoly}; + +const LOG_POLY_SIZE: [usize; 2] = [18, 20]; + +fn sum_check_high_degree(c: &mut Criterion) { + let mut group = c.benchmark_group("Sum-check prover high degree"); + group.sample_size(10); + group.measurement_time(Duration::from_secs(10)); + + for &log_poly_size in LOG_POLY_SIZE.iter() { + group.bench_function(BenchmarkId::new("", log_poly_size), |b| { + b.iter_batched( + || { + let logup_randomness = rand_vector(1); + let evaluator = PlainLogUpGkrEval::::default(); + let transcript = DefaultRandomCoin::>::new(&vec![ + BaseElement::ZERO; + 4 + ]); + ( + setup_sum_check::(log_poly_size), + evaluator, + logup_randomness, + transcript, + ) + }, + |( + (claim, r_batch, rand_pt, (ml0, ml1, ml2, ml3, ml4)), + evaluator, + logup_randomness, + transcript, + )| { + let mut mls = vec![ml0, ml1, ml2, ml3, ml4]; + let mut transcript = transcript; + + sum_check_prove_higher_degree( + &evaluator, + rand_pt, + claim, + r_batch, + logup_randomness, + &mut mls, + &mut transcript, + ) + }, + BatchSize::SmallInput, + ) + }); + } +} + +fn setup_sum_check( + log_size: usize, +) -> ( + E, + E, + Vec, + ( + MultiLinearPoly, + MultiLinearPoly, + MultiLinearPoly, + MultiLinearPoly, + MultiLinearPoly, + ), +) { + let n = 1 << log_size; + let table = MultiLinearPoly::from_evaluations(rand_vector(n)); + let multiplicity = MultiLinearPoly::from_evaluations(rand_vector(n)); + let values_0 = MultiLinearPoly::from_evaluations(rand_vector(n)); + let values_1 = MultiLinearPoly::from_evaluations(rand_vector(n)); + let values_2 = MultiLinearPoly::from_evaluations(rand_vector(n)); + + // this will not generate the correct claim with overwhelming probability but should be fine + // for benchmarking + let rand_pt: Vec = rand_vector(log_size + 2); + let r_batch: E = rand_value(); + let claim: E = rand_value(); + + (claim, r_batch, rand_pt, (table, multiplicity, values_0, values_1, values_2)) +} + +#[derive(Clone, Default)] +pub struct PlainLogUpGkrEval { + _field: PhantomData, +} + +impl LogUpGkrEvaluator for PlainLogUpGkrEval { + type BaseField = BaseElement; + + type PublicInputs = (); + + fn get_oracles(&self) -> Vec> { + let committed_0 = LogUpGkrOracle::CurrentRow(0); + let committed_1 = LogUpGkrOracle::CurrentRow(1); + let committed_2 = LogUpGkrOracle::CurrentRow(2); + let committed_3 = LogUpGkrOracle::CurrentRow(3); + let committed_4 = LogUpGkrOracle::CurrentRow(4); + vec![committed_0, committed_1, committed_2, committed_3, committed_4] + } + + fn get_num_rand_values(&self) -> usize { + 1 + } + + fn get_num_fractions(&self) -> usize { + 4 + } + + fn max_degree(&self) -> usize { + 3 + } + + fn build_query(&self, frame: &EvaluationFrame, _periodic_values: &[E], query: &mut [E]) + where + E: FieldElement, + { + query.iter_mut().zip(frame.current().iter()).for_each(|(q, f)| *q = *f); + } + + fn evaluate_query( + &self, + query: &[F], + rand_values: &[E], + numerator: &mut [E], + denominator: &mut [E], + ) where + F: FieldElement, + E: FieldElement + ExtensionOf, + { + assert_eq!(numerator.len(), 4); + 
assert_eq!(denominator.len(), 4); + assert_eq!(query.len(), 5); + numerator[0] = E::from(query[1]); + numerator[1] = E::ONE; + numerator[2] = E::ONE; + numerator[3] = E::ONE; + + denominator[0] = rand_values[0] - E::from(query[0]); + denominator[1] = -(rand_values[0] - E::from(query[2])); + denominator[2] = -(rand_values[0] - E::from(query[3])); + denominator[3] = -(rand_values[0] - E::from(query[4])); + } +} + +criterion_group!(group, sum_check_high_degree); +criterion_main!(group); diff --git a/sumcheck/benches/sum_check_plain.rs b/sumcheck/benches/sum_check_plain.rs index f3b72fcfd..fb79d8f53 100644 --- a/sumcheck/benches/sum_check_plain.rs +++ b/sumcheck/benches/sum_check_plain.rs @@ -13,7 +13,6 @@ use rand_utils::{rand_value, rand_vector}; pub use rayon::prelude::*; use winter_sumcheck::{sumcheck_prove_plain, EqFunction, MultiLinearPoly}; - const LOG_POLY_SIZE: [usize; 2] = [18, 20]; fn sum_check_plain(c: &mut Criterion) { @@ -25,7 +24,11 @@ fn sum_check_plain(c: &mut Criterion) { group.bench_function(BenchmarkId::new("", log_poly_size), |b| { b.iter_batched( || { - let transcript = DefaultRandomCoin::>::new(&vec![BaseElement::ZERO; 4]); + let transcript = + DefaultRandomCoin::>::new(&vec![ + BaseElement::ZERO; + 4 + ]); (setup_sum_check::(log_poly_size), transcript) }, |((claim, r_batch, p0, p1, q0, q1, eq), transcript)| { @@ -36,7 +39,16 @@ fn sum_check_plain(c: &mut Criterion) { let mut eq = eq; let mut transcript = transcript; - sumcheck_prove_plain(claim, r_batch,&mut p0,&mut p1, &mut q0,&mut q1,&mut eq,&mut transcript) + sumcheck_prove_plain( + claim, + r_batch, + &mut p0, + &mut p1, + &mut q0, + &mut q1, + &mut eq, + &mut transcript, + ) }, BatchSize::SmallInput, ) @@ -44,7 +56,9 @@ fn sum_check_plain(c: &mut Criterion) { } } -fn setup_sum_check(log_size: usize) -> ( +fn setup_sum_check( + log_size: usize, +) -> ( E, E, MultiLinearPoly, @@ -74,6 +88,5 @@ fn setup_sum_check(log_size: usize) -> ( (claim, r_batch, p0, p1, q0, q1, eq) } - criterion_group!(group, sum_check_plain); criterion_main!(group); From dae27e64db3e77baaebdd58fc1fd3f504db1b78b Mon Sep 17 00:00:00 2001 From: Al-Kindi-0 <82364884+Al-Kindi-0@users.noreply.github.com> Date: Mon, 19 Aug 2024 23:40:04 +0200 Subject: [PATCH 20/28] doc: improve documentation --- air/src/air/logup_gkr.rs | 4 ++++ sumcheck/README.md | 10 +++++++++- sumcheck/benches/sum_check_plain.rs | 3 +-- 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/air/src/air/logup_gkr.rs b/air/src/air/logup_gkr.rs index bbdd150b0..1ca0b08b5 100644 --- a/air/src/air/logup_gkr.rs +++ b/air/src/air/logup_gkr.rs @@ -26,6 +26,10 @@ pub trait LogUpGkrEvaluator: Clone + Sync { fn get_num_fractions(&self) -> usize; /// Returns the maximal degree of the multi-variate associated to the input layer. + /// + /// This is equal to the max of $1 + deg_k(\text{numerator}_i) * deg_k(\text{denominator}_j)$ where + /// $i$ and $j$ range over the number of numerators and denominators, respectively, and $deg_k$ + /// is the degree of a multi-variate polynomial in its $k$-th variable. fn max_degree(&self) -> usize; /// Builds a query from the provided main trace frame and periodic values. diff --git a/sumcheck/README.md b/sumcheck/README.md index 3a705b2e0..be6734aae 100644 --- a/sumcheck/README.md +++ b/sumcheck/README.md @@ -1,5 +1,13 @@ # Winter sum-check -This crate contains an implementation of the sum-check protocol intended to be used for LogUp-GKR by the Winterfell STARK prover and verifier. 
+This crate contains an implementation of the sum-check protocol intended to be used for [LogUp-GKR](https://eprint.iacr.org/2023/1284) by the Winterfell STARK prover and verifier. + +The crate provides two implementations of the sum-check protocol: + +* An implementation for the sum-check protocol as used in [LogUp-GKR](https://eprint.iacr.org/2023/1284). +* An implementation which generalizes the previous one to the case where the numerators and denominators appearing in the fractional sum-checks in Section 3 of [LogUp-GKR](https://eprint.iacr.org/2023/1284) can be non-linear compositions of multi-linear polynomials. + +The first implementation is intended to be used by the GKR protocol for proving the correct evaluation of all of the layers of the fractionl sum circuit except for the input layer. The second implementation is intended to be used for proving the correct evaluation of the input layer. + ## Crate features This crate can be compiled with the following features: diff --git a/sumcheck/benches/sum_check_plain.rs b/sumcheck/benches/sum_check_plain.rs index fb79d8f53..de1acfa8f 100644 --- a/sumcheck/benches/sum_check_plain.rs +++ b/sumcheck/benches/sum_check_plain.rs @@ -24,8 +24,7 @@ fn sum_check_plain(c: &mut Criterion) { group.bench_function(BenchmarkId::new("", log_poly_size), |b| { b.iter_batched( || { - let transcript = - DefaultRandomCoin::>::new(&vec![ + let transcript = DefaultRandomCoin::>::new(&vec![ BaseElement::ZERO; 4 ]); From 9543e1ab8c3ee3548fab630fae9f3a49d4982016 Mon Sep 17 00:00:00 2001 From: Al-Kindi-0 <82364884+Al-Kindi-0@users.noreply.github.com> Date: Tue, 20 Aug 2024 11:10:46 +0200 Subject: [PATCH 21/28] chore: pacify clippy --- air/src/air/logup_gkr.rs | 12 +++++++----- sumcheck/benches/sum_check_plain.rs | 7 +++---- sumcheck/src/lib.rs | 1 + 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/air/src/air/logup_gkr.rs b/air/src/air/logup_gkr.rs index 1ca0b08b5..842c5e392 100644 --- a/air/src/air/logup_gkr.rs +++ b/air/src/air/logup_gkr.rs @@ -5,9 +5,10 @@ use alloc::vec::Vec; -use super::EvaluationFrame; use math::{ExtensionOf, FieldElement, StarkField, ToElements}; +use super::EvaluationFrame; + pub trait LogUpGkrEvaluator: Clone + Sync { /// Defines the base field of the evaluator. type BaseField: StarkField; @@ -26,7 +27,7 @@ pub trait LogUpGkrEvaluator: Clone + Sync { fn get_num_fractions(&self) -> usize; /// Returns the maximal degree of the multi-variate associated to the input layer. - /// + /// /// This is equal to the max of $1 + deg_k(\text{numerator}_i) * deg_k(\text{denominator}_j)$ where /// $i$ and $j$ range over the number of numerators and denominators, respectively, and $deg_k$ /// is the degree of a multi-variate polynomial in its $k$-th variable. @@ -67,9 +68,10 @@ pub trait LogUpGkrEvaluator: Clone + Sync { /// expected claim. 
fn compute_claim(&self, _inputs: &Self::PublicInputs, _rand_values: &[E]) -> E where - E: FieldElement{ - E::ZERO - } + E: FieldElement, + { + E::ZERO + } } #[derive(Clone, Debug, PartialEq, PartialOrd, Eq, Ord)] diff --git a/sumcheck/benches/sum_check_plain.rs b/sumcheck/benches/sum_check_plain.rs index de1acfa8f..07caf05ec 100644 --- a/sumcheck/benches/sum_check_plain.rs +++ b/sumcheck/benches/sum_check_plain.rs @@ -24,10 +24,8 @@ fn sum_check_plain(c: &mut Criterion) { group.bench_function(BenchmarkId::new("", log_poly_size), |b| { b.iter_batched( || { - let transcript = DefaultRandomCoin::>::new(&vec![ - BaseElement::ZERO; - 4 - ]); + let transcript = + DefaultRandomCoin::>::new(&[BaseElement::ZERO; 4]); (setup_sum_check::(log_poly_size), transcript) }, |((claim, r_batch, p0, p1, q0, q1, eq), transcript)| { @@ -55,6 +53,7 @@ fn sum_check_plain(c: &mut Criterion) { } } +#[allow(clippy::too_many_arguments)] fn setup_sum_check( log_size: usize, ) -> ( diff --git a/sumcheck/src/lib.rs b/sumcheck/src/lib.rs index 5e15bff4b..f30db974c 100644 --- a/sumcheck/src/lib.rs +++ b/sumcheck/src/lib.rs @@ -259,6 +259,7 @@ where /// /// This is the result of batching the `p_k` and `q_k` of section 3.2 in /// https://eprint.iacr.org/2023/1284.pdf. +#[inline(always)] fn comb_func(p0: E, p1: E, q0: E, q1: E, eq: E, r_batch: E) -> E { (p0 * q1 + p1 * q0 + r_batch * q0 * q1) * eq } From 60c85911bff77d99901fb0c9a38cae79cf65e949 Mon Sep 17 00:00:00 2001 From: Al-Kindi-0 <82364884+Al-Kindi-0@users.noreply.github.com> Date: Tue, 20 Aug 2024 11:14:08 +0200 Subject: [PATCH 22/28] chore: pacify clippy --- sumcheck/benches/sum_check_high_degree.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/sumcheck/benches/sum_check_high_degree.rs b/sumcheck/benches/sum_check_high_degree.rs index 19c8feb99..d904033ec 100644 --- a/sumcheck/benches/sum_check_high_degree.rs +++ b/sumcheck/benches/sum_check_high_degree.rs @@ -27,10 +27,8 @@ fn sum_check_high_degree(c: &mut Criterion) { || { let logup_randomness = rand_vector(1); let evaluator = PlainLogUpGkrEval::::default(); - let transcript = DefaultRandomCoin::>::new(&vec![ - BaseElement::ZERO; - 4 - ]); + let transcript = + DefaultRandomCoin::>::new(&[BaseElement::ZERO; 4]); ( setup_sum_check::(log_poly_size), evaluator, @@ -63,6 +61,7 @@ fn sum_check_high_degree(c: &mut Criterion) { } } +#[allow(clippy::too_many_arguments)] fn setup_sum_check( log_size: usize, ) -> ( From 7338cafd408ba080a699dbc1d9387803e1ec35ee Mon Sep 17 00:00:00 2001 From: Al-Kindi-0 <82364884+Al-Kindi-0@users.noreply.github.com> Date: Tue, 20 Aug 2024 11:19:37 +0200 Subject: [PATCH 23/28] chore: pacify clippy --- sumcheck/benches/sum_check_high_degree.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/sumcheck/benches/sum_check_high_degree.rs b/sumcheck/benches/sum_check_high_degree.rs index d904033ec..8ed78b8f3 100644 --- a/sumcheck/benches/sum_check_high_degree.rs +++ b/sumcheck/benches/sum_check_high_degree.rs @@ -62,6 +62,7 @@ fn sum_check_high_degree(c: &mut Criterion) { } #[allow(clippy::too_many_arguments)] +#[allow(clippy::type_complexity)] fn setup_sum_check( log_size: usize, ) -> ( From 43c7441f46896315fb63b7322cca4fbc052d251e Mon Sep 17 00:00:00 2001 From: Al-Kindi-0 <82364884+Al-Kindi-0@users.noreply.github.com> Date: Tue, 20 Aug 2024 11:21:36 +0200 Subject: [PATCH 24/28] chore: pacify clippy --- sumcheck/benches/sum_check_plain.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/sumcheck/benches/sum_check_plain.rs b/sumcheck/benches/sum_check_plain.rs index 
07caf05ec..277d1597d 100644 --- a/sumcheck/benches/sum_check_plain.rs +++ b/sumcheck/benches/sum_check_plain.rs @@ -54,6 +54,7 @@ fn sum_check_plain(c: &mut Criterion) { } #[allow(clippy::too_many_arguments)] +#[allow(clippy::type_complexity)] fn setup_sum_check( log_size: usize, ) -> ( From f85e44f5ee39d713060cbc2067e99e4599b86a4d Mon Sep 17 00:00:00 2001 From: Al-Kindi-0 <82364884+Al-Kindi-0@users.noreply.github.com> Date: Tue, 20 Aug 2024 18:53:55 +0200 Subject: [PATCH 25/28] feat: reduce memory allocation in bind least signi --- air/src/air/logup_gkr.rs | 2 +- sumcheck/benches/sum_check_high_degree.rs | 4 +-- sumcheck/benches/sum_check_plain.rs | 43 +++++------------------ sumcheck/src/multilinear.rs | 29 ++++++++++----- sumcheck/src/prover/high_degree.rs | 6 ++-- sumcheck/src/prover/plain.rs | 11 +++--- 6 files changed, 43 insertions(+), 52 deletions(-) diff --git a/air/src/air/logup_gkr.rs b/air/src/air/logup_gkr.rs index 842c5e392..c56316751 100644 --- a/air/src/air/logup_gkr.rs +++ b/air/src/air/logup_gkr.rs @@ -53,7 +53,7 @@ pub trait LogUpGkrEvaluator: Clone + Sync { fn evaluate_query( &self, query: &[F], - rand_values: &[E], + logup_randomness: &[E], numerators: &mut [E], denominators: &mut [E], ) where diff --git a/sumcheck/benches/sum_check_high_degree.rs b/sumcheck/benches/sum_check_high_degree.rs index 8ed78b8f3..3db6a37e3 100644 --- a/sumcheck/benches/sum_check_high_degree.rs +++ b/sumcheck/benches/sum_check_high_degree.rs @@ -42,7 +42,7 @@ fn sum_check_high_degree(c: &mut Criterion) { logup_randomness, transcript, )| { - let mut mls = vec![ml0, ml1, ml2, ml3, ml4]; + let mls = vec![ml0, ml1, ml2, ml3, ml4]; let mut transcript = transcript; sum_check_prove_higher_degree( @@ -51,7 +51,7 @@ fn sum_check_high_degree(c: &mut Criterion) { claim, r_batch, logup_randomness, - &mut mls, + mls, &mut transcript, ) }, diff --git a/sumcheck/benches/sum_check_plain.rs b/sumcheck/benches/sum_check_plain.rs index 277d1597d..14fd859ce 100644 --- a/sumcheck/benches/sum_check_plain.rs +++ b/sumcheck/benches/sum_check_plain.rs @@ -28,24 +28,11 @@ fn sum_check_plain(c: &mut Criterion) { DefaultRandomCoin::>::new(&[BaseElement::ZERO; 4]); (setup_sum_check::(log_poly_size), transcript) }, - |((claim, r_batch, p0, p1, q0, q1, eq), transcript)| { - let mut p0 = p0; - let mut p1 = p1; - let mut q0 = q0; - let mut q1 = q1; + |((claim, r_batch, p, q, eq), transcript)| { let mut eq = eq; let mut transcript = transcript; - sumcheck_prove_plain( - claim, - r_batch, - &mut p0, - &mut p1, - &mut q0, - &mut q1, - &mut eq, - &mut transcript, - ) + sumcheck_prove_plain(claim, r_batch, p, q, &mut eq, &mut transcript) }, BatchSize::SmallInput, ) @@ -57,20 +44,10 @@ fn sum_check_plain(c: &mut Criterion) { #[allow(clippy::type_complexity)] fn setup_sum_check( log_size: usize, -) -> ( - E, - E, - MultiLinearPoly, - MultiLinearPoly, - MultiLinearPoly, - MultiLinearPoly, - MultiLinearPoly, -) { - let n = 1 << log_size; - let p0: Vec = rand_vector(n); - let p1: Vec = rand_vector(n); - let q0: Vec = rand_vector(n); - let q1: Vec = rand_vector(n); +) -> (E, E, MultiLinearPoly, MultiLinearPoly, MultiLinearPoly) { + let n = 1 << (log_size + 1); + let p: Vec = rand_vector(n); + let q: Vec = rand_vector(n); // this will not generate the correct claim with overwhelming probability but should be fine // for benchmarking @@ -78,13 +55,11 @@ fn setup_sum_check( let r_batch: E = rand_value(); let claim: E = rand_value(); - let p0 = MultiLinearPoly::from_evaluations(p0); - let p1 = MultiLinearPoly::from_evaluations(p1); - let 
q0 = MultiLinearPoly::from_evaluations(q0); - let q1 = MultiLinearPoly::from_evaluations(q1); + let p = MultiLinearPoly::from_evaluations(p); + let q = MultiLinearPoly::from_evaluations(q); let eq = MultiLinearPoly::from_evaluations(EqFunction::new(rand_pt.into()).evaluations()); - (claim, r_batch, p0, p1, q0, q1, eq) + (claim, r_batch, p, q, eq) } criterion_group!(group, sum_check_plain); diff --git a/sumcheck/src/multilinear.rs b/sumcheck/src/multilinear.rs index 12e6a6803..110ef1fa7 100644 --- a/sumcheck/src/multilinear.rs +++ b/sumcheck/src/multilinear.rs @@ -94,15 +94,28 @@ impl MultiLinearPoly { /// Given the multilinear polynomial $f(y_0, y_1, ..., y_{{\nu} - 1})$, returns two polynomials: /// $f(0, y_1, ..., y_{{\nu} - 1})$ and $f(1, y_1, ..., y_{{\nu} - 1})$. - pub fn project_least_significant_variable(&self) -> (Self, Self) { - let mut p0 = Vec::with_capacity(self.num_evaluations() / 2); - let mut p1 = Vec::with_capacity(self.num_evaluations() / 2); - for chunk in self.evaluations.chunks_exact(2) { - p0.push(chunk[0]); - p1.push(chunk[1]); - } + pub fn project_least_significant_variable(mut self) -> (Self, Self) { + let odds: Vec = self + .evaluations + .iter() + .enumerate() + .filter_map(|(idx, x)| if idx % 2 == 1 { Some(*x) } else { None }) + .collect(); + + // Builds the evens multilinear from the current `self.evaluations` buffer, which saves an + // allocation. + let evens = { + let evens_size = self.num_evaluations() / 2; + for write_idx in 0..evens_size { + let read_idx = write_idx * 2; + self.evaluations[write_idx] = self.evaluations[read_idx]; + } + self.evaluations.truncate(evens_size); + + self.evaluations + }; - (MultiLinearPoly::from_evaluations(p0), MultiLinearPoly::from_evaluations(p1)) + (Self::from_evaluations(evens), Self::from_evaluations(odds)) } } diff --git a/sumcheck/src/prover/high_degree.rs b/sumcheck/src/prover/high_degree.rs index 4e3b3cecb..a96adee4c 100644 --- a/sumcheck/src/prover/high_degree.rs +++ b/sumcheck/src/prover/high_degree.rs @@ -159,7 +159,7 @@ pub fn sum_check_prove_higher_degree< claim: E, r_sum_check: E, log_up_randomness: Vec, - mls: &mut [MultiLinearPoly], + mut mls: Vec>, coin: &mut impl RandomCoin, ) -> Result, SumCheckProverError> { let num_rounds = mls[0].num_variables(); @@ -177,7 +177,7 @@ pub fn sum_check_prove_higher_degree< // run the first round of the protocol let round_poly_evals = - sumcheck_round(&eq_mu, evaluator, &eq_nu, mls, &log_up_randomness, r_sum_check); + sumcheck_round(&eq_mu, evaluator, &eq_nu, &mls, &log_up_randomness, r_sum_check); let round_poly_coefs = round_poly_evals.to_poly(current_round_claim.claim); // reseed with the s_0 polynomial @@ -201,7 +201,7 @@ pub fn sum_check_prove_higher_degree< // run the i-th round of the protocol using the folded multi-linears for the new reduced // claim. This basically computes the s_i polynomial. 
let round_poly_evals = - sumcheck_round(&eq_mu, evaluator, &eq_nu, mls, &log_up_randomness, r_sum_check); + sumcheck_round(&eq_mu, evaluator, &eq_nu, &mls, &log_up_randomness, r_sum_check); // update the claim current_round_claim = new_round_claim; diff --git a/sumcheck/src/prover/plain.rs b/sumcheck/src/prover/plain.rs index 5e0f9379b..97ce441be 100644 --- a/sumcheck/src/prover/plain.rs +++ b/sumcheck/src/prover/plain.rs @@ -49,10 +49,8 @@ use crate::{ pub fn sumcheck_prove_plain>( claim: E, r_batch: E, - p0: &mut MultiLinearPoly, - p1: &mut MultiLinearPoly, - q0: &mut MultiLinearPoly, - q1: &mut MultiLinearPoly, + p: MultiLinearPoly, + q: MultiLinearPoly, eq: &mut MultiLinearPoly, transcript: &mut impl RandomCoin, ) -> Result, SumCheckProverError> { @@ -61,6 +59,11 @@ pub fn sumcheck_prove_plain Date: Tue, 20 Aug 2024 19:01:51 +0200 Subject: [PATCH 26/28] doc: add docs to logup evaluator trait --- air/src/air/logup_gkr.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/air/src/air/logup_gkr.rs b/air/src/air/logup_gkr.rs index c56316751..06ffaf55d 100644 --- a/air/src/air/logup_gkr.rs +++ b/air/src/air/logup_gkr.rs @@ -9,6 +9,13 @@ use math::{ExtensionOf, FieldElement, StarkField, ToElements}; use super::EvaluationFrame; +/// A trait containing the necessary information in order to run the LogUp-GKR protocol of [1]. +/// +/// The trait contains useful information for running the GKR protocol as well as for implementing +/// the univariate IOP for multi-linear evaluation of Section 5 in [1] for the final evaluation +/// check resulting from GKR. +/// +/// [1]: https://eprint.iacr.org/2023/1284 pub trait LogUpGkrEvaluator: Clone + Sync { /// Defines the base field of the evaluator. type BaseField: StarkField; From 1847d0d0fb1ee3ae4d2999b0382d9aee91907c97 Mon Sep 17 00:00:00 2001 From: Al-Kindi-0 <82364884+Al-Kindi-0@users.noreply.github.com> Date: Tue, 20 Aug 2024 19:15:02 +0200 Subject: [PATCH 27/28] chore: improve var naming --- sumcheck/src/prover/plain.rs | 115 ++++++++++++++++++++++------------- 1 file changed, 72 insertions(+), 43 deletions(-) diff --git a/sumcheck/src/prover/plain.rs b/sumcheck/src/prover/plain.rs index 97ce441be..a3b16b691 100644 --- a/sumcheck/src/prover/plain.rs +++ b/sumcheck/src/prover/plain.rs @@ -47,7 +47,7 @@ use crate::{ /// [1]: https://eprint.iacr.org/2023/1284 #[allow(clippy::too_many_arguments)] pub fn sumcheck_prove_plain>( - claim: E, + mut claim: E, r_batch: E, p: MultiLinearPoly, q: MultiLinearPoly, @@ -56,11 +56,9 @@ pub fn sumcheck_prove_plain Result, SumCheckProverError> { let mut round_proofs = vec![]; - let mut claim = claim; let mut challenges = vec![]; // construct the vector of multi-linear polynomials - // TODO: avoid unnecessary allocation let (mut p0, mut p1) = p.project_least_significant_variable(); let (mut q0, mut q1) = q.project_least_significant_variable(); @@ -68,9 +66,10 @@ pub fn sumcheck_prove_plain Date: Tue, 20 Aug 2024 19:25:50 +0200 Subject: [PATCH 28/28] doc: improve --- air/src/air/logup_gkr.rs | 4 ++-- sumcheck/src/prover/plain.rs | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/air/src/air/logup_gkr.rs b/air/src/air/logup_gkr.rs index 06ffaf55d..98054c938 100644 --- a/air/src/air/logup_gkr.rs +++ b/air/src/air/logup_gkr.rs @@ -10,11 +10,11 @@ use math::{ExtensionOf, FieldElement, StarkField, ToElements}; use super::EvaluationFrame; /// A trait containing the necessary information in order to run the LogUp-GKR protocol of [1]. 
-/// 
+///
 /// The trait contains useful information for running the GKR protocol as well as for implementing
 /// the univariate IOP for multi-linear evaluation of Section 5 in [1] for the final evaluation
 /// check resulting from GKR.
-/// 
+///
 /// [1]: https://eprint.iacr.org/2023/1284
 pub trait LogUpGkrEvaluator: Clone + Sync {
     /// Defines the base field of the evaluator.
diff --git a/sumcheck/src/prover/plain.rs b/sumcheck/src/prover/plain.rs
index a3b16b691..e0092cf10 100644
--- a/sumcheck/src/prover/plain.rs
+++ b/sumcheck/src/prover/plain.rs
@@ -44,6 +44,8 @@ use crate::{
 /// Instead of executing two runs of the sum-check protocol, a batching randomness `r_batch` is
 /// sent by the verifier at the outset in order to batch the two statements.
 ///
+/// Note that the degree of the non-linear composition polynomial is 3.
+///
 /// [1]: https://eprint.iacr.org/2023/1284
 #[allow(clippy::too_many_arguments)]
 pub fn sumcheck_prove_plain<E: FieldElement, H: ElementHasher<BaseField = E::BaseField>>(