@mimoo
Created March 5, 2025 17:34
protostar
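// ---- checks module (first of three files concatenated in this gist; the file/module names below are inferred from the duplicated `use` blocks and the `use super::…` paths, so treat them as assumptions) ----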
use super::acc_prover::PowersOfBeta;
use crate::equations::{Exp, Variable};
use std::{collections::HashMap, sync::Arc};
/// A check in protostar.
/// We assume that it is given in a list of homogeneous polynomials,
/// starting at degree 0 and strictly increasing by 1.
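/// For example (see the test gate at the bottom of the prover module), the degree-2 check
/// `a*b - b = 0` is given as the homogeneous pieces `[0, -b, a*b]`.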
#[derive(Clone)]
pub enum Check<F, V> {
/// Either a list of homogeneous polynomials from deg0 strictly increasing,
Simple(Vec<Exp<F, V>>),
/// An iterator that produces a similar list of polynomials (for checks that end up repeating).
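/// (e.g., from the tests: `EqIterator::new(0, rows, Arc::new(move |ii| hpolys(ii, rows + ii)))`)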
Repeated(EqIterator<F, V>),
}
impl<F, V> Check<F, V>
where
V: Variable,
{
pub fn len(&self) -> usize {
match self {
Check::Simple(_) => 1,
Check::Repeated(i) => i.len(),
}
}
/// Checks that the list of polynomials starts from degree 0 and strictly increases by 1 each time.
/// (e.g. [deg0, deg1, deg2, ...])
pub fn validate(&self, eq_cfg: &HashMap<V, bool>)
where
F: Clone,
V: Clone,
{
let polys: Vec<_> = match self {
Check::Simple(x) => x.clone(),
Check::Repeated(i) => i.clone().next().unwrap(),
};
polys
.iter()
.enumerate()
.for_each(|(degree, poly)| assert_eq!(poly.degree(&eq_cfg), degree))
}
pub fn degree(&self, cfg: &HashMap<V, bool>) -> usize
// TODO: this shouldn't be clone
where
F: Clone,
V: Clone,
{
match self {
Check::Simple(x) => x.last().unwrap().degree(cfg),
Check::Repeated(hpolys) => {
let mut hpolys: EqIterator<_, _> = hpolys.clone();
hpolys.next().unwrap().last().unwrap().degree(cfg)
}
}
}
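/// Returns `ceil(sqrt(l))` where `l = self.len()`;
/// this is the small/big table size used by `PowersOfBeta` for the beta optimization.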
pub fn sqrt_l(&self) -> usize {
let l = self.len();
if l == 1 {
1
} else {
(l as f64).sqrt().ceil() as usize
}
}
pub fn evaluate(
&self,
env: &V::Env<F>,
powers_of_beta: &PowersOfBeta<F>,
beta_power: usize,
) -> (usize, Vec<F>)
where
F: ark_ff::Field,
{
match self {
Check::Simple(eq) => {
let next_beta = powers_of_beta.get(beta_power);
let res = eq
.into_iter()
.map(|hpoly| hpoly.evaluate(env) * next_beta)
.collect();
(beta_power + 1, res)
}
Check::Repeated(i) => i.evaluate(env, powers_of_beta, beta_power),
}
}
}
//
// Iterator to build equations that repeat throughout a circuit
//
type ProduceCheck<F, V> = Arc<dyn Fn(usize) -> Vec<Exp<F, V>>>;
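/// Produces, for each row index in `[counter, end)`, the list of homogeneous polynomials
/// of a repeated check (via the `formula` closure).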
#[derive(Clone)]
pub struct EqIterator<F, V> {
counter: usize,
end: usize,
// TODO: maybe a better signature here would also take a circuit configuration which could be parameterized. For example, we could want a circuit configuration that has coeffs to do plonk. I guess I can think about that when I try to implement plonk here?
formula: ProduceCheck<F, V>,
}
impl<F, V> EqIterator<F, V> {
pub fn new(start: usize, end: usize, formula: ProduceCheck<F, V>) -> Self {
Self {
counter: start,
end,
formula,
}
}
pub fn validate(&self, eq_cfg: &HashMap<V, bool>) -> Result<(), String>
where
V: Variable,
{
todo!();
let input = self.end - 1;
for (expected_degree, check) in (self.formula)(input).iter().enumerate() {
if check.degree(eq_cfg) != expected_degree {
return Err("malformed formula".to_string());
}
}
Ok(())
}
pub fn degree(&self) -> usize {
todo!(); // TODO: check that this would work
// the formula might not be valid for an arbitrary input, so we use the final input, which should work:
let input = self.end - 1;
(self.formula)(input).len() + 1
}
pub fn len(&self) -> usize {
self.end - self.counter
}
pub fn empty() -> Self {
Self {
counter: 0,
end: 0,
formula: Arc::new(|_| unreachable!()),
}
}
/// Evaluate the linear combinations (using powers of Beta) of each hpoly,
/// considering that we might be starting at an offset of powers of beta already.
/// Return the next available power of Beta.
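/// Concretely, `res[i] = sum_j powers_of_beta.get(beta_power + j) * hpolys_j[i]`,
/// where `hpolys_j` is the j-th list produced by the iterator.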
pub fn evaluate(
&self,
env: &V::Env<F>,
powers_of_beta: &PowersOfBeta<F>,
mut beta_power: usize,
) -> (usize, Vec<F>)
where
F: ark_ff::Field,
V: Variable,
{
// first iteration (to get size of vec)
let mut hpolys = self.clone();
let next_hpoly = hpolys.next().unwrap();
let next_beta = powers_of_beta.get(beta_power);
let beta = powers_of_beta.beta();
beta_power += 1;
let mut res: Vec<F> = next_hpoly
.into_iter()
.map(|hpoly| hpoly.evaluate(&env) * next_beta)
.collect();
// rest of iteration
for hpolys in hpolys {
assert_eq!(hpolys.len(), res.len());
// we need to get the beta like this, instead of computing them manually,
// because in some cases it will be equal to
// (X * B1[i] + B2[i])(X * B1'[i] + B2'[i]) for some X
let beta_i = powers_of_beta.get(beta_power);
beta_power += 1;
hpolys.into_iter().enumerate().for_each(|(ii, hpoly)| {
res[ii] += hpoly.evaluate(&env) * beta_i;
});
}
(beta_power, res)
}
}
impl<F, V> Iterator for EqIterator<F, V> {
type Item = Vec<Exp<F, V>>;
fn next(&mut self) -> Option<Vec<Exp<F, V>>> {
if self.counter == self.end {
None
} else {
let res = (self.formula)(self.counter);
self.counter += 1;
Some(res)
}
}
}
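// ---- acc_prover module (second file in the gist; name inferred from the `use super::acc_prover::…` imports elsewhere) ----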
use std::{collections::HashMap, marker::PhantomData, sync::Arc};
use ark_ff::{One, Zero};
use ark_poly::{
univariate::DensePolynomial, DenseUVPolynomial, EvaluationDomain, Evaluations, Polynomial,
Radix2EvaluationDomain,
};
use itertools::Itertools;
use crate::{
equations::{
plonkish::{PlonkishEnv, PlonkishVar},
Exp, Variable,
},
fiat_shamir::Duplex,
poly_commit::{PolyCom, ToFieldElements, PCS},
poseidon2::PoseidonParams,
protostar::acc_verifier::AccVerifier,
};
use super::{checks::Check, circuits::Circuits};
//
// Helper?
//
/// Returns the scalar field of a given curve.
type ScalarField<C> = <C as ark_ec::AffineRepr>::ScalarField;
//
// DATA STRUCTURES
//
/// An accumulator instance is parameterized by:
/// - a field F (obtained from the commitment scheme as `CK::Field`)
/// - a commitment scheme CK
/// - a number of rounds K (with K-1 round trips and one final message from the prover)
///
/// Unfortunately at the moment `KM1` is a thing, because we can't write `K-1` as a const generic (without `generic_const_exprs`).
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct AccInstance<CK>
where
CK: PCS,
{
/// The public input.
pub(crate) public_input: Vec<CK::Field>,
/// Commitments produced by the prover.
pub(crate) commitments: Commitments<CK::PolyCom>,
/// Challenges produced by the verifier (i.e. Fiat-Shamir).
pub(crate) challenges: Challenges<CK::Field>,
/// The sum of the error terms.
pub(crate) error: CK::Field,
/// Same as `error` but for the beta checks' error terms.
pub(crate) beta_errors: CK::PolyCom,
/// The slack value (mu in the paper).
pub(crate) slack: CK::Field,
}
impl<CK> AccInstance<CK>
where
CK: PCS,
{
/// Folds two accumulator instances into one given a random value `alpha`.
pub fn fold_with(&self, alpha: CK::Field, other: &Self, error_terms: &[CK::Field]) -> Self {
let public_input = self
.public_input
.iter()
.zip(&other.public_input)
.map(|(a, b)| (alpha * a) + b)
.collect_vec();
let commitments = self.commitments.fold_with(alpha, &other.commitments);
let challenges = self.challenges.fold_with(alpha, &other.challenges);
// we want to evaluate this polynomial:
// other.error + e_1 X + e_2 X^2 + ...
let error = {
let mut error_coeffs = vec![other.error];
error_coeffs.extend(error_terms);
let error_poly = DensePolynomial::from_coefficients_slice(&error_coeffs);
error_poly.evaluate(&alpha)
};
let beta_errors = self.beta_errors.scale(&alpha).add(&other.beta_errors);
let slack = self.slack * alpha + other.slack;
Self {
public_input,
commitments,
challenges,
error,
beta_errors,
slack,
}
}
}
/// Commitments produced during a proof.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Commitments<C> {
/// Commitment to the witness.
pub(crate) witness_com: C,
/// Commitment to the powers of betas.
pub(crate) betas_com: C,
}
impl<F, C> ToFieldElements for Commitments<C>
where
F: ark_ff::Field,
C: ToFieldElements<Field = F>,
{
type Field = F;
fn to_field_elements(&self) -> Vec<Self::Field> {
let Self {
witness_com,
betas_com,
} = self;
let mut res = witness_com.to_field_elements();
res.extend(betas_com.to_field_elements());
res
}
}
impl<C> Commitments<C>
where
C: PolyCom,
{
/// Folds two commitments into one given a random value `alpha`.
fn fold_with(&self, alpha: ScalarField<C::Curve>, other: &Self) -> Self {
let witness_com = self.witness_com.scale(&alpha).add(&other.witness_com);
let betas_com = self.betas_com.scale(&alpha).add(&other.betas_com);
Self {
witness_com,
betas_com,
}
}
}
/// Challenges produced during a proof.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Challenges<F> {
// TODO: beta is not part of the fiat-shamir, so it should not be here but outside of here
/// The challenge used to compress all checks into a single check.
pub(crate) beta: F,
}
impl<F> ToFieldElements for Challenges<F>
where
F: ark_ff::Field,
{
type Field = F;
fn to_field_elements(&self) -> Vec<Self::Field> {
let &Self { beta } = self;
vec![beta]
}
}
impl<F> Challenges<F>
where
F: ark_ff::Field,
{
/// Folds two [Challenges] into one given a random value `alpha`.
fn fold_with(&self, alpha: F, other: &Self) -> Self {
let beta = (self.beta * alpha) + other.beta;
Self { beta }
}
}
impl<CK> ToFieldElements for AccInstance<CK>
where
CK: PCS,
{
type Field = CK::Field;
fn to_field_elements(&self) -> Vec<Self::Field> {
let Self {
public_input,
commitments,
challenges,
error,
beta_errors,
slack,
} = self;
let mut res = public_input.clone();
res.extend(commitments.to_field_elements());
res.extend(challenges.to_field_elements());
res.push(*error);
res.extend(beta_errors.to_field_elements());
res.push(*slack);
res
}
}
/// The witness for an accumulator instance.
#[derive(Debug, Clone)]
pub struct AccWitness<F> {
/// The witness vector. (Usually the execution trace of the circuit.)
pub(crate) witness: Vec<F>,
/// Some powers of beta, used to lighten the accumulation verifier's load.
// TODO: this is essential right? We can't recreate this once it's been folded, so we have to keep it around.
pub(crate) powers_of_beta: PowersOfBeta<F>,
}
impl<F> AccWitness<F>
where
F: ark_ff::Field,
{
/// Folds two [AccWitness] into one given a random value `alpha`.
pub fn fold_with(&self, alpha: F, other: &Self) -> Self {
let witness = self
.witness
.iter()
.zip(other.witness.iter())
.map(|(a, b)| (alpha * a) + b)
.collect_vec();
let powers_of_beta = self.powers_of_beta.fold_with(alpha, &other.powers_of_beta);
Self {
witness,
powers_of_beta,
}
}
}
/// A split accumulator consists of an accumulator instance and an accumulator witness.
#[derive(Debug, Clone)]
pub struct SplitAccumulator<CK>
where
CK: PCS,
{
/// The instance part.
pub(crate) instance: AccInstance<CK>,
/// The witness part.
pub(crate) witness: AccWitness<CK::Field>,
}
impl<CK> SplitAccumulator<CK>
where
CK: PCS,
{
/// Folds two [SplitAccumulator] into one given a random value `alpha`.
fn fold_with(&self, alpha: CK::Field, other: &Self, error_terms: &[CK::Field]) -> Self {
let instance = self.instance.fold_with(alpha, &other.instance, error_terms);
let witness = self.witness.fold_with(alpha, &other.witness);
Self { instance, witness }
}
}
/// The proof accompanying the accumulation of two accumulator instances. (`pf` in the Protostar paper.)
#[derive(Debug, Clone)]
pub struct AccumulationProof<CK>
where
CK: PCS,
{
/// The error terms of the V check.
pub(crate) error: Vec<CK::Field>,
/// Commitment to the error terms of the Beta checks (which are of degree 2).
pub(crate) beta_errors: CK::PolyCom,
}
/// A fresh proof is a simple accumulator with no error terms and a slack value of one.
/// It represents, obviously, a proof.
#[derive(Debug, Clone)]
pub struct FreshProof<CK>
where
CK: PCS,
{
/// The instance part.
instance: FreshProofInstance<CK>,
/// The witness part.
witness: FreshProofWitness<CK::Field>,
}
impl<CK> FreshProof<CK>
where
CK: PCS,
{
/// Turns a fresh proof and its associated public input into an accumulator.
/// The accumulator is a trivial one with no error and a slack of one.
fn into_accumulator<Hash: Duplex<Field = CK::Field>>(
self,
public_input: Vec<CK::Field>,
sqrt_l: usize,
) -> SplitAccumulator<CK> {
let FreshProofInstance { commitments } = self.instance;
let FreshProofWitness { witness } = self.witness;
let challenges =
produce_all_challenges::<CK, Hash>(&public_input, &commitments.witness_com);
let powers_of_beta = PowersOfBeta::new(challenges.beta, sqrt_l);
let instance = AccInstance {
public_input,
commitments,
challenges,
error: CK::Field::zero(),
beta_errors: CK::PolyCom::empty(),
slack: CK::Field::one(),
};
let witness = AccWitness {
witness,
powers_of_beta,
};
SplitAccumulator { instance, witness }
}
}
/// The instance part of a [FreshProof].
#[derive(Debug, Clone)]
pub struct FreshProofInstance<CK>
where
CK: PCS,
{
/// The commitments sent during the proof by the prover.
commitments: Commitments<CK::PolyCom>,
}
impl<CK> FreshProofInstance<CK>
where
CK: PCS,
{
/// Turns a fresh proof instance and its associated public input into an accumulator instance.
pub(crate) fn into_acc_instance<Hash: Duplex<Field = CK::Field>>(
self,
public_input: Vec<CK::Field>,
sqrt_l: usize,
) -> AccInstance<CK> {
let challenges =
produce_all_challenges::<CK, Hash>(&public_input, &self.commitments.witness_com);
let powers_of_beta = PowersOfBeta::new(challenges.beta, sqrt_l);
AccInstance {
public_input,
commitments: self.commitments,
challenges,
error: CK::Field::zero(),
beta_errors: CK::PolyCom::empty(),
slack: CK::Field::one(),
}
}
}
/// The witness of a [FreshProof].
#[derive(Debug, Clone)]
pub struct FreshProofWitness<F> {
/// The witness. Usually this represents an execution trace.
witness: Vec<F>,
// TODO: reintroduce powers_of_beta?
}
/// Produces all the challenges r_i <- rho_nark(r_{i-1}, C_i) for all i in [k-1],
/// where r_0 = rho_nark(pi). (At the moment only a single challenge, beta, is produced.)
fn produce_all_challenges<CK, Hash>(
public_input: &[CK::Field],
witness_com: &CK::PolyCom,
) -> Challenges<CK::Field>
where
CK: PCS,
Hash: Duplex<Field = CK::Field>,
{
//
let mut rho_nark = Hash::new();
rho_nark.absorb(&[CK::Field::from(5u8)]); // RHO_NARK
// absorb public input
rho_nark.absorb(public_input);
// absorb witness
rho_nark.absorb(&witness_com.to_field_elements());
// beta
let beta = rho_nark.squeeze();
//
Challenges { beta }
}
/// The accumulation prover.
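/// Typical flow (see the tests at the bottom of this file): create a fresh proof with `to_proof`
/// (or a trivial accumulator with `to_accumulator`), fold it into an existing accumulator with
/// `accumulate`, and eventually check the final accumulator with the associated `AccVerifier`'s decider.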
struct AccProver<CK, Hash>
where
CK: PCS,
Hash: Duplex<Field = CK::Field>,
{
/// A commitment key.
commit_key: Arc<CK>,
/// The set of circuits supported by the scheme.
// TODO: do all circuits have to share the same witness/public input size? if so we should enforce that
circuits: Circuits<CK::Field>,
phantom: PhantomData<Hash>,
}
impl<CK, Hash> AccProver<CK, Hash>
where
CK: PCS,
Hash: Duplex<Field = CK::Field>,
{
/// Creates a new [AccProver] given a commitment key, and a circuit.
fn new(commit_key: Arc<CK>, circuits: Circuits<CK::Field>) -> Self {
Self {
// TODO: add public_input_size?
commit_key,
circuits,
phantom: PhantomData,
}
}
/// Returns the [AccVerifier] related to this accumulation prover.
fn acc_verifier(&self) -> AccVerifier<CK, Hash> {
AccVerifier {
commit_key: self.commit_key.clone(),
circuits: self.circuits.clone(),
phantom: PhantomData,
}
}
/// Produces a fresh proof given an instance-witness pair.
fn to_proof(&self, public_input: Vec<CK::Field>, witness: Vec<CK::Field>) -> FreshProof<CK> {
// commit to witness
let witness_poly = DensePolynomial::from_coefficients_slice(&witness);
let witness_com = self.commit_key.commit_non_hiding(&witness_poly);
// produce challenges
let challenges = produce_all_challenges::<CK, Hash>(&public_input, &witness_com);
// produce powers of beta
// TODO: is this necessary for a fresh proof witness? we can easily recompute this
let sqrt_l = self.circuits.sqrt_l();
// commit to betas
let powers_of_beta = PowersOfBeta::new(challenges.beta, sqrt_l);
let mut betas_coeffs = powers_of_beta.small_powers[1..].to_vec();
betas_coeffs.extend(&powers_of_beta.big_powers[1..]);
let betas_poly = DensePolynomial::from_coefficients_slice(&betas_coeffs);
let betas_com = self.commit_key.commit_non_hiding(&betas_poly);
// create commitment struct
let commitments = Commitments {
witness_com,
betas_com,
};
let instance = FreshProofInstance { commitments };
let witness = FreshProofWitness { witness };
FreshProof { instance, witness }
}
/// Produce an accumulator (instance and witness) given an instance-witness pair.
fn to_accumulator(
&self,
public_input: Vec<CK::Field>,
witness: Vec<CK::Field>,
) -> SplitAccumulator<CK> {
// commit to witness
let witness_poly = DensePolynomial::from_coefficients_slice(&witness);
// TODO: there's no notion of blinding in protostar atm
let witness_com = self.commit_key.commit_non_hiding(&witness_poly);
// produce challenges
let challenges = produce_all_challenges::<CK, Hash>(&public_input, &witness_com);
// produce powers of beta
let sqrt_l = self.circuits.sqrt_l();
let powers_of_beta = PowersOfBeta::new(challenges.beta, sqrt_l);
let witness = AccWitness {
witness,
powers_of_beta,
};
// commit to betas
let mut betas_coeffs = witness.powers_of_beta.small_powers[1..].to_vec();
betas_coeffs.extend(&witness.powers_of_beta.big_powers[1..]);
let betas_poly = DensePolynomial::from_coefficients_slice(&betas_coeffs);
let betas_com = self.commit_key.commit_non_hiding(&betas_poly);
// create commitment struct
let commitments = Commitments {
witness_com,
betas_com,
};
let instance = AccInstance {
public_input,
commitments,
challenges,
error: CK::Field::zero(),
beta_errors: CK::PolyCom::empty(),
slack: CK::Field::one(),
};
SplitAccumulator { instance, witness }
}
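/// Produces the cross terms (error terms) for the degree-2 beta checks,
/// derived by hand in the comments below.
/// The degree-1 check `B[1] - chal.beta = 0` needs no error term.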
fn produce_beta_error_terms(
&self,
acc1: &SplitAccumulator<CK>,
acc2: &SplitAccumulator<CK>,
) -> Vec<CK::Field> {
// we don't need an error term for B_i[1] - chal.B = 0 because it's of degree 1
// all the other checks are of degree 2
let sqrt_l = self.circuits.sqrt_l();
let mut beta_errors = Vec::with_capacity(2 * sqrt_l - 3);
// for the other stuff we can compute the error terms by hand
// B[i+1] - B[i] * B for i in [1, sqrt_l - 2]
//
// which gives us
//
// (X+mu)(acc1.B[i+1] * X + acc2.B[i+1])
// - (acc1.B[i] * X + acc2.B[i])(acc1.chals.B * X + acc2.chals.B)
//
// = X (acc2.B[i+1] + mu * acc1.B[i+1] - acc1.B[i] * acc2.chals.B - acc2.B[i] * acc1.chals.B) + ...
//
for ii in 1..sqrt_l - 1 {
beta_errors.push(
acc2.witness.powers_of_beta.small_powers[ii + 1]
+ acc1.witness.powers_of_beta.small_powers[ii + 1] * acc2.instance.slack
- acc1.witness.powers_of_beta.small_powers[ii] * acc1.instance.challenges.beta
- acc2.witness.powers_of_beta.small_powers[ii] * acc2.instance.challenges.beta,
);
}
// B'[1] - B[sqrt_l-1] * B = 0
//
// which gives us
//
// (X+mu)(acc1.B'[1] * X + acc2.B'[1])
// - (acc1.B[sqrt_l-1] * X + acc2.B[sqrt_l-1])(acc1.chals.B * X + acc2.chals.B)
//
// = X (acc2.B'[1] + mu * acc1.B'[1] - acc1.B[sqrt_l-1] * acc2.chals.B - acc2.B[sqrt_l-1] * acc1.chals.B) + ...
//
beta_errors.push(
acc2.witness.powers_of_beta.big_powers[1]
+ acc2.instance.slack * acc1.witness.powers_of_beta.big_powers.last().unwrap()
- *acc1.witness.powers_of_beta.small_powers.last().unwrap()
* acc2.instance.challenges.beta
- *acc2.witness.powers_of_beta.small_powers.last().unwrap()
* acc1.instance.challenges.beta,
);
// B'[i+1] - B'[i] * B'[1] for i in [1, sqrt_l - 2]
//
// which gives us
//
// (X+mu)(acc1.B'[i+1] * X + acc2.B'[i+1])
// - (acc1.B'[i] * X + acc2.B'[i])(acc1.B'[1] * X + acc2.B'[1])
//
// = X (acc2.B'[i+1] + mu * acc1.B'[i+1] - acc1.B'[i] * acc2.B'[1] - acc2.B'[i] * acc1.B'[1]) + ...
for ii in 1..sqrt_l - 1 {
beta_errors.push(
acc2.witness.powers_of_beta.big_powers[ii + 1]
+ acc1.witness.powers_of_beta.big_powers[ii + 1] * acc2.instance.slack
- acc1.witness.powers_of_beta.big_powers[ii]
* acc2.witness.powers_of_beta.big_powers[1]
- acc2.witness.powers_of_beta.big_powers[ii]
* acc1.witness.powers_of_beta.big_powers[1],
);
}
dbg!(&beta_errors);
//
beta_errors
}
/// Produces the error terms during the computation of V.
/// - e <-- the constant error (unused in the protocol)
/// - e_j <-- the error terms (in the cross products)
// TODO: we should preprocess our checks and come up with direct equations for all these error terms (instead of using FFT and evaluations)
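/// Concretely, for each evaluation point `x` in the FFT domain we fold the two accumulators with
/// `X = x`, evaluate `sum_{j=0}^{d} (x * mu_1 + mu_2)^{d-j} * f_j` on the folded values, and
/// interpolate: the constant coefficient is `e` and the remaining coefficients are the `e_j`.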
fn produce_error_terms(
&self,
acc1: &SplitAccumulator<CK>,
acc2: &SplitAccumulator<CK>,
) -> (CK::Field, Vec<CK::Field>) {
// current formulas assume that acc1 is a normal proof
assert!(acc1.instance.slack.is_one());
assert!(acc1.instance.error.is_zero());
// encode assumption on polynomials
let mut cst_vars = HashMap::new();
cst_vars.insert(PlonkishVar::Coeff(0), true);
self.circuits.validate(&cst_vars);
// TODO: in debug mode run decider on both acc?
// gate degree
let degree = todo!(); // self.circuits.degree(&cst_vars);
// I need at least degree + 1 evaluations,
// but to use the FFT interpolation a power of 2 is necessary
// due to the Beta optimization, we bump the degree by 2
// TODO: do we really need FFT here? since degree is likely low (<10) FFT might be overkill.
let evals = (degree + 2) + 1;
let domain: Radix2EvaluationDomain<CK::Field> = Radix2EvaluationDomain::new(evals).unwrap();
// compute evaluation of the polynomial that sums the gates
let mut res = vec![CK::Field::zero(); domain.size()];
for (ii, eval_x) in domain.elements().enumerate() {
// fold with X <- domain
let public_input = &acc1
.instance
.public_input
.iter()
.zip(&acc2.instance.public_input)
.map(|(a, b)| (eval_x * a) + b)
.collect_vec();
let witness = &acc1
.witness
.witness
.iter()
.zip(&acc2.witness.witness)
.map(|(a, b)| (eval_x * a) + b)
.collect_vec();
let challenges = &Challenges {
beta: (eval_x * acc1.instance.challenges.beta) + acc2.instance.challenges.beta,
};
let env = PlonkishEnv {
public_input: public_input.clone(), // TODO: bad clone
coeffs: vec![],
witness: witness.clone(), // TODO: bad clone
challenges: challenges.clone(),
};
let powers_of_beta = acc1
.witness
.powers_of_beta
.fold_with(eval_x, &acc2.witness.powers_of_beta);
// sum the evaluation of the homogeneous polynomial on the folded values
let mut beta_power = 0;
let (beta_power, f) =
self.circuits
.evaluate::<PlonkishVar>(degree, &env, &powers_of_beta, beta_power);
// powers of x + mu
let xmu = acc1.instance.slack * eval_x + acc2.instance.slack;
let mut all_xmu = vec![CK::Field::zero(); degree + 1];
all_xmu[0] = CK::Field::one();
for i in 1..=degree {
all_xmu[i] = all_xmu[i - 1] * xmu;
}
all_xmu.reverse();
// sum_{j=0}^d (x+mu)^(d-j) * f_j
for (xmu_dmj, f_j) in all_xmu.into_iter().zip(&f) {
res[ii] += xmu_dmj * f_j;
}
}
// interpolate
let poly = Evaluations::from_vec_and_domain(res.clone(), domain).interpolate();
dbg!(&poly);
// the X^(d+2) coefficient should be 0
// (initially it was the X^d coeff, but we now use the Betas, which bump the degree by 2)
assert_eq!(poly.degree(), degree + 1);
// constant is the error e
let mut coeffs = poly.coeffs().into_iter();
let e = coeffs.next().unwrap();
// the rest of the coefficients are the error terms
let errors: Vec<CK::Field> = coeffs.cloned().collect();
//
(*e, errors)
}
/// Accumulates two accumulators into a single one.
/// In the current form, the second accumulator is actually a fresh proof (a trivial accumulator).
fn accumulate(
&self,
acc: SplitAccumulator<CK>,
proof: (Vec<CK::Field>, FreshProof<CK>),
) -> (SplitAccumulator<CK>, AccumulationProof<CK>) {
// sanitize
assert_eq!(acc.instance.public_input.len(), proof.0.len());
assert_eq!(acc.witness.witness.len(), proof.1.witness.witness.len());
// get number of checks
let sqrt_l = self.circuits.sqrt_l();
// (this will do step 1.)
// convert fresh proof into an accumulator
let mut proof_acc = proof.1.into_accumulator::<Hash>(proof.0, sqrt_l);
// (step 2.)
// produce all the error terms
let (_e, error_terms) = self.produce_error_terms(&proof_acc, &acc);
// do the same for the beta error terms
let beta_error_terms = self.produce_beta_error_terms(&proof_acc, &acc);
let beta_error_poly = DensePolynomial::from_coefficients_vec(beta_error_terms);
let beta_error_commit = self.commit_key.commit_non_hiding(&beta_error_poly);
// at this point we can replace proof.beta_errors with the calculated beta error terms;
// this is a hack: later, in the folding function, it gives us what we want, namely
// new_acc.beta_E = proof.beta_E * alpha + acc.beta_E
assert!(!beta_error_commit.is_zero());
proof_acc.instance.beta_errors = beta_error_commit.clone();
// (step 4.)
// sample alpha
let alpha = {
let mut rho_acc = Hash::new();
rho_acc.absorb(&[CK::Field::from(8u8)]); // RHO_ACC
let mut to_hash = acc.instance.to_field_elements();
to_hash.extend(&proof_acc.instance.public_input); // pi
let Commitments {
witness_com,
betas_com,
} = &proof_acc.instance.commitments;
to_hash.extend(witness_com.to_field_elements());
to_hash.extend(betas_com.to_field_elements());
to_hash.extend(&error_terms);
to_hash.extend(beta_error_commit.to_field_elements());
// TODO: should we hash anything else? Like mu?
rho_acc.absorb(&to_hash);
rho_acc.squeeze()
};
// (step 5 to end)
// finally, fold the two accumulators
dbg!(&proof_acc.instance.beta_errors, &acc.instance.beta_errors);
let new_acc = proof_acc.fold_with(alpha, &acc, &error_terms);
//
(
new_acc,
AccumulationProof {
error: error_terms,
beta_errors: beta_error_commit,
},
)
}
}
//
// Helper for powers of beta
//
/// A [PowersOfBeta] object contains _some_ precomputed powers of beta
/// necessary to lighten the accumulator verifier's load.
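/// For example, with `sqrt(l) = 4`: `small_powers = [1, B, B^2, B^3]`,
/// `big_powers = [1, B^4, B^8, B^12]`, and `B^7 = big_powers[1] * small_powers[3]`.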
#[derive(Debug, Clone)]
pub struct PowersOfBeta<F> {
/// [1, B, B^2, ..., B^{sqrt(l) - 1}]
pub(crate) small_powers: Vec<F>,
/// [1, B^{sqrt(l)}, B^{2 * sqrt(l)}, ..., B^{(sqrt(l) - 1) * sqrt(l)}]
pub(crate) big_powers: Vec<F>,
}
impl<F> PowersOfBeta<F>
where
F: ark_ff::Field,
{
/// Creates a [PowersOfBeta] object given `sqrt(l)`,
/// where `l` is the number of checks we're compressing.
fn new(beta: F, sqrt_l: usize) -> Self {
// 1, B, B^2, ..., B^{sqrt(l) - 1}
let mut small_powers = Vec::with_capacity(sqrt_l);
small_powers.push(F::one());
for i in 1..sqrt_l {
small_powers.push(small_powers[i - 1] * beta);
}
// 1, B^{sqrt(l)}, B^{2 * sqrt(l)}
// ...
// B^{(sqrt(l) - 1) * sqrt(l)}
let mut big_powers = Vec::with_capacity(sqrt_l);
let beta_sqrt_l = *small_powers.last().unwrap() * beta;
big_powers.push(F::one());
for i in 1..sqrt_l {
big_powers.push(big_powers[i - 1] * beta_sqrt_l);
}
Self {
small_powers,
big_powers,
}
}
/// Returns Beta.
pub fn beta(&self) -> F {
self.small_powers[1]
}
/// Returns the i-th power of Beta.
/// It is important that it uses the Beta optimization of multiplying two values,
/// because this is what we use in the folding as well.
pub fn get(&self, power: usize) -> F {
// e.g. we have sqrt(l) = 4 (i.e. l = 16)
//
// - small: (1, beta, beta^2, beta^3)
// - big: (1, beta^4, beta^8, beta^12)
//
// hence
//
// e.g. B^7 = B^4 * B^3 = B'[1] * B[3] = B'[7/4] * B[7%4]
//
let sqrt_l = self.small_powers.len();
let ii = power / sqrt_l;
let jj = power % sqrt_l;
self.big_powers[ii] * self.small_powers[jj]
}
/// Folds two powers of beta together.
fn fold_with(&self, alpha: F, other: &Self) -> Self {
let small_powers = self
.small_powers
.iter()
.zip(&other.small_powers)
.map(|(a, b)| (alpha * a) + b)
.collect();
let big_powers = self
.big_powers
.iter()
.zip(&other.big_powers)
.map(|(a, b)| (alpha * a) + b)
.collect();
Self {
small_powers,
big_powers,
}
}
/// Validates that the powers of beta are correct.
fn validate(&self, beta: F) -> bool {
// B[0] == 1 and B'[0] == 1
assert_eq!(self.small_powers[0], F::one());
assert_eq!(self.big_powers[0], F::one());
// B[i+1] = B[i] * beta
for (a, b) in self.small_powers.iter().tuple_windows() {
assert_eq!(*a * beta, *b);
}
// B'[1] == B[sqrt(l)-1] * beta
let big = *self.small_powers.last().unwrap() * beta;
assert_eq!(self.big_powers[1], big);
// B'[i+1] = B'[i] * beta^{sqrt(l)}
for (a, b) in self.big_powers.iter().tuple_windows() {
assert_eq!(*a * big, *b);
}
true
}
}
#[cfg(test)]
mod tests {
use crate::{
equations::plonkish::witness,
plonk::circuit::PlonkCircuit,
poly_commit::{PedersenCommitment, PedersenCommitter},
poseidon2::Poseidon2,
protostar::{checks::EqIterator, circuits::CircuitKind},
};
use super::*;
use ark_ec::AffineRepr;
// pallas curve's scalar
use ark_pallas::{Affine as Pallas, Fr};
type E = Exp<Fr, PlonkishVar>;
#[test]
fn test_betas() {
// l = 16, sqrt(l) = 4
let beta = Fr::from(7u64);
let powers_of_beta = PowersOfBeta::new(beta, 4);
let mut expected = Fr::one();
for ii in 0..16 {
assert_eq!(
powers_of_beta.get(ii),
expected,
"{ii}-th power is expected to be {expected}"
);
expected *= beta;
}
// l = 1, sqrt(l) = 1
let powers_of_beta = PowersOfBeta::new(beta, 1);
assert_eq!(powers_of_beta.get(0), Fr::one());
let powers_of_beta = PowersOfBeta::new(beta, 2);
assert_eq!(powers_of_beta.get(0), Fr::one());
assert_eq!(powers_of_beta.get(1), beta);
}
/// A gate of degree 2 for tests.
/// It checks that `ab - b = 0` (which is true if a=1 or b=0).
fn hpolys(left: usize, right: usize) -> Vec<E> {
let left: E = witness(left);
let right: E = witness(right);
vec![
// deg 0
Exp::zero(),
// deg 1 := -b
-(&right),
// deg 2 := ab
left * right,
]
}
#[test]
fn test_accumulator() {
//
// SETUP circuit/prover
//
// create circuit
dbg!("creating circuit");
let rows = 3;
let repeated_hpolys = EqIterator::new(0, rows, Arc::new(move |ii| hpolys(ii, rows + ii)));
let checks = CircuitKind::Arbitrary(vec![Check::Repeated(repeated_hpolys)]);
let circuit = PlonkCircuit {
public_input_size: 0,
gates: vec![],
wires: 3,
wiring: vec![],
lookups: (),
coefficients: 0,
};
// create keys
//let thread_rng = &mut rand::thread_rng();
use rand::SeedableRng;
let rng = &mut rand::rngs::StdRng::seed_from_u64(123456789);
let ck = Arc::new(PedersenCommitter::<Pallas>::setup(30, rng));
let acc_prover: AccProver<PedersenCommitter<Pallas>, Poseidon2<Fr>> =
AccProver::new(ck.clone(), Circuits::Plonkish(vec![circuit]));
// public input
let public_input = vec![];
//
// FOLD
//
// the 2 instances of size 3 we're going to fold
// remember, we need a=1 or b=0
let w1: Vec<Fr> = vec![/* a */ 1, 2, 1, /* b */ 7, 0, 8]
.into_iter()
.map(Into::into)
.collect();
let fresh_proof1 = acc_prover.to_proof(public_input.clone(), w1);
// instance 2
let w2: Vec<Fr> = vec![/* a */ 4, 1, 8, /* b */ 0, 7, 0]
.into_iter()
.map(Into::into)
.collect_vec();
let split_acc1 = acc_prover.to_accumulator(public_input.clone(), w2);
// fresh_proof1 __
// \__ accumulate ---> acc2
// split_acc1 ----/
dbg!("accumulate the first two accumulators");
let (acc2, accumulation_proof) = acc_prover.accumulate(
split_acc1.clone(),
(public_input.clone(), fresh_proof1.clone()),
);
// verify the accumulator
assert!(!split_acc1.instance.slack.is_zero());
dbg!("verify the first resulting accumulator");
let acc_verifier = acc_prover.acc_verifier();
let res = acc_verifier.verify_acc(
split_acc1.instance,
(public_input.clone(), fresh_proof1.instance),
accumulation_proof,
acc2.instance.clone(),
);
assert!(res.is_ok());
// run the decider
dbg!("run the decider");
assert!(!acc2.instance.beta_errors.0.is_zero()); // bug
acc_verifier.decider(&acc2).unwrap();
//
// accumulate ONE more time
//
// create a fresh proof
let w3: Vec<Fr> = vec![/* a */ 1, 2, 1, /* b */ 7, 0, 8]
.into_iter()
.map(Into::into)
.collect_vec();
let fresh_proof3 = acc_prover.to_proof(public_input.clone(), w3);
// accumulate
dbg!("acccumulate an additional fresh proof");
let (acc4, accumulation_proof) =
acc_prover.accumulate(acc2.clone(), (public_input.clone(), fresh_proof3.clone()));
// verify the accumulation
dbg!("verify that accumulation");
let acc_verifier = acc_prover.acc_verifier();
acc_verifier
.verify_acc(
acc2.instance,
(public_input.clone(), fresh_proof3.instance),
accumulation_proof,
acc4.instance.clone(),
)
.unwrap();
// run the decider
dbg!("run the decider");
acc_verifier.decider(&acc4).unwrap();
}
}
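// ---- acc_verifier module (third file in the gist; name inferred from the `use crate::protostar::acc_verifier::AccVerifier` import above) ----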
use std::{collections::HashMap, marker::PhantomData, sync::Arc};
use ark_ff::{One, Zero};
use ark_poly::{
univariate::DensePolynomial, DenseUVPolynomial, Evaluations, Radix2EvaluationDomain,
};
use itertools::Itertools;
use crate::{
equations::{
plonkish::{PlonkishEnv, PlonkishVar},
Variable,
},
fiat_shamir::Duplex,
poly_commit::{ToFieldElements, PCS},
poseidon2::PoseidonParams,
protostar::acc_prover::{
AccInstance, AccumulationProof, Commitments, FreshProofInstance, SplitAccumulator,
},
};
use super::{acc_prover::PowersOfBeta, circuits::Circuits};
/// The accumulator verifier, used to verify the accumulation of instances or run the decider.
pub struct AccVerifier<CK, Hash>
where
CK: PCS,
Hash: Duplex<Field = CK::Field>,
{
// TODO: add public_input_size? + check on public_input_size when verifying
/// Used to commit to a vector of values.
pub(crate) commit_key: Arc<CK>,
/// A description of the circuits we're verifying.
pub(crate) circuits: Circuits<CK::Field>,
pub(crate) phantom: PhantomData<Hash>,
}
#[derive(Debug)]
pub enum VerifyError {}
impl<CK, Hash> AccVerifier<CK, Hash>
where
CK: PCS,
Hash: Duplex<Field = CK::Field>,
{
/// Verify the accumulation of instances:
/// `accumulator instance + proof instance => new accumulator instance`.
pub fn verify_acc(
&self,
acc_instance: AccInstance<CK>,
proof_instance: (Vec<CK::Field>, FreshProofInstance<CK>),
accumulation_proof: AccumulationProof<CK>,
resulting_acc: AccInstance<CK>,
) -> Result<(), VerifyError> {
// convert proof into accumulator
// (this will recompute the challenges)
let sqrt_l = self.circuits.sqrt_l();
let mut proof_acc = proof_instance
.1
.into_acc_instance::<Hash>(proof_instance.0, sqrt_l);
// sample alpha
let alpha = {
let mut rho_acc = Hash::new();
rho_acc.absorb(&[CK::Field::from(8u8)]); // RHO_ACC
let mut to_hash = acc_instance.to_field_elements();
to_hash.extend(&proof_acc.public_input); // pi
let Commitments {
witness_com,
betas_com,
} = &proof_acc.commitments;
to_hash.extend(witness_com.to_field_elements());
to_hash.extend(betas_com.to_field_elements());
to_hash.extend(&accumulation_proof.error);
to_hash.extend(accumulation_proof.beta_errors.to_field_elements());
rho_acc.absorb(&to_hash);
rho_acc.squeeze()
};
// set its beta errors to the ones from the accumulation proof
// (a hack to make the computation of the error work when we fold below)
proof_acc.beta_errors = accumulation_proof.beta_errors;
// fold
let new_acc = proof_acc.fold_with(alpha, &acc_instance, &accumulation_proof.error);
assert_eq!(new_acc, resulting_acc);
// TODO: anything to do with the beta error terms in the accumulation proof?
//
Ok(())
}
/// The decider can be run at the very end of the accumulation,
/// to ensure that a given accumulator is correct.
/// It is normally run in conjunction with some `verify` function to verify the last proof of the IVC.
// TODO: get two arguments, acc instance and acc witnes?
pub fn decider(&self, acc: &SplitAccumulator<CK>) -> Result<(), String> {
// 1. check commitment of the witness
let witness_poly = DensePolynomial::from_coefficients_slice(&acc.witness.witness);
// TODO: there's no notion of blinding in protostar atm
let witness_com = self.commit_key.commit_non_hiding(&witness_poly);
assert_eq!(witness_com, acc.instance.commitments.witness_com);
// 2. check the commitment to the betas as well
// (but remove the 1 at the beginning of each vector)
let mut betas_coeffs = acc.witness.powers_of_beta.small_powers[1..].to_vec();
betas_coeffs.extend(&acc.witness.powers_of_beta.big_powers[1..]);
let betas_poly = DensePolynomial::from_coefficients_slice(&betas_coeffs);
let expected_betas_com = self.commit_key.commit_non_hiding(&betas_poly);
assert_eq!(expected_betas_com, acc.instance.commitments.betas_com);
// 3. ensure that the powers of beta are computed correctly
// to do this we need to check all of the beta checks using relaxed equations
// (with mu and potentially error terms that should be part of some proof)
// (but we don't have access to the proof here...)
// that means the Beta errors have to be part of the accumulator instance (hence `beta_errors` in it)
{
// silly checks: B[0] = B'[0] = 1 holds for a fresh proof;
// after folding they accumulate to mu (the slack), which is what we check here
assert_eq!(
acc.witness.powers_of_beta.small_powers[0],
acc.instance.slack
);
assert_eq!(acc.witness.powers_of_beta.big_powers[0], acc.instance.slack);
// beta itself
// TODO: necessary?
let beta = acc.witness.powers_of_beta.small_powers[1];
assert_eq!(beta, acc.instance.challenges.beta);
// TODO: why I don't use mu? These are not the relaxed equations
// collect error terms of small powers checks
let mut beta_errors =
Vec::with_capacity(acc.witness.powers_of_beta.small_powers.len() * 2);
for (b, bp1) in acc.witness.powers_of_beta.small_powers[1..]
.iter()
.tuple_windows()
{
beta_errors.push(acc.instance.slack * *bp1 - *b * beta);
}
// collect error terms of small/big power check
beta_errors.push(
acc.instance.slack * acc.witness.powers_of_beta.big_powers[1]
- *acc.witness.powers_of_beta.small_powers.last().unwrap() * beta,
);
// collect error terms of big powers checks
let big = acc.witness.powers_of_beta.big_powers[1];
for (b, bp1) in acc.witness.powers_of_beta.big_powers[1..]
.iter()
.tuple_windows()
{
beta_errors.push(acc.instance.slack * *bp1 - *b * big);
}
// sanity check
let sqrt_l = self.circuits.sqrt_l();
assert_eq!(beta_errors.len(), 2 * sqrt_l - 3);
dbg!(&beta_errors);
// commit to the errors and check that it matches our Beta error commitment
let beta_errors_poly = DensePolynomial::from_coefficients_slice(&beta_errors);
let beta_errors_com = self.commit_key.commit_non_hiding(&beta_errors_poly);
assert_eq!(beta_errors_com, acc.instance.beta_errors);
}
// 4. compute the relaxed equation in the clear
// let res = eq_sum(0, d, |j| {
// mu.pow(d-j) * f_j(pi, m, etc.)
// });
let mut cst_vars = HashMap::new();
cst_vars.insert(PlonkishVar::Coeff(0), true);
let degree = todo!(); // self.circuits.degree(&cst_vars);
// sanitize
self.circuits.validate(&cst_vars);
// compute mu^d-j
let mut mu_d_js = Vec::with_capacity(degree + 1);
mu_d_js.push(CK::Field::one());
for _ in 1..=degree {
mu_d_js.push(*mu_d_js.last().unwrap() * acc.instance.slack);
}
mu_d_js.reverse();
// compute sum
let env = PlonkishEnv {
public_input: acc.instance.public_input.clone(), // TODO: bad clone
coeffs: vec![],
witness: acc.witness.witness.clone(), // TODO: bad clone
challenges: acc.instance.challenges.clone(), // TODO: bad clone
};
let mut beta_power = 0;
let (next_beta_power, f) = self.circuits.evaluate::<PlonkishVar>(
degree,
&env,
&acc.witness.powers_of_beta,
beta_power,
);
let mut error = CK::Field::zero();
for (f_j, mu_d_j) in f.iter().zip(mu_d_js) {
error += mu_d_j * f_j;
}
// check that the computed error matches the one in the instance
assert_eq!(error, acc.instance.error);
Ok(())
}
}