diff --git a/Cargo.toml b/Cargo.toml
index c30163844eff5f2e5fe12b0d778fba5db33710fd..99c32058a996b7c28f61b3ec281443eb4742d83c 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -32,3 +32,4 @@ rand = "0.8.5"
 
 [features]
 kzg = ["dep:ark-poly-commit"]
+aplonk = []
diff --git a/src/algebra.rs b/src/algebra.rs
index 5e88243d5052b3864f7cfad5705aeddb7b926b58..b638885d1a9a6c25341dade5a490ffdc8dfaf823 100644
--- a/src/algebra.rs
+++ b/src/algebra.rs
@@ -1,8 +1,9 @@
-use ark_ec::pairing::Pairing;
+use ark_ec::pairing::{Pairing, PairingOutput};
 use ark_poly::DenseUVPolynomial;
 use ark_std::One;
 use std::ops::{Div, Mul};
 
+#[cfg(feature = "kzg")]
 pub(crate) fn scalar_product_polynomial<E, P>(lhs: &[E::ScalarField], rhs: &[P]) -> P
 where
     E: Pairing,
@@ -22,6 +23,45 @@ where
     polynomial
 }
 
+#[cfg(feature = "aplonk")]
+pub(super) fn scalar_product_pairing<E: Pairing>(lhs: &[E::G1], rhs: &[E::G2]) -> PairingOutput<E> {
+    lhs.iter()
+        .zip(rhs.iter())
+        .map(|(l, r)| E::pairing(l, r))
+        .sum()
+}
+
+#[cfg(feature = "aplonk")]
+pub(super) fn scalar_product<E: Pairing>(
+    lhs: &[E::ScalarField],
+    rhs: &[E::ScalarField],
+) -> E::ScalarField {
+    lhs.iter().zip(rhs.iter()).map(|(l, r)| l.mul(r)).sum()
+}
+
+#[cfg(feature = "aplonk")]
+pub(super) fn scalar_product_g1<E: Pairing>(lhs: &[E::G1], rhs: &[E::ScalarField]) -> E::G1 {
+    lhs.iter().zip(rhs.iter()).map(|(l, r)| l.mul(r)).sum()
+}
+
+#[cfg(feature = "aplonk")]
+pub(super) fn scalar_product_g2<E: Pairing>(lhs: &[E::G2], rhs: &[E::ScalarField]) -> E::G2 {
+    lhs.iter().zip(rhs.iter()).map(|(l, r)| l.mul(r)).sum()
+}
+
+#[cfg(feature = "aplonk")]
+pub(super) mod vector {
+    use ark_ff::Zero;
+
+    /// return [0, 0, ..., 0], a vector of `capacity` zeros over some group
+    pub fn zero<Z: Zero + Clone>(capacity: usize) -> Vec<Z> {
+        let mut vector = Vec::with_capacity(capacity);
+        vector.resize(capacity, Z::zero());
+
+        vector
+    }
+}
+
 /// compute the successive powers of a scalar group element
 ///
 /// if the scalar number is called *r*, then [`powers_of`] will return the
@@ -68,7 +108,9 @@ mod tests {
     use ark_ec::pairing::Pairing;
     use ark_ff::PrimeField;
     use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial};
-    use std::ops::Div;
+    use ark_std::test_rng;
+    use ark_std::UniformRand;
+    use std::ops::{Add, Div};
 
     type UniPoly381 = DensePolynomial<<Bls12_381 as Pairing>::ScalarField>;
 
@@ -103,5 +145,53 @@ mod tests {
     fn polynomial() {
         polynomial_template::<Bls12_381, UniPoly381>();
     }
+
+    fn scalar_template<E: Pairing>(lhs: Vec<u8>, rhs: Vec<u8>, result: u8) {
+        let lhs = lhs
+            .iter()
+            .map(|x| E::ScalarField::from_le_bytes_mod_order(&[*x]))
+            .collect::<Vec<_>>();
+        let rhs = rhs
+            .iter()
+            .map(|x| E::ScalarField::from_le_bytes_mod_order(&[*x]))
+            .collect::<Vec<_>>();
+        let result = E::ScalarField::from_le_bytes_mod_order(&[result]);
+
+        assert_eq!(super::super::scalar_product::<E>(&lhs, &rhs), result);
+    }
+
+    #[test]
+    fn scalar() {
+        scalar_template::<Bls12_381>(vec![1, 2], vec![3, 4], 11);
+        scalar_template::<Bls12_381>(vec![5, 6], vec![7, 8], 83);
+    }
+
+    #[ignore = "scalar_product_g1 is a clone of scalar_product"]
+    #[test]
+    fn g_1() {}
+
+    #[ignore = "scalar_product_g2 is a clone of scalar_product"]
+    #[test]
+    fn g_2() {}
+
+    fn pairing_template<E: Pairing>() {
+        let rng = &mut test_rng();
+
+        let g_1 = E::G1::rand(rng);
+        let g_2 = E::G2::rand(rng);
+
+        let pairing = E::pairing(g_1, g_2);
+        let two_pairings = pairing.add(pairing);
+
+        assert_eq!(
+            super::super::scalar_product_pairing::<E>(&[g_1, g_1], &[g_2, g_2]),
+            two_pairings
+        );
+    }
+
+    #[test]
+    fn pairing() {
+        pairing_template::<Bls12_381>();
+    }
 }
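The `scalar_product*` helpers above are all the same zip/map/sum fold, specialized to different groups. A plain-integer mirror (a toy, not part of the patch) pins down the convention and one pitfall worth knowing: `zip` silently truncates to the shorter input instead of rejecting mismatched lengths.

```rust
// Toy mirror of `scalar_product` over u64, matching the values in the
// `scalar` test above.
fn scalar_product(lhs: &[u64], rhs: &[u64]) -> u64 {
    lhs.iter().zip(rhs.iter()).map(|(l, r)| l * r).sum()
}

fn main() {
    assert_eq!(scalar_product(&[1, 2], &[3, 4]), 11); // 1*3 + 2*4
    // NB: like the generic versions, mismatched lengths are silently
    // truncated by `zip` rather than reported as an error.
    assert_eq!(scalar_product(&[1, 2, 9], &[3, 4]), 11);
}
```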
diff --git a/src/aplonk/ipa.rs b/src/aplonk/ipa.rs
new file mode 100644
index 0000000000000000000000000000000000000000..e5e2d98cde9bf722e3fe137e5a8e7176589f9180
--- /dev/null
+++ b/src/aplonk/ipa.rs
@@ -0,0 +1,428 @@
+use ark_ec::pairing::{Pairing, PairingOutput};
+use ark_ff::Field;
+use ark_poly::DenseUVPolynomial;
+use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
+use std::ops::{Add, Div, Mul};
+
+use crate::algebra::{
+    powers_of, scalar_product_g1, scalar_product_g2, scalar_product_pairing, vector,
+};
+use crate::aplonk::polynomial;
+use crate::aplonk::transcript;
+use crate::error::KomodoError;
+
+/// holds the setup parameters of the IPA stage of [aPlonk from [Ambrona et al.]][aPlonK]
+///
+/// this can be found in [aPlonk from [Ambrona et al.]][aPlonK] in
+/// - page **13**. in Setup.1
+/// - page **13**. in Setup.3
+///
+/// [aPlonk]: https://eprint.iacr.org/2022/1352.pdf
+#[derive(Debug, Clone, PartialEq, Default, CanonicalSerialize, CanonicalDeserialize)]
+pub struct Params<E: Pairing> {
+    /// *[\tau]_1* in the paper
+    pub tau_1: E::G1,
+    /// *ck_\tau* in the paper
+    pub ck_tau: Vec<E::G2>,
+}
+
+/// holds all the necessary pieces to prove the IPA stage of [aPlonk from [Ambrona et al.]][aPlonK]
+///
+/// this can be found in [aPlonk from [Ambrona et al.]][aPlonK] as
+/// *\pi = ({L_G^j, R_G^j, L_r^j, R_r^j}_{j \in [\kappa]}, \mu^0, G^0)* in
+/// - page **15**. in IPA.Prove.10
+///
+/// > **Note**
+/// > the notations are the same as in the paper, only with all letters in lower
+/// > case and the powers at the bottom, e.g. `l_g_j` instead of *L_G^j*, and
+/// > with *G* renamed as `ck_tau`.
+///
+/// [aPlonk]: https://eprint.iacr.org/2022/1352.pdf
+#[derive(Debug, Clone, Default, PartialEq, CanonicalSerialize, CanonicalDeserialize)]
+pub(super) struct Proof<E: Pairing> {
+    pub l_g: Vec<PairingOutput<E>>,
+    pub r_g: Vec<PairingOutput<E>>,
+    pub l_r: Vec<E::G1>,
+    pub r_r: Vec<E::G1>,
+    mu_0: E::G1,
+    pub ck_tau_0: E::G2,
+}
+
+// compute whether a number is a power of two
+//
+// generated by ChatGPT
+fn is_power_of_two(n: usize) -> bool {
+    n != 0 && (n & (n - 1)) == 0
+}
+
+/// prove a sequence of commits with a modified IPA
+///
+/// > **Note**
+/// > when we say *page xx* or *<name of algorithm>*, we refer to the following
+/// > paper: [aPlonk from [Ambrona et al.]][aPlonK]
+///
+/// the following algorithm
+/// - uses the same notations as in the aPlonK paper, modulo the fancy LaTeX fonts
+/// - marks the steps of the paper algorithm with `// 1.` or `// 7.`
+///
+/// > **Note**
+/// > here, we do not use the red steps of the algorithm page **15**, e.g.
+/// > we did not have to write step 8. at all.
+///
+/// Arguments:
+/// - k: number of polynomials (must be a power of 2)
+/// - ck_tau: commitment key of IPA containing *k* values from *G_2*
+/// - c_g: sum of pairings of commits, i.e. *com_f* in *Commit-Polys* page **13**
+/// - r: random scalar
+/// - P: random linear combination of the commits
+/// - mu: the actual commits of the *k* polynomials
+///
+/// [aPlonk]: https://eprint.iacr.org/2022/1352.pdf
+pub(super) fn prove<E: Pairing>(
+    k: usize,
+    ck_tau: &[E::G2],
+    c_g: PairingOutput<E>,
+    r: E::ScalarField,
+    p: E::G1,
+    mu: &[E::G1],
+) -> Result<(Proof<E>, Vec<E::ScalarField>), KomodoError> {
+    if !is_power_of_two(k) {
+        return Err(KomodoError::Other(
+            "PolynomialCountIpaError: not a power of 2".to_string(),
+        ));
+    }
+    let kappa = f64::log2(k as f64) as usize;
+    let mut l_g = vector::zero::<PairingOutput<E>>(kappa);
+    let mut l_r = vector::zero::<E::G1>(kappa);
+    let mut r_g = vector::zero::<PairingOutput<E>>(kappa);
+    let mut r_r = vector::zero::<E::G1>(kappa);
+    let mut u = vector::zero::<E::ScalarField>(kappa);
+
+    // 1.
+    let mut mu = mu.to_vec();
+    let mut r_vec = powers_of::<E>(r, k);
+    let mut ck_tau = ck_tau.to_vec();
+    let mut ts = match transcript::initialize(c_g, r, p) {
+        Ok(transcript) => transcript,
+        Err(error) => return Err(KomodoError::Other(format!("SerializationError: {}", error))),
+    };
+
+    // 2.
+    for j in (0..kappa).rev() {
+        let (mu_left, mu_right) = mu.split_at(mu.len() / 2);
+        let (ck_tau_left, ck_tau_right) = ck_tau.split_at(ck_tau.len() / 2);
+        let (r_left, r_right) = r_vec.split_at(r_vec.len() / 2);
+
+        // 3.
+        l_g[j] = scalar_product_pairing(mu_left, ck_tau_right);
+        l_r[j] = scalar_product_g1::<E>(mu_left, r_right);
+
+        // 4.
+        r_g[j] = scalar_product_pairing(mu_right, ck_tau_left);
+        r_r[j] = scalar_product_g1::<E>(mu_right, r_left);
+
+        // 5.
+        u[j] = match transcript::hash(l_g[j], r_g[j], l_r[j], r_r[j], &ts) {
+            Ok(hash) => hash,
+            Err(error) => return Err(KomodoError::Other(format!("SerializationError: {}", error))),
+        };
+        ts = match transcript::reset::<E>(u[j]) {
+            Ok(transcript) => transcript,
+            Err(error) => return Err(KomodoError::Other(format!("SerializationError: {}", error))),
+        };
+
+        let u_j_inv = if let Some(inverse) = u[j].inverse() {
+            inverse
+        } else {
+            return Err(KomodoError::Other("EllipticInverseError".to_string()));
+        };
+
+        // 6.
+        mu = mu_left
+            .iter()
+            .zip(mu_right.iter())
+            .map(|(l, r)| l.mul(u[j]) + r.mul(u_j_inv))
+            .collect();
+        // 7.
+        ck_tau = ck_tau_left
+            .iter()
+            .zip(ck_tau_right.iter())
+            .map(|(l, r)| l.mul(u_j_inv) + r.mul(u[j]))
+            .collect();
+        // 9.
+        r_vec = r_left
+            .iter()
+            .zip(r_right.iter())
+            .map(|(l, r)| l.mul(u_j_inv) + r.mul(u[j]))
+            .collect();
+    }
+
+    // 10.
+    Ok((
+        Proof {
+            l_g,
+            r_g,
+            l_r,
+            r_r,
+            mu_0: mu[0],
+            ck_tau_0: ck_tau[0],
+        },
+        u,
+    ))
+}
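The folding loop (steps 6, 7 and 9) is easiest to see on scalars: each round halves `mu`, `ck_tau` and `r_vec`, weighting the two halves by *u_j* and its inverse, so after *\kappa = log_2(k)* rounds each vector has a single element left. A toy sketch over `f64` (faking the field inverse with `1.0 / u_j`; all values hypothetical):

```rust
// One fold halves the vector: out[i] = left[i] * w_left + right[i] * w_right.
fn fold(v: &[f64], w_left: f64, w_right: f64) -> Vec<f64> {
    let (left, right) = v.split_at(v.len() / 2);
    left.iter()
        .zip(right.iter())
        .map(|(l, r)| l * w_left + r * w_right)
        .collect()
}

fn main() {
    let mut mu = vec![1.0, 2.0, 3.0, 4.0]; // k = 4, kappa = 2
    let mut ck = vec![5.0, 6.0, 7.0, 8.0];
    for u_j in [2.0, 3.0] {
        // `mu` folds with (u_j, u_j^{-1}), `ck_tau` with (u_j^{-1}, u_j)
        mu = fold(&mu, u_j, 1.0 / u_j);
        ck = fold(&ck, 1.0 / u_j, u_j);
    }
    assert_eq!((mu.len(), ck.len()), (1, 1)); // mu_0 and ck_tau_0
}
```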
+/// verify the integrity of a proven sequence of commits with a modified IPA
+///
+/// > **Note**
+/// > when we say *page xx* or *<name of algorithm>*, we refer to the following
+/// > paper: [aPlonk from [Ambrona et al.]][aPlonK]
+///
+/// the following algorithm
+/// - uses the same notations as in the aPlonK paper, modulo the fancy LaTeX fonts
+/// - marks the steps of the paper algorithm with `// 1.` or `// 7.`
+///
+/// > **Note**
+/// > here, we do not use the red steps of the algorithm page **15**, e.g.
+/// > we did not have to write step 7.2. at all.
+///
+/// Arguments:
+/// - k: number of polynomials (must be a power of 2)
+/// - ck_tau: commitment key of IPA containing *k* values from *G_2*;
+///   when set to `None`, this turns *IPA.Verify* into *IPA.Verify'*, i.e. the
+///   variant without the "scalar product" guard, which is the version of the
+///   algorithm used in the *Open* algorithm of aPlonK
+/// - c_g: sum of pairings of commits, i.e. *com_f* in *Commit-Polys* page **13**
+/// - r: random scalar
+/// - P: random linear combination of the commits
+/// - proof: the proof crafted by the *IPA.Prove* algorithm
+///
+/// [aPlonk]: https://eprint.iacr.org/2022/1352.pdf
+pub(super) fn verify<E, P>(
+    k: usize,
+    ck_tau: Option<&Vec<E::G2>>, // this is an option to implement `IPA.Verify'`
+    c_g: PairingOutput<E>,
+    r: E::ScalarField,
+    p: E::G1,
+    proof: &Proof<E>,
+) -> Result<bool, KomodoError>
+where
+    E: Pairing,
+    P: DenseUVPolynomial<E::ScalarField, Point = E::ScalarField>,
+    for<'a, 'b> &'a P: Div<&'b P, Output = P>,
+{
+    if !is_power_of_two(k) {
+        return Err(KomodoError::Other(
+            "PolynomialCountIpaError: not a power of 2".to_string(),
+        ));
+    }
+    let kappa = f64::log2(k as f64) as usize;
+    let mut ts = match transcript::initialize(c_g, r, p) {
+        Ok(transcript) => transcript,
+        Err(error) => return Err(KomodoError::Other(format!("SerializationError: {}", error))),
+    };
+    let mut u = vector::zero::<E::ScalarField>(kappa);
+
+    // 1.
+    let l_g = &proof.l_g;
+    let r_g = &proof.r_g;
+    let l_r = &proof.l_r;
+    let r_r = &proof.r_r;
+    let mu_0 = proof.mu_0;
+    let ck_tau_0 = proof.ck_tau_0;
+
+    // 2.
+    for j in (0..kappa).rev() {
+        // 3.
+        u[j] = match transcript::hash(l_g[j], r_g[j], l_r[j], r_r[j], &ts) {
+            Ok(hash) => hash,
+            Err(error) => return Err(KomodoError::Other(format!("SerializationError: {}", error))),
+        };
+        ts = match transcript::reset::<E>(u[j]) {
+            Ok(transcript) => transcript,
+            Err(error) => return Err(KomodoError::Other(format!("SerializationError: {}", error))),
+        };
+    }
+
+    let mut u_inv = Vec::new();
+    for u_i in &u {
+        if let Some(inverse) = u_i.inverse() {
+            u_inv.push(inverse)
+        } else {
+            return Err(KomodoError::Other("EllipticInverseError".to_string()));
+        }
+    }
+
+    // 4.
+    let g = polynomial::compute_g::<E, P>(k, kappa, &u, &u_inv);
+
+    // 5.
+    let r_0 = g.evaluate(&r);
+
+    // 6.
+    // implicit because the polynomial `g` *is* equivalent to its list of
+    // coefficients, in the same order as in the paper.
+
+    // 7.
+    if let Some(ck_tau) = ck_tau {
+        // the "scalar product" guard of `IPA.Verify`; passing `None` skips it
+        // and yields `IPA.Verify'`
+        if scalar_product_g2::<E>(ck_tau, g.coeffs()) != ck_tau_0 {
+            return Ok(false);
+        }
+    }
+
+    // 8.
+    let r_sum: E::G1 = u
+        .iter()
+        .zip(u_inv.iter())
+        .zip(l_r.iter().zip(r_r.iter()))
+        .map(|((u_j, u_j_inv), (l_r_j, r_r_j))| {
+            let lhs = l_r_j.mul(u_j.mul(u_j));
+            let rhs = r_r_j.mul(u_j_inv.mul(u_j_inv));
+
+            lhs.add(rhs)
+        })
+        .sum();
+    let g_sum: PairingOutput<E> = u
+        .iter()
+        .zip(u_inv.iter())
+        .zip(l_g.iter().zip(r_g.iter()))
+        .map(|((u_j, u_j_inv), (l_g_j, r_g_j))| {
+            let lhs = l_g_j.mul(u_j.mul(u_j));
+            let rhs = r_g_j.mul(u_j_inv.mul(u_j_inv));
+
+            lhs.add(rhs)
+        })
+        .sum();
+
+    let lhs = mu_0.mul(r_0) == p.add(r_sum);
+    let rhs = E::pairing(mu_0, ck_tau_0) == c_g.add(g_sum);
+
+    Ok(lhs && rhs)
+}
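A useful sanity check behind step 5: folding the powers-of-*r* vector with *(u_j^{-1}, u_j)*, exactly as `prove` does in step 9, collapses it to *g(r)*, which is why the verifier can recompute *r_0* from *g* alone. A toy over `f64` with hypothetical *u* values:

```rust
fn main() {
    let (r, u) = (3.0_f64, [2.0_f64, 5.0]);
    let mut r_vec: Vec<f64> = (0..4).map(|i| r.powi(i)).collect(); // k = 4
    // fold from the highest round down, as in `prove`
    for u_j in u.iter().rev() {
        let (left, right) = r_vec.split_at(r_vec.len() / 2);
        r_vec = left
            .iter()
            .zip(right.iter())
            .map(|(l, rr)| l / u_j + rr * u_j)
            .collect();
    }
    // g(r) = (1/u_0 + u_0 r)(1/u_1 + u_1 r^2), as expanded in polynomial.rs
    let g_r = (1.0 / u[0] + u[0] * r) * (1.0 / u[1] + u[1] * r * r);
    assert!((r_vec[0] - g_r).abs() < 1e-9);
}
```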
+
+#[cfg(test)]
+mod tests {
+    use ark_bls12_381::Bls12_381;
+    use ark_ec::pairing::{Pairing, PairingOutput};
+    use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial};
+    use ark_poly_commit::Error;
+    use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Compress, Validate};
+    use ark_std::{test_rng, UniformRand};
+    use std::ops::Div;
+
+    use super::{is_power_of_two, Proof};
+    use crate::algebra::{powers_of, scalar_product_g1, scalar_product_pairing};
+    use crate::aplonk::setup;
+
+    type UniPoly381 = DensePolynomial<<Bls12_381 as Pairing>::ScalarField>;
+
+    // generated by ChatGPT
+    #[test]
+    fn power_of_two() {
+        // Powers of 2 should return true
+        assert!(is_power_of_two(1));
+        assert!(is_power_of_two(2));
+        assert!(is_power_of_two(4));
+        assert!(is_power_of_two(8));
+        assert!(is_power_of_two(16));
+
+        // Non-powers of 2 should return false
+        assert!(!is_power_of_two(0));
+        assert!(!is_power_of_two(3));
+        assert!(!is_power_of_two(5));
+        assert!(!is_power_of_two(10));
+        assert!(!is_power_of_two(15));
+    }
+
+    #[allow(clippy::type_complexity)]
+    fn test_setup<E, P>(
+        k: usize,
+        degree: usize,
+    ) -> Result<
+        (
+            Vec<E::G2>,
+            PairingOutput<E>,
+            E::ScalarField,
+            E::G1,
+            Proof<E>,
+        ),
+        Error,
+    >
+    where
+        E: Pairing,
+        P: DenseUVPolynomial<E::ScalarField, Point = E::ScalarField>,
+        for<'a, 'b> &'a P: Div<&'b P, Output = P>,
+    {
+        let rng = &mut test_rng();
+
+        let mu: Vec<E::G1> = (0..k).map(|_| E::G1::rand(rng)).collect();
+
+        let params = setup::<E, P>(degree, k)?;
+        let ck_tau = params.ipa.ck_tau;
+
+        let r = E::ScalarField::rand(rng);
+
+        let c_g = scalar_product_pairing::<E>(&mu, &ck_tau);
+        let p = scalar_product_g1::<E>(&mu, &powers_of::<E>(r, k));
+
+        let (proof, _) = super::prove::<E>(k, &ck_tau, c_g, r, p, &mu).unwrap();
+
+        Ok((ck_tau, c_g, r, p, proof))
+    }
+
+    fn verify_template<E, P>(k: usize, degree: usize) -> Result<(), Error>
+    where
+        E: Pairing,
+        P: DenseUVPolynomial<E::ScalarField, Point = E::ScalarField>,
+        for<'a, 'b> &'a P: Div<&'b P, Output = P>,
+    {
+        let (ck_tau, c_g, r, p, proof) = test_setup::<E, P>(k, degree)?;
+
+        assert!(
+            super::verify::<E, P>(k, Some(&ck_tau), c_g, r, p, &proof).unwrap(),
+            "IPA failed for bls12-381 and k = {}",
+            k
+        );
+
+        Ok(())
+    }
+
+    fn verify_with_errors_template<E, P>(k: usize, degree: usize) -> Result<(), Error>
+    where
+        E: Pairing,
+        P: DenseUVPolynomial<E::ScalarField, Point = E::ScalarField>,
+        for<'a, 'b> &'a P: Div<&'b P, Output = P>,
+    {
+        let (ck_tau, c_g, r, p, proof) = test_setup::<E, P>(k, degree)?;
+
+        let mut bytes = Vec::new();
+        proof
+            .serialize_with_mode(&mut bytes, Compress::Yes)
+            .expect("Could not serialize the proof");
+        bytes[10] += 1;
+        let proof = Proof::deserialize_with_mode(&*bytes, Compress::Yes, Validate::No)
+            .expect("Could not deserialize the corrupted proof");
+
+        assert!(
+            !super::verify::<E, P>(k, Some(&ck_tau), c_g, r, p, &proof).unwrap(),
+            "IPA should fail for bls12-381, k = {} and a corrupted proof",
+            k
+        );
+
+        Ok(())
+    }
+
+    const DEGREE_BOUND: usize = 32;
+
+    #[test]
+    fn verify() {
+        for k in [2, 4, 8, 16, 32] {
+            verify_template::<Bls12_381, UniPoly381>(k, DEGREE_BOUND).unwrap();
+        }
+    }
+
+    #[test]
+    fn verify_with_errors() {
+        for k in [2, 4, 8, 16, 32] {
+            verify_with_errors_template::<Bls12_381, UniPoly381>(k, DEGREE_BOUND).unwrap();
+        }
+    }
+}
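On the `is_power_of_two` bit trick used throughout this file: a power of two has a single set bit, so `n & (n - 1)` clears it to zero. The standard library ships the same predicate, and `ilog2` offers an integer-only alternative to the `f64::log2` cast used to compute *\kappa*; a short demonstration:

```rust
fn main() {
    let k: usize = 8;
    assert!(k.is_power_of_two()); // std equivalent: 0b1000 & 0b0111 == 0
    assert!(!6usize.is_power_of_two()); // 0b0110 & 0b0101 == 0b0100 != 0

    // kappa as computed in `prove`/`verify`...
    let kappa = f64::log2(k as f64) as usize;
    assert_eq!(kappa, 3);
    // ...and an exact, integer-only alternative:
    assert_eq!(k.ilog2() as usize, kappa);
}
```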
diff --git a/src/aplonk/mod.rs b/src/aplonk/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..553c0c1abc5a0784fee2bca393ff8180c288efb9
--- /dev/null
+++ b/src/aplonk/mod.rs
@@ -0,0 +1,565 @@
+//! aPlonK: an extension of KZG+ where commits are _folded_ into one
+//!
+//! > references:
+//! > - [Ambrona et al., 2022](https://link.springer.com/chapter/10.1007/978-3-031-41326-1_11)
+use ark_ec::{
+    pairing::{Pairing, PairingOutput},
+    AffineRepr,
+};
+use ark_ff::{Field, PrimeField};
+use ark_poly::DenseUVPolynomial;
+use ark_poly_commit::{
+    kzg10::{self, Randomness, KZG10},
+    PCRandomness,
+};
+use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Compress};
+use ark_std::{test_rng, One, UniformRand};
+use rs_merkle::algorithms::Sha256;
+use rs_merkle::Hasher;
+use std::marker::PhantomData;
+use std::ops::{Div, Mul};
+
+use crate::{algebra, error::KomodoError, fec::Shard, kzg, zk::trim};
+
+mod ipa;
+mod polynomial;
+mod transcript;
+
+#[derive(Debug, Clone, Default, PartialEq, CanonicalSerialize, CanonicalDeserialize)]
+pub struct Block<E: Pairing> {
+    pub shard: Shard<E::ScalarField>,
+    com_f: PairingOutput<E>,
+    v_hat: E::ScalarField,
+    mu_hat: E::G1,
+    kzg_proof: kzg10::Proof<E>,
+    ipa_proof: ipa::Proof<E>,
+    aplonk_proof: E::G2,
+}
+
+/// /!\ [`Commitment`] is not [`CanonicalDeserialize`] because `P` is not [`Send`].
+#[derive(Debug, Clone, Default, PartialEq, CanonicalSerialize)]
+pub struct Commitment<E, P>
+where
+    E: Pairing,
+    P: DenseUVPolynomial<E::ScalarField, Point = E::ScalarField>,
+    for<'a, 'b> &'a P: Div<&'b P, Output = P>,
+{
+    _engine: PhantomData<E>,
+    _poly: PhantomData<P>,
+}
+
+/// /!\ [`SetupParams`] is not [`Default`] because [`kzg10::UniversalParams`] is not [`Default`].
+#[derive(Debug, Clone, PartialEq, CanonicalSerialize, CanonicalDeserialize)]
+pub struct SetupParams<E: Pairing> {
+    pub kzg: kzg10::UniversalParams<E>,
+    pub ipa: ipa::Params<E>,
+}
+
+#[derive(Debug, Clone, Default, PartialEq, CanonicalSerialize, CanonicalDeserialize)]
+pub struct VerifierKey<E: Pairing> {
+    pub vk_psi: kzg10::VerifierKey<E>,
+    pub tau_1: E::G1,
+    pub g1: E::G1,
+    pub g2: E::G2,
+}
+
+/// creates a combination of a trusted KZG and an IPA setup for [[aPlonk]]
+///
+/// > **Note**
+/// > this is an almost perfect translation of the *Setup* algorithm on page
+/// > **13** of [aPlonk from [Ambrona et al.]][aPlonK]
+///
+/// [aPlonk]: https://eprint.iacr.org/2022/1352.pdf
+pub fn setup<E, P>(
+    degree_bound: usize,
+    nb_polynomials: usize,
+) -> Result<SetupParams<E>, ark_poly_commit::Error>
+where
+    E: Pairing,
+    P: DenseUVPolynomial<E::ScalarField, Point = E::ScalarField>,
+    for<'a, 'b> &'a P: Div<&'b P, Output = P>,
+{
+    let rng = &mut test_rng();
+    let params = KZG10::<E, P>::setup(degree_bound, true, rng)?;
+
+    let g_1 = params.powers_of_g[0];
+    let g_2 = params.h;
+
+    let tau = E::ScalarField::rand(rng);
+    let ck_tau = algebra::powers_of::<E>(tau, nb_polynomials)
+        .iter()
+        .map(|t| g_2.mul(t))
+        .collect();
+
+    Ok(SetupParams {
+        kzg: params,
+        ipa: ipa::Params {
+            ck_tau,
+            tau_1: g_1.mul(tau),
+        },
+    })
+}
+
+pub fn commit<E, P>(
+    polynomials: Vec<P>,
+    setup: SetupParams<E>,
+) -> Result<(Vec<E::G1>, PairingOutput<E>), KomodoError>
+where
+    E: Pairing,
+    P: DenseUVPolynomial<E::ScalarField, Point = E::ScalarField>,
+    for<'a, 'b> &'a P: Div<&'b P, Output = P>,
+{
+    let supported_degree = polynomials.iter().map(|p| p.degree()).max().unwrap_or(0);
+
+    if setup.ipa.ck_tau.len() < polynomials.len() {
+        return Err(KomodoError::Other("setup error".to_string()));
+    }
+
+    let (powers, _) = trim(setup.kzg, supported_degree);
+
+    if powers.powers_of_g.len() <= supported_degree {
+        return Err(KomodoError::Other("setup error".to_string()));
+    }
+
+    // commit.1.
+    let mu = match kzg::commit(&powers, &polynomials) {
+        Ok((mu, _)) => mu,
+        Err(error) => return Err(KomodoError::Other(error.to_string())),
+    };
+    let mu: Vec<E::G1> = mu.iter().map(|c| c.0.into_group()).collect();
+
+    // commit.2.
+    let com_f: PairingOutput<E> = mu
+        .iter()
+        .enumerate()
+        .map(|(i, c)| E::pairing(c, setup.ipa.ck_tau[i]))
+        .sum();
+
+    Ok((mu, com_f))
+}
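A hypothetical end-to-end use of `setup` and `commit` (mirroring the tests further down; the `komodo` crate path and the `aplonk` feature flag are assumptions, as is the use of random polynomials in place of encoded data):

```rust
use ark_bls12_381::Bls12_381;
use ark_ec::pairing::Pairing;
use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial};
use ark_std::test_rng;

type F = <Bls12_381 as Pairing>::ScalarField;
type P = DensePolynomial<F>;

fn main() {
    let rng = &mut test_rng();
    let (degree, nb_polynomials) = (7, 4); // nb_polynomials: a power of two

    // hypothetical stand-ins for the polynomials built from file shards
    let polynomials: Vec<P> = (0..nb_polynomials).map(|_| P::rand(degree, rng)).collect();

    let params = komodo::aplonk::setup::<Bls12_381, P>(degree, nb_polynomials).unwrap();
    // mu: one KZG commitment per polynomial; com_f: their pairing-folded sum
    let (mu, com_f) = komodo::aplonk::commit(polynomials, params).unwrap();
    assert_eq!(mu.len(), nb_polynomials);
    let _ = com_f;
}
```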
+pub fn prove<E, P>(
+    commit: (Vec<E::G1>, PairingOutput<E>),
+    polynomials: Vec<P>,
+    shards: Vec<Shard<E::ScalarField>>,
+    points: Vec<E::ScalarField>,
+    params: SetupParams<E>,
+) -> Result<Vec<Block<E>>, KomodoError>
+where
+    E: Pairing,
+    P: DenseUVPolynomial<E::ScalarField, Point = E::ScalarField>,
+    for<'a, 'b> &'a P: Div<&'b P, Output = P>,
+{
+    assert_eq!(
+        shards.len(),
+        points.len(),
+        "should have same number of shards and evaluation points, found {} and {} respectively",
+        shards.len(),
+        points.len()
+    );
+
+    let (mu, com_f) = commit;
+
+    let supported_degree = polynomials.iter().map(|p| p.degree()).max().unwrap_or(0);
+    let (powers, _) = trim(params.kzg, supported_degree);
+
+    // open
+    let mut proofs = Vec::new();
+    for (s, pt) in shards.iter().zip(points.iter()) {
+        let v_hat_elements = polynomials
+            .iter()
+            .map(|p| p.evaluate(pt))
+            .collect::<Vec<E::ScalarField>>();
+
+        // open.3.1.
+        let mut r_bytes = vec![];
+        if let Err(error) = com_f.serialize_with_mode(&mut r_bytes, Compress::Yes) {
+            return Err(KomodoError::Other(format!("serialization: {}", error)));
+        }
+        if let Err(error) = pt.serialize_with_mode(&mut r_bytes, Compress::Yes) {
+            return Err(KomodoError::Other(format!("serialization: {}", error)));
+        }
+        // FIXME: hash *com_v* here
+        let hash = Sha256::hash(r_bytes.as_slice());
+        let r = E::ScalarField::from_le_bytes_mod_order(&hash);
+
+        // open.3.2.
+        let r_vec = algebra::powers_of::<E>(r, polynomials.len());
+        // open.3.3.
+        let f = algebra::scalar_product_polynomial::<E, P>(&r_vec, &polynomials);
+        // open.3.4.
+        let mu_hat: E::G1 = algebra::scalar_product_g1::<E>(&mu, &r_vec);
+        // open.3.5.
+        let v_hat: E::ScalarField = algebra::scalar_product::<E>(&v_hat_elements, &r_vec);
+
+        // open.4.
+        let kzg_proof = match KZG10::<E, P>::open(
+            &powers,
+            &f,
+            *pt,
+            &Randomness::<E::ScalarField, P>::empty(),
+        ) {
+            Ok(proof) => proof,
+            Err(error) => return Err(KomodoError::Other(format!("ark error: {}", error))),
+        };
+
+        // open.5.
+        // we do not need this step as we already share the shards on the network
+
+        // open.6.
+        let (ipa_proof, u) =
+            match ipa::prove(polynomials.len(), &params.ipa.ck_tau, com_f, r, mu_hat, &mu) {
+                Ok(proof) => proof,
+                Err(error) => return Err(error),
+            };
+        let mut u_inv = Vec::new();
+        for u_i in &u {
+            if let Some(inverse) = u_i.inverse() {
+                u_inv.push(inverse)
+            } else {
+                return Err(KomodoError::Other("EllipticInverseError".to_string()));
+            }
+        }
+
+        // open.7.1.
+        let kappa = f64::log2(polynomials.len() as f64) as usize;
+        let g = polynomial::compute_g::<E, P>(polynomials.len(), kappa, &u, &u_inv);
+        // open.7.2.
+        let mut rho_bytes = vec![];
+        if let Err(error) = ipa_proof.serialize_with_mode(&mut rho_bytes, Compress::Yes) {
+            return Err(KomodoError::Other(format!("SerializationError: {}", error)));
+        }
+        let rho = E::ScalarField::from_le_bytes_mod_order(&Sha256::hash(rho_bytes.as_slice()));
+        // open.7.3.
+        // implicit in the computation of the witness polynomial
+
+        // open.8.1.
+        let h = match KZG10::<E, P>::compute_witness_polynomial(
+            &g,
+            rho,
+            &Randomness::<E::ScalarField, P>::empty(),
+        ) {
+            Ok((h, _)) => h,
+            Err(error) => return Err(KomodoError::Other(format!("ArkError: {}", error))),
+        };
+        // open.8.2.
+        let aplonk_proof = h
+            .coeffs()
+            .iter()
+            .enumerate()
+            .map(|(i, hi)| params.ipa.ck_tau[i].mul(hi))
+            .sum();
+
+        // open.9.
+        proofs.push(Block {
+            shard: s.clone(),
+            com_f,
+            v_hat,
+            mu_hat,
+            kzg_proof,
+            ipa_proof,
+            aplonk_proof,
+        });
+    }
+
+    Ok(proofs)
+}
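Steps open.3.2 to open.3.5 rely on the fact that folding with powers of *r* commutes with evaluation: evaluating the folded polynomial at a point gives the same fold of the individual evaluations, which is what ties `v_hat` to `f` and `mu_hat`. A toy check over plain integers:

```rust
// (sum_i r^i * f_i)(x) == sum_i r^i * f_i(x), on hypothetical i128 data.
fn eval(p: &[i128], x: i128) -> i128 {
    p.iter().rev().fold(0, |acc, c| acc * x + c) // Horner's rule
}

fn main() {
    let polys: Vec<Vec<i128>> = vec![vec![1, 2, 3], vec![4, 0, 5]];
    let (r, x) = (7, 2);
    let r_vec = [1, r]; // powers of r

    // fold coefficient-wise: f = f_0 + r * f_1  (open.3.3)
    let f: Vec<i128> = (0..3)
        .map(|j| polys.iter().zip(r_vec).map(|(p, ri)| ri * p[j]).sum())
        .collect();

    // fold the evaluations: v_hat = v_0 + r * v_1  (open.3.5)
    let v_hat: i128 = polys.iter().zip(r_vec).map(|(p, ri)| ri * eval(p, x)).sum();
    assert_eq!(eval(&f, x), v_hat);
}
```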
+pub fn verify<E, P>(
+    block: &Block<E>,
+    pt: E::ScalarField,
+    vk_psi: &kzg10::VerifierKey<E>,
+    tau_1: E::G1,
+    g_1: E::G1,
+    g_2: E::G2,
+) -> Result<bool, KomodoError>
+where
+    E: Pairing,
+    P: DenseUVPolynomial<E::ScalarField, Point = E::ScalarField>,
+    for<'a, 'b> &'a P: Div<&'b P, Output = P>,
+{
+    // check.1.
+    let mut bytes = vec![];
+    if let Err(error) = block.com_f.serialize_with_mode(&mut bytes, Compress::Yes) {
+        return Err(KomodoError::Other(format!("SerializationError: {}", error)));
+    }
+    if let Err(error) = pt.serialize_with_mode(&mut bytes, Compress::Yes) {
+        return Err(KomodoError::Other(format!("SerializationError: {}", error)));
+    }
+    // FIXME: hash *com_v* here
+    let hash = Sha256::hash(bytes.as_slice());
+    let r = E::ScalarField::from_le_bytes_mod_order(&hash);
+
+    // check.2.
+    let p1 = block.mu_hat - vk_psi.g.mul(block.v_hat);
+    let inner = vk_psi.beta_h.into_group() - vk_psi.h.mul(&pt);
+    if E::pairing(p1, vk_psi.h) != E::pairing(block.kzg_proof.w, inner) {
+        return Ok(false);
+    }
+
+    // TODO: missing part of the aplonk algorithm
+    // check.3.
+
+    let nb_polynomials = block.shard.size
+        / (block.shard.k as usize)
+        / (E::ScalarField::MODULUS_BIT_SIZE as usize / 8);
+
+    // check.4.
+    if !ipa::verify(
+        nb_polynomials,
+        None, // we call *IPA.Verify'* here
+        block.com_f,
+        r,
+        block.mu_hat,
+        &block.ipa_proof,
+    )? {
+        return Ok(false);
+    }
+
+    // check.5.1.
+    let mut bytes = vec![];
+    if let Err(error) = block
+        .ipa_proof
+        .serialize_with_mode(&mut bytes, Compress::Yes)
+    {
+        return Err(KomodoError::Other(format!("SerializationError: {}", error)));
+    }
+    let hash = Sha256::hash(bytes.as_slice());
+    let rho = E::ScalarField::from_le_bytes_mod_order(&hash);
+
+    let kappa = f64::log2(nb_polynomials as f64) as usize;
+    let mut ts = match transcript::initialize(block.com_f, r, block.mu_hat) {
+        Ok(transcript) => transcript,
+        Err(error) => return Err(KomodoError::Other(format!("SerializationError: {}", error))),
+    };
+    let mut u = algebra::vector::zero::<E::ScalarField>(kappa);
+    for j in (0..kappa).rev() {
+        u[j] = match transcript::hash(
+            block.ipa_proof.l_g[j],
+            block.ipa_proof.r_g[j],
+            block.ipa_proof.l_r[j],
+            block.ipa_proof.r_r[j],
+            &ts,
+        ) {
+            Ok(hash) => hash,
+            Err(error) => return Err(KomodoError::Other(format!("SerializationError: {}", error))),
+        };
+        ts = match transcript::reset::<E>(u[j]) {
+            Ok(transcript) => transcript,
+            Err(error) => return Err(KomodoError::Other(format!("SerializationError: {}", error))),
+        };
+    }
+
+    let mut u_inv = Vec::new();
+    for u_i in &u {
+        if let Some(inverse) = u_i.inverse() {
+            u_inv.push(inverse)
+        } else {
+            return Err(KomodoError::Other("EllipticInverseError".to_string()));
+        }
+    }
+
+    // check.5.2.
+    let g = polynomial::compute_g::<E, P>(nb_polynomials, kappa, &u, &u_inv);
+    let v_rho = g.evaluate(&rho);
+
+    // check.6.
+    let lhs = E::pairing(tau_1 - g_1.mul(rho), block.aplonk_proof);
+    let rhs = E::pairing(
+        g_1.mul(E::ScalarField::one()),
+        block.ipa_proof.ck_tau_0 - g_2.mul(v_rho),
+    );
+    let b_tau = lhs == rhs;
+
+    // check.7.
+    // the formula is implicit here because
+    // - b_psi has passed in check.2.
+    // - b_v is skipped for now
+    // - b_IPA has passed in check.4.
+    Ok(b_tau)
+}
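check.2 and check.6 are both instances of pairing bilinearity, *e(a G_1, b G_2) = e(G_1, ab G_2)*; a minimal concrete check on BLS12-381:

```rust
use ark_bls12_381::Bls12_381;
use ark_ec::pairing::Pairing;
use ark_std::{test_rng, UniformRand};
use std::ops::Mul;

fn main() {
    let rng = &mut test_rng();
    let g1 = <Bls12_381 as Pairing>::G1::rand(rng);
    let g2 = <Bls12_381 as Pairing>::G2::rand(rng);
    let a = <Bls12_381 as Pairing>::ScalarField::rand(rng);
    let b = <Bls12_381 as Pairing>::ScalarField::rand(rng);

    // scalars can be moved freely across the two pairing slots
    assert_eq!(
        Bls12_381::pairing(g1.mul(a), g2.mul(b)),
        Bls12_381::pairing(g1, g2.mul(a * b)),
    );
}
```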
+
+#[cfg(test)]
+mod tests {
+    use super::{commit, prove, setup, Block};
+    use crate::{conversions::u32_to_u8_vec, fec::encode, field, linalg::Matrix, zk::trim};
+
+    use ark_bls12_381::Bls12_381;
+    use ark_ec::{pairing::Pairing, AffineRepr};
+    use ark_ff::{Field, PrimeField};
+    use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial};
+    use ark_poly_commit::kzg10;
+    use std::ops::{Div, MulAssign};
+
+    type UniPoly381 = DensePolynomial<<Bls12_381 as Pairing>::ScalarField>;
+
+    fn bytes<E: Pairing>(k: usize, nb_polynomials: usize) -> Vec<u8> {
+        let nb_bytes = k * nb_polynomials * (E::ScalarField::MODULUS_BIT_SIZE as usize / 8);
+        include_bytes!("../../assets/dragoon_133x133.png")[0..nb_bytes].to_vec()
+    }
+
+    #[allow(clippy::type_complexity)]
+    fn test_setup<E, P>(
+        bytes: &[u8],
+        k: usize,
+        n: usize,
+    ) -> Result<
+        (
+            Vec<Block<E>>,
+            (kzg10::VerifierKey<E>, E::G1),
+            (E::G1, E::G2),
+        ),
+        ark_poly_commit::Error,
+    >
+    where
+        E: Pairing,
+        P: DenseUVPolynomial<E::ScalarField, Point = E::ScalarField>,
+        for<'a, 'b> &'a P: Div<&'b P, Output = P>,
+    {
+        let degree = k - 1;
+        let vector_length_bound =
+            bytes.len() / (E::ScalarField::MODULUS_BIT_SIZE as usize / 8) / (degree + 1);
+
+        let params = setup::<E, P>(degree, vector_length_bound)?;
+        let (_, vk_psi) = trim(params.kzg.clone(), degree);
+
+        let elements = field::split_data_into_field_elements::<E::ScalarField>(bytes, k);
+        let mut polynomials = Vec::new();
+        for chunk in elements.chunks(k) {
+            polynomials.push(P::from_coefficients_vec(chunk.to_vec()))
+        }
+
+        let commit = commit(polynomials.clone(), params.clone()).unwrap();
+
+        let encoding_points = &(0..n)
+            .map(|i| E::ScalarField::from_le_bytes_mod_order(&i.to_le_bytes()))
+            .collect::<Vec<_>>();
+        let encoding_mat = Matrix::vandermonde_unchecked(encoding_points, k);
+        let shards = encode::<E::ScalarField>(bytes, &encoding_mat)
+            .unwrap_or_else(|_| panic!("could not encode"));
+
+        let blocks = prove::<E, P>(
+            commit,
+            polynomials,
+            shards,
+            encoding_points.clone(),
+            params.clone(),
+        )
+        .unwrap();
+
+        Ok((
+            blocks,
+            (vk_psi, params.ipa.tau_1),
+            (
+                params.kzg.powers_of_g[0].into_group(),
+                params.kzg.h.into_group(),
+            ),
+        ))
+    }
+
+    fn verify_template<E, P>(bytes: &[u8], k: usize, n: usize) -> Result<(), ark_poly_commit::Error>
+    where
+        E: Pairing,
+        P: DenseUVPolynomial<E::ScalarField, Point = E::ScalarField>,
+        for<'a, 'b> &'a P: Div<&'b P, Output = P>,
+    {
+        let (blocks, (vk_psi, tau_1), (g_1, g_2)) =
+            test_setup::<E, P>(bytes, k, n).expect("proof failed for bls12-381");
+
+        for (i, block) in blocks.iter().enumerate() {
+            assert!(super::verify::<E, P>(
+                block,
+                E::ScalarField::from_le_bytes_mod_order(&u32_to_u8_vec(i as u32)),
+                &vk_psi,
+                tau_1,
+                g_1,
+                g_2
+            )
+            .unwrap());
+        }
+
+        Ok(())
+    }
+
+    fn verify_with_errors_template<E, P>(
+        bytes: &[u8],
+        k: usize,
+        n: usize,
+    ) -> Result<(), ark_poly_commit::Error>
+    where
+        E: Pairing,
+        P: DenseUVPolynomial<E::ScalarField, Point = E::ScalarField>,
+        for<'a, 'b> &'a P: Div<&'b P, Output = P>,
+    {
+        let (blocks, (vk_psi, tau_1), (g_1, g_2)) =
+            test_setup::<E, P>(bytes, k, n).expect("proof failed for bls12-381");
+
+        for (i, block) in blocks.iter().enumerate() {
+            let mut b = block.clone();
+            // modify a field in the struct b to corrupt the block proof without corrupting the data serialization
+            let a = E::ScalarField::from_le_bytes_mod_order(&[123]);
+            b.ipa_proof.l_r[0].mul_assign(a.pow([4321_u64]));
+
+            assert!(
+                !super::verify::<E, P>(
+                    &b,
+                    E::ScalarField::from_le_bytes_mod_order(&u32_to_u8_vec(i as u32)),
+                    &vk_psi,
+                    tau_1,
+                    g_1,
+                    g_2
+                )
+                .unwrap(),
+                "aPlonK should fail for bls12-381, k = {} and a corrupted block",
+                k
+            );
+        }
+
+        Ok(())
+    }
+
+    #[test]
+    fn verify_2() {
+        verify_template::<Bls12_381, UniPoly381>(&bytes::<Bls12_381>(4, 2), 4, 6)
+            .expect("verification failed for bls12-381");
+    }
+
+    #[test]
+    fn verify_4() {
+        verify_template::<Bls12_381, UniPoly381>(&bytes::<Bls12_381>(4, 4), 4, 6)
+            .expect("verification failed for bls12-381");
+    }
+
+    #[test]
+    fn verify_8() {
+        verify_template::<Bls12_381, UniPoly381>(&bytes::<Bls12_381>(4, 8), 4, 6)
+            .expect("verification failed for bls12-381");
+    }
+
+    #[test]
+    fn verify_with_errors_2() {
+        verify_with_errors_template::<Bls12_381, UniPoly381>(&bytes::<Bls12_381>(4, 2), 4, 6)
+            .expect("verification failed for bls12-381");
+    }
+
+    #[test]
+    fn verify_with_errors_4() {
+        verify_with_errors_template::<Bls12_381, UniPoly381>(&bytes::<Bls12_381>(4, 4), 4, 6)
+            .expect("verification failed for bls12-381");
+    }
+
+    #[test]
+    fn verify_with_errors_8() {
+        verify_with_errors_template::<Bls12_381, UniPoly381>(&bytes::<Bls12_381>(4, 8), 4, 6)
+            .expect("verification failed for bls12-381");
+    }
+
+    // TODO: implement padding for aPlonK
+    #[ignore = "padding not supported by aPlonK"]
+    #[test]
+    fn verify_with_padding_test() {
+        let bytes = bytes::<Bls12_381>(4, 2);
+        verify_template::<Bls12_381, UniPoly381>(&bytes[0..(bytes.len() - 33)], 4, 6)
+            .expect("verification failed for bls12-381 with padding");
+    }
+}
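On the byte-count arithmetic used by the `bytes` helper above and by check.3: BLS12-381's scalar field has a 255-bit modulus, so `MODULUS_BIT_SIZE as usize / 8` gives 31 safe data bytes per field element:

```rust
use ark_bls12_381::Fr;
use ark_ff::PrimeField;

fn main() {
    assert_eq!(Fr::MODULUS_BIT_SIZE, 255);
    // 31 bytes always fit below the modulus, hence
    // nb_polynomials = shard.size / k / 31 in check.3
    assert_eq!(Fr::MODULUS_BIT_SIZE as usize / 8, 31);
}
```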
diff --git a/src/aplonk/polynomial.rs b/src/aplonk/polynomial.rs
new file mode 100644
index 0000000000000000000000000000000000000000..e51508e0c14add54e85e659d3aa7b7a7ed0a40a9
--- /dev/null
+++ b/src/aplonk/polynomial.rs
@@ -0,0 +1,195 @@
+use ark_ec::pairing::Pairing;
+use ark_poly::DenseUVPolynomial;
+use ark_std::One;
+use std::ops::{Div, Mul};
+
+/// convert a number to its binary representation with 0-padding to the right
+fn to_binary(number: usize, width: usize) -> Vec<u8> {
+    format!("{:0width$b}", number, width = width)
+        .bytes()
+        .rev()
+        .map(|byte| byte - b'0')
+        .collect()
+}
+
+/// compute the polynomial *g(X)* in [aPlonk from [Ambrona et al.]][aPlonk]
+///
+/// *g(X)* can be found at
+/// - page **13**. in *open.7*
+/// - page **13**. in *check.5*
+/// - page **15**. in *IPA.verify.4*
+///
+/// its theoretical formula is the following (modified version), with
+/// *\kappa = log_2(k)*:
+/// *g(X) = \Pi_{j=0}^{\kappa - 1}(u_j^{-1} + u_j X^{2^j})*
+///
+/// however this formula is not very convenient, so let's expand this and
+/// compute all the coefficients!
+/// when we do that on small examples:
+/// - *\kappa = 1*: *
+///     g(X) = (u_0^{-1} + u_0 X)
+///          = u_0^{-1} +
+///            u_0 X
+///   *
+/// - *\kappa = 2*: *
+///     g(X) = (u_0^{-1} + u_0 X)(u_1^{-1} + u_1 X^2)
+///          = u_1^{-1} u_0^{-1} +
+///            u_1^{-1} u_0 X +
+///            u_1 u_0^{-1} X^2 +
+///            u_1 u_0 X^3
+///   *
+/// - *\kappa = 3*: *
+///     g(X) = (u_0^{-1} + u_0 X)(u_1^{-1} + u_1 X^2)(u_2^{-1} + u_2 X^4)
+///          = u_2^{-1} u_1^{-1} u_0^{-1} +
+///            u_2^{-1} u_1^{-1} u_0 X +
+///            u_2^{-1} u_1 u_0^{-1} X^2 +
+///            u_2^{-1} u_1 u_0 X^3 +
+///            u_2 u_1^{-1} u_0^{-1} X^4 +
+///            u_2 u_1^{-1} u_0 X^5 +
+///            u_2 u_1 u_0^{-1} X^6 +
+///            u_2 u_1 u_0 X^7
+///   *
+///
+/// we can see that the *j*-th coefficient of *g(X)* for a given *\kappa* is
+/// a product of a combination of the *(u_i)* and their inverse elements,
+/// directly related to the binary representation of the power *j*, e.g.
+/// - with *\kappa = 3* and *j = 6*, the binary is *110* and the coefficient is
+///   *u_0^{-1} \times u_1 \times u_2*
+///
+/// [aPlonk]: https://eprint.iacr.org/2022/1352.pdf
+pub(super) fn compute_g<E, P>(
+    k: usize,
+    kappa: usize,
+    u: &[E::ScalarField],
+    u_inv: &[E::ScalarField],
+) -> P
+where
+    E: Pairing,
+    P: DenseUVPolynomial<E::ScalarField, Point = E::ScalarField>,
+    for<'a, 'b> &'a P: Div<&'b P, Output = P>,
+{
+    let coefficients = (0..k)
+        .map(|j| {
+            to_binary(j, kappa)
+                .iter()
+                .enumerate()
+                .map(|(i, bit)| if *bit == 1u8 { u[i] } else { u_inv[i] })
+                .fold(E::ScalarField::one(), |acc, it| acc.mul(it))
+        })
+        .collect::<Vec<P::Point>>();
+
+    P::from_coefficients_vec(coefficients)
+}
+
+#[cfg(test)]
+mod tests {
+    use std::ops::Div;
+    use std::ops::Mul;
+
+    use ark_bls12_381::Bls12_381;
+    use ark_ec::pairing::Pairing;
+    use ark_ff::Field;
+    use ark_poly::univariate::DensePolynomial;
+    use ark_poly::DenseUVPolynomial;
+    use ark_std::test_rng;
+    use ark_std::UniformRand;
+
+    use super::compute_g;
+    use super::to_binary;
+
+    type UniPoly381 = DensePolynomial<<Bls12_381 as Pairing>::ScalarField>;
+
+    #[test]
+    fn to_binary_conversion() {
+        assert_eq!(to_binary(2, 4), vec![0, 1, 0, 0]);
+        assert_eq!(to_binary(10, 4), vec![0, 1, 0, 1]);
+        assert_eq!(to_binary(5, 8), vec![1, 0, 1, 0, 0, 0, 0, 0]);
+    }
+
+    fn g_poly_computation_template<E, P>()
+    where
+        E: Pairing,
+        P: DenseUVPolynomial<E::ScalarField, Point = E::ScalarField>,
+        for<'a, 'b> &'a P: Div<&'b P, Output = P>,
+    {
+        let rng = &mut test_rng();
+
+        let u = [
+            E::ScalarField::rand(rng),
+            E::ScalarField::rand(rng),
+            E::ScalarField::rand(rng),
+            E::ScalarField::rand(rng),
+        ];
+        let u_inv: Vec<E::ScalarField> = u.iter().map(|u_i| u_i.inverse().unwrap()).collect();
+
+        let expected_coefficients = vec![u_inv[0], u[0]];
+        assert_eq!(
+            compute_g::<E, P>(2, 1, &u[..1], &u_inv[..1]),
+            P::from_coefficients_vec(expected_coefficients),
+            "computation of *g(X)* failed for kappa = {}",
+            1
+        );
+
+        #[rustfmt::skip]
+        let expected_coefficients = vec![
+            u_inv[0].mul(u_inv[1]),
+               u[0].mul(u_inv[1]),
+            u_inv[0].mul(   u[1]),
+               u[0].mul(   u[1]),
+        ];
+        assert_eq!(
+            compute_g::<E, P>(4, 2, &u[..2], &u_inv[..2]),
+            P::from_coefficients_vec(expected_coefficients),
+            "computation of *g(X)* failed for kappa = {}",
+            2
+        );
+
+        #[rustfmt::skip]
+        let expected_coefficients = vec![
+            u_inv[0].mul(u_inv[1]).mul(u_inv[2]),
+               u[0].mul(u_inv[1]).mul(u_inv[2]),
+            u_inv[0].mul(   u[1]).mul(u_inv[2]),
+               u[0].mul(   u[1]).mul(u_inv[2]),
+            u_inv[0].mul(u_inv[1]).mul(   u[2]),
+               u[0].mul(u_inv[1]).mul(   u[2]),
+            u_inv[0].mul(   u[1]).mul(   u[2]),
+               u[0].mul(   u[1]).mul(   u[2]),
+        ];
+        assert_eq!(
+            compute_g::<E, P>(8, 3, &u[..3], &u_inv[..3]),
+            P::from_coefficients_vec(expected_coefficients),
+            "computation of *g(X)* failed for kappa = {}",
+            3
+        );
+
+        #[rustfmt::skip]
+        let expected_coefficients = vec![
+            u_inv[0].mul(u_inv[1]).mul(u_inv[2]).mul(u_inv[3]),
+               u[0].mul(u_inv[1]).mul(u_inv[2]).mul(u_inv[3]),
+            u_inv[0].mul(   u[1]).mul(u_inv[2]).mul(u_inv[3]),
+               u[0].mul(   u[1]).mul(u_inv[2]).mul(u_inv[3]),
+            u_inv[0].mul(u_inv[1]).mul(   u[2]).mul(u_inv[3]),
+               u[0].mul(u_inv[1]).mul(   u[2]).mul(u_inv[3]),
+            u_inv[0].mul(   u[1]).mul(   u[2]).mul(u_inv[3]),
+               u[0].mul(   u[1]).mul(   u[2]).mul(u_inv[3]),
+            u_inv[0].mul(u_inv[1]).mul(u_inv[2]).mul(   u[3]),
+               u[0].mul(u_inv[1]).mul(u_inv[2]).mul(   u[3]),
+            u_inv[0].mul(   u[1]).mul(u_inv[2]).mul(   u[3]),
+               u[0].mul(   u[1]).mul(u_inv[2]).mul(   u[3]),
+            u_inv[0].mul(u_inv[1]).mul(   u[2]).mul(   u[3]),
+               u[0].mul(u_inv[1]).mul(   u[2]).mul(   u[3]),
+            u_inv[0].mul(   u[1]).mul(   u[2]).mul(   u[3]),
+               u[0].mul(   u[1]).mul(   u[2]).mul(   u[3]),
+        ];
+        assert_eq!(
+            compute_g::<E, P>(16, 4, &u[..4], &u_inv[..4]),
+            P::from_coefficients_vec(expected_coefficients),
+            "computation of *g(X)* failed for kappa = {}",
+            4
+        );
+    }
+
+    #[test]
+    fn g_poly_computation() {
+        g_poly_computation_template::<Bls12_381, UniPoly381>();
+    }
+}
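The coefficient rule of `compute_g` can be cross-checked numerically: the coefficient of *X^j* is the product, over the bits of *j*, of either *u_i* (bit set) or *u_i^{-1}* (bit clear). A toy over `f64` with hypothetical *u* values, which also shows that plain bit shifts could replace the string-based `to_binary`:

```rust
fn main() {
    let u = [2.0_f64, 5.0]; // kappa = 2, k = 4
    let coeff = |j: usize| -> f64 {
        (0..u.len())
            .map(|i| if (j >> i) & 1 == 1 { u[i] } else { 1.0 / u[i] })
            .product()
    };
    // expand g(X) = (1/u_0 + u_0 X)(1/u_1 + u_1 X^2) by hand
    let expected = [
        (1.0 / u[0]) * (1.0 / u[1]), // X^0
        u[0] * (1.0 / u[1]),         // X^1
        (1.0 / u[0]) * u[1],         // X^2
        u[0] * u[1],                 // X^3
    ];
    for j in 0..4 {
        assert!((coeff(j) - expected[j]).abs() < 1e-12);
    }
}
```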
+///
+/// [aPlonk]: https://eprint.iacr.org/2022/1352.pdf
+pub(super) fn hash<E: Pairing>(
+    l_g_j: PairingOutput<E>,
+    r_g_j: PairingOutput<E>,
+    l_r_j: E::G1,
+    r_r_j: E::G1,
+    ts: &[u8],
+) -> Result<E::ScalarField, SerializationError> {
+    let mut bytes = vec![];
+    l_g_j.serialize_with_mode(&mut bytes, Compress::Yes)?;
+    r_g_j.serialize_with_mode(&mut bytes, Compress::Yes)?;
+    l_r_j.serialize_with_mode(&mut bytes, Compress::Yes)?;
+    r_r_j.serialize_with_mode(&mut bytes, Compress::Yes)?;
+    bytes.extend(ts);
+
+    Ok(E::ScalarField::from_le_bytes_mod_order(&Sha256::hash(
+        bytes.as_slice(),
+    )))
+}
diff --git a/src/conversions.rs b/src/conversions.rs
new file mode 100644
index 0000000000000000000000000000000000000000..e6ae5b93e32fff30bef968968a933fecf33cad0e
--- /dev/null
+++ b/src/conversions.rs
@@ -0,0 +1,21 @@
+#[cfg(test)]
+pub(crate) fn u32_to_u8_vec(num: u32) -> Vec<u8> {
+    vec![
+        (num & 0xFF) as u8,
+        ((num >> 8) & 0xFF) as u8,
+        ((num >> 16) & 0xFF) as u8,
+        ((num >> 24) & 0xFF) as u8,
+    ]
+}
+
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn u32_to_u8_conversion() {
+        assert_eq!(super::u32_to_u8_vec(0u32), vec![0u8, 0u8, 0u8, 0u8]);
+        assert_eq!(super::u32_to_u8_vec(1u32), vec![1u8, 0u8, 0u8, 0u8]);
+        assert_eq!(super::u32_to_u8_vec(256u32), vec![0u8, 1u8, 0u8, 0u8]);
+        assert_eq!(super::u32_to_u8_vec(65536u32), vec![0u8, 0u8, 1u8, 0u8]);
+        assert_eq!(super::u32_to_u8_vec(16777216u32), vec![0u8, 0u8, 0u8, 1u8]);
+    }
+}
diff --git a/src/kzg.rs b/src/kzg.rs
index a4dade2bba4b083d2ad405b68e0f9e01c879f736..44d10cf7b2f7e50dadba8cac4f7e8d6ee3797877 100644
--- a/src/kzg.rs
+++ b/src/kzg.rs
@@ -229,7 +229,7 @@ mod tests {
     use ark_std::test_rng;
     use std::ops::{Div, Mul};
 
-    use crate::{fec::encode, field, linalg::Matrix, zk::trim};
+    use crate::{conversions::u32_to_u8_vec, fec::encode, field, linalg::Matrix, zk::trim};
 
     type UniPoly381 = DensePolynomial<<Bls12_381 as Pairing>::ScalarField>;
 
@@ -238,24 +238,6 @@ mod tests {
         include_bytes!("../assets/dragoon_133x133.png")[0..nb_bytes].to_vec()
     }
 
-    fn u32_to_u8_vec(num: u32) -> Vec<u8> {
-        vec![
-            (num & 0xFF) as u8,
-            ((num >> 8) & 0xFF) as u8,
-            ((num >> 16) & 0xFF) as u8,
-            ((num >> 24) & 0xFF) as u8,
-        ]
-    }
-
-    #[test]
-    fn u32_to_u8_convertion() {
-        assert_eq!(u32_to_u8_vec(0u32), vec![0u8, 0u8, 0u8, 0u8]);
-        assert_eq!(u32_to_u8_vec(1u32), vec![1u8, 0u8, 0u8, 0u8]);
-        assert_eq!(u32_to_u8_vec(256u32), vec![0u8, 1u8, 0u8, 0u8]);
-        assert_eq!(u32_to_u8_vec(65536u32), vec![0u8, 0u8, 1u8, 0u8]);
-        assert_eq!(u32_to_u8_vec(16777216u32), vec![0u8, 0u8, 0u8, 1u8]);
-    }
-
     fn test_setup<E, P>(
         bytes: &[u8],
         k: usize,
diff --git a/src/lib.rs b/src/lib.rs
index 2a914cdb7d63b0a09ef3b30be0db3a8d17acd4fe..1e6f077a546f388b187525112b7a41e8ced3c1e8 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,6 +1,10 @@
 //! Komodo: Cryptographically-proven Erasure Coding
-#[cfg(feature = "kzg")]
+#[cfg(any(feature = "kzg", feature = "aplonk"))]
 mod algebra;
+#[cfg(feature = "aplonk")]
+pub mod aplonk;
+#[cfg(any(feature = "kzg", feature = "aplonk"))]
+mod conversions;
 pub mod error;
 pub mod fec;
 pub mod field;
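For reference, `u32_to_u8_vec` is a plain little-endian serialization, equivalent to std's `to_le_bytes`:

```rust
fn u32_to_u8_vec(num: u32) -> Vec<u8> {
    vec![
        (num & 0xFF) as u8,
        ((num >> 8) & 0xFF) as u8,
        ((num >> 16) & 0xFF) as u8,
        ((num >> 24) & 0xFF) as u8,
    ]
}

fn main() {
    assert_eq!(u32_to_u8_vec(0x0102_0304), vec![4, 3, 2, 1]); // least byte first
    assert_eq!(u32_to_u8_vec(256), 256u32.to_le_bytes().to_vec()); // std equivalent
}
```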