diff --git a/Cargo.toml b/Cargo.toml
index af1c6f889a6fc81eeb988ac84ddb66bbdfa40c1a..cb61aef17efc92101aae71f9504197e69d6c52d9 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -11,7 +11,6 @@ ark-bls12-381 = "0.4.0"
 ark-ec = "0.4.2"
 ark-ff = "0.4.2"
 ark-poly = "0.4.2"
-ark-poly-commit = "0.4.0"
 ark-serialize = "0.4.2"
 ark-std = "0.4.0"
 rand = "0.8.5"
diff --git a/benches/linalg.rs b/benches/linalg.rs
index 21528738804647952acc5c97c1cf4a847c08fe30..437b176fecc72baa8339a4e203bf2356f44a022d 100644
--- a/benches/linalg.rs
+++ b/benches/linalg.rs
@@ -1,52 +1,52 @@
-use ark_bls12_381::Bls12_381;
-use ark_ec::pairing::Pairing;
+use ark_bls12_381::Fr;
+use ark_ff::PrimeField;
 use criterion::{black_box, criterion_group, criterion_main, Criterion};
 
 use komodo::linalg::Matrix;
 
-fn inverse_template<E: Pairing>(c: &mut Criterion, n: usize) {
-    let matrix = Matrix::<E::ScalarField>::random(n, n);
+fn inverse_template<F: PrimeField>(c: &mut Criterion, n: usize) {
+    let matrix = Matrix::<F>::random(n, n);
 
     c.bench_function(
-        &format!("inverse {}x{} on {}", n, n, std::any::type_name::<E>()),
+        &format!("inverse {}x{} on {}", n, n, std::any::type_name::<F>()),
         |b| b.iter(|| matrix.invert().unwrap()),
     );
 }
 
 fn inverse(c: &mut Criterion) {
     for n in [10, 15, 20, 30, 40, 60, 80, 120, 160, 240, 320] {
-        inverse_template::<Bls12_381>(c, black_box(n));
+        inverse_template::<Fr>(c, black_box(n));
     }
 }
 
-fn transpose_template<E: Pairing>(c: &mut Criterion, n: usize) {
-    let matrix = Matrix::<E::ScalarField>::random(n, n);
+fn transpose_template<F: PrimeField>(c: &mut Criterion, n: usize) {
+    let matrix = Matrix::<F>::random(n, n);
 
     c.bench_function(
-        &format!("transpose {}x{} on {}", n, n, std::any::type_name::<E>()),
+        &format!("transpose {}x{} on {}", n, n, std::any::type_name::<F>()),
         |b| b.iter(|| matrix.transpose()),
     );
 }
 
 fn transpose(c: &mut Criterion) {
     for n in [10, 15, 20, 30, 40, 60, 80, 120, 160, 240, 320] {
-        transpose_template::<Bls12_381>(c, black_box(n));
+        transpose_template::<Fr>(c, black_box(n));
     }
 }
 
-fn mul_template<E: Pairing>(c: &mut Criterion, n: usize) {
-    let mat_a = Matrix::<E::ScalarField>::random(n, n);
-    let mat_b = Matrix::<E::ScalarField>::random(n, n);
+fn mul_template<F: PrimeField>(c: &mut Criterion, n: usize) {
+    let mat_a = Matrix::<F>::random(n, n);
+    let mat_b = Matrix::<F>::random(n, n);
 
     c.bench_function(
-        &format!("mul {}x{} on {}", n, n, std::any::type_name::<E>()),
+        &format!("mul {}x{} on {}", n, n, std::any::type_name::<F>()),
         |b| b.iter(|| mat_a.mul(&mat_b)),
     );
 }
 
 fn mul(c: &mut Criterion) {
     for n in [10, 15, 20, 30, 40, 60, 80, 120, 160, 240, 320] {
-        mul_template::<Bls12_381>(c, black_box(n));
+        mul_template::<Fr>(c, black_box(n));
     }
 }
 
diff --git a/benches/recoding.rs b/benches/recoding.rs
index 5769f10c624fb0ac0d0f06a8c982439beda41fe1..ed237bddd043ccbb4125e8bacacffc415bb42562 100644
--- a/benches/recoding.rs
+++ b/benches/recoding.rs
@@ -1,5 +1,4 @@
-use ark_bls12_381::Bls12_381;
-use ark_ec::pairing::Pairing;
+use ark_bls12_381::Fr;
 use ark_ff::PrimeField;
 
 use rand::Rng;
@@ -11,34 +10,33 @@ use komodo::{
 
 use criterion::{criterion_group, criterion_main, Criterion};
 
-fn to_curve<E: Pairing>(n: u128) -> E::ScalarField {
-    E::ScalarField::from_le_bytes_mod_order(&n.to_le_bytes())
+fn to_curve<F: PrimeField>(n: u128) -> F {
+    F::from_le_bytes_mod_order(&n.to_le_bytes())
 }
 
-fn create_fake_shard<E: Pairing>(nb_bytes: usize, k: usize) -> Shard<E> {
+fn create_fake_shard<F: PrimeField>(nb_bytes: usize, k: usize) -> Shard<F> {
     let mut rng = rand::thread_rng();
     let bytes: Vec<u8> = (0..nb_bytes).map(|_| rng.gen::<u8>()).collect();
 
-    let linear_combination: Vec<E::ScalarField> =
-        (0..k).map(|_| to_curve::<E>(rng.gen::<u128>())).collect();
+    let linear_combination: Vec<F> = (0..k).map(|_| to_curve::<F>(rng.gen::<u128>())).collect();
 
     Shard {
         k: k as u32,
         linear_combination,
         hash: vec![],
-        data: field::split_data_into_field_elements::<E>(&bytes, 1),
+        data: field::split_data_into_field_elements::<F>(&bytes, 1),
         size: 0,
     }
 }
 
-fn bench_template<E: Pairing>(c: &mut Criterion, nb_bytes: usize, k: usize, nb_shards: usize) {
-    let shards: Vec<Shard<E>> = (0..nb_shards)
+fn bench_template<F: PrimeField>(c: &mut Criterion, nb_bytes: usize, k: usize, nb_shards: usize) {
+    let shards: Vec<Shard<F>> = (0..nb_shards)
         .map(|_| create_fake_shard(nb_bytes, k))
         .collect();
 
     let mut rng = rand::thread_rng();
-    let coeffs: Vec<E::ScalarField> = (0..nb_shards)
-        .map(|_| to_curve::<E>(rng.gen::<u128>()))
+    let coeffs: Vec<F> = (0..nb_shards)
+        .map(|_| to_curve::<F>(rng.gen::<u128>()))
         .collect();
 
     c.bench_function(
@@ -54,7 +52,7 @@ fn criterion_benchmark(c: &mut Criterion) {
     for nb_bytes in [1, 1_024, 1_024 * 1_024] {
         for nb_shards in [2, 4, 8, 16] {
             for k in [2, 4, 8, 16] {
-                bench_template::<Bls12_381>(c, nb_bytes, k, nb_shards);
+                bench_template::<Fr>(c, nb_bytes, k, nb_shards);
             }
         }
     }
diff --git a/benches/setup.rs b/benches/setup.rs
index 3df93d889511cdebcc90200b6f47bad1c9783e48..c95b9d178d054e420d3a3d1e83273782a7152915 100644
--- a/benches/setup.rs
+++ b/benches/setup.rs
@@ -1,36 +1,41 @@
 use std::ops::Div;
 
-use ark_bls12_381::Bls12_381;
-use ark_ec::pairing::Pairing;
+use ark_bls12_381::{Fr, G1Projective};
+use ark_ec::CurveGroup;
+use ark_ff::PrimeField;
 use ark_poly::univariate::DensePolynomial;
 use ark_poly::DenseUVPolynomial;
 
-use ark_poly_commit::kzg10::Powers;
 use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Compress, Validate};
+use ark_std::test_rng;
 use criterion::{black_box, criterion_group, criterion_main, Criterion};
+use komodo::zk::{self, Powers};
 
-type UniPoly12_381 = DensePolynomial<<Bls12_381 as Pairing>::ScalarField>;
+type UniPoly12_381 = DensePolynomial<Fr>;
 
-fn setup_template<E, P>(c: &mut Criterion, nb_bytes: usize)
+fn setup_template<F, G, P>(c: &mut Criterion, nb_bytes: usize)
 where
-    E: Pairing,
-    P: DenseUVPolynomial<E::ScalarField, Point = E::ScalarField>,
+    F: PrimeField,
+    G: CurveGroup<ScalarField = F>,
+    P: DenseUVPolynomial<F, Point = F>,
     for<'a, 'b> &'a P: Div<&'b P, Output = P>,
 {
     let mut group = c.benchmark_group("setup");
 
+    let rng = &mut test_rng();
+
     group.bench_function(
-        &format!("setup {} on {}", nb_bytes, std::any::type_name::<E>()),
-        |b| b.iter(|| komodo::setup::random::<E, P>(nb_bytes).unwrap()),
+        &format!("setup {} on {}", nb_bytes, std::any::type_name::<F>()),
+        |b| b.iter(|| zk::setup::<_, F, G>(nb_bytes, rng).unwrap()),
     );
 
-    let setup = komodo::setup::random::<E, P>(nb_bytes).unwrap();
+    let setup = zk::setup::<_, F, G>(nb_bytes, rng).unwrap();
 
     group.bench_function(
         &format!(
             "serializing with compression {} on {}",
             nb_bytes,
-            std::any::type_name::<E>()
+            std::any::type_name::<F>()
         ),
         |b| {
             b.iter(|| {
@@ -46,7 +51,7 @@ where
         &format!(
             "serializing with no compression {} on {}",
             nb_bytes,
-            std::any::type_name::<E>()
+            std::any::type_name::<F>()
         ),
         |b| {
             b.iter(|| {
@@ -80,7 +85,7 @@ where
                 Validate::Yes => "validation",
                 Validate::No => "no validation",
             },
-            std::any::type_name::<E>(),
+            std::any::type_name::<F>(),
             serialized.len(),
         );
 
@@ -96,11 +101,15 @@ where
                     Validate::No => "no validation",
                 },
                 nb_bytes,
-                std::any::type_name::<E>()
+                std::any::type_name::<F>()
             ),
             |b| {
                 b.iter(|| {
-                    Powers::<Bls12_381>::deserialize_with_mode(&serialized[..], compress, validate)
+                    Powers::<Fr, G1Projective>::deserialize_with_mode(
+                        &serialized[..],
+                        compress,
+                        validate,
+                    )
                 })
             },
         );
@@ -111,7 +120,7 @@ where
 
 fn setup(c: &mut Criterion) {
     for n in [1, 2, 4, 8, 16] {
-        setup_template::<Bls12_381, UniPoly12_381>(c, black_box(n * 1024));
+        setup_template::<Fr, G1Projective, UniPoly12_381>(c, black_box(n * 1024));
     }
 }
 
diff --git a/examples/bench_setup_size.rs b/examples/bench_setup_size.rs
index 1c9321bdc168e16154a4866108fccc1999d6fc92..8e614a562851bf3476cb434f983ce6f7e0083ea8 100644
--- a/examples/bench_setup_size.rs
+++ b/examples/bench_setup_size.rs
@@ -1,21 +1,27 @@
 use std::ops::Div;
 
-use ark_bls12_381::Bls12_381;
-use ark_ec::pairing::Pairing;
+use ark_bls12_381::{Fr, G1Projective};
+use ark_ec::CurveGroup;
+use ark_ff::PrimeField;
 use ark_poly::univariate::DensePolynomial;
 use ark_poly::DenseUVPolynomial;
 
 use ark_serialize::{CanonicalSerialize, Compress, Validate};
+use ark_std::test_rng;
+use komodo::zk;
 
-type UniPoly12_381 = DensePolynomial<<Bls12_381 as Pairing>::ScalarField>;
+type UniPoly12_381 = DensePolynomial<Fr>;
 
-fn setup_template<E, P>(nb_bytes: usize)
+fn setup_template<F, G, P>(nb_bytes: usize)
 where
-    E: Pairing,
-    P: DenseUVPolynomial<E::ScalarField, Point = E::ScalarField>,
+    F: PrimeField,
+    G: CurveGroup<ScalarField = F>,
+    P: DenseUVPolynomial<F, Point = F>,
     for<'a, 'b> &'a P: Div<&'b P, Output = P>,
 {
-    let setup = komodo::setup::random::<E, P>(nb_bytes).unwrap();
+    let rng = &mut test_rng();
+
+    let setup = zk::setup::<_, F, G>(nb_bytes, rng).unwrap();
 
     for (compress, validate) in [
         (Compress::Yes, Validate::Yes),
@@ -39,7 +45,7 @@ where
                 Validate::No => "no validation",
             },
             nb_bytes,
-            std::any::type_name::<E>(),
+            std::any::type_name::<F>(),
             serialized.len(),
         );
     }
@@ -47,6 +53,6 @@ where
 
 fn main() {
     for n in [1, 2, 4, 8, 16] {
-        setup_template::<Bls12_381, UniPoly12_381>(n * 1024);
+        setup_template::<Fr, G1Projective, UniPoly12_381>(n * 1024);
     }
 }
diff --git a/src/error.rs b/src/error.rs
index fc8a992f50003cf711e1f1e1eddc6b51237d6477..d5aae3a73d1de4b146d2f2e376aa532148a52c08 100644
--- a/src/error.rs
+++ b/src/error.rs
@@ -14,6 +14,10 @@ pub enum KomodoError {
     TooFewShards(usize, usize),
     #[error("Blocks are incompatible: {0}")]
     IncompatibleBlocks(String),
+    #[error("Degree is zero")]
+    DegreeIsZero,
+    #[error("too many coefficients: max is {0}, found {0}")]
+    TooFewPowersInTrustedSetup(usize, usize),
     #[error("Another error: {0}")]
     Other(String),
 }
diff --git a/src/fec.rs b/src/fec.rs
index 328830fd930aafdc03beffa9da6a5bf8671a99b0..da998169207c4e786311403da14fd1a63e75f118 100644
--- a/src/fec.rs
+++ b/src/fec.rs
@@ -1,9 +1,7 @@
 //! a module to encode, recode and decode shards of data with FEC methods
-use std::ops::{Add, Mul};
 
-use ark_ec::pairing::Pairing;
+use ark_ff::PrimeField;
 use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
-use ark_std::{One, Zero};
 use rs_merkle::algorithms::Sha256;
 use rs_merkle::Hasher;
 
@@ -19,17 +17,17 @@ use crate::linalg::Matrix;
 ///   support for _recoding_
 /// - the hash and the size represent the original data
 #[derive(Debug, Default, Clone, PartialEq, CanonicalSerialize, CanonicalDeserialize)]
-pub struct Shard<E: Pairing> {
+pub struct Shard<F: PrimeField> {
     pub k: u32,
-    pub linear_combination: Vec<E::ScalarField>,
+    pub linear_combination: Vec<F>,
     pub hash: Vec<u8>,
-    pub data: Vec<E::ScalarField>,
+    pub data: Vec<F>,
     pub size: usize,
 }
 
-impl<E: Pairing> Shard<E> {
+impl<F: PrimeField> Shard<F> {
     /// compute the linear combination between two [`Shard`]s
-    pub fn combine(&self, alpha: E::ScalarField, other: &Self, beta: E::ScalarField) -> Self {
+    pub fn combine(&self, alpha: F, other: &Self, beta: F) -> Self {
         if alpha.is_zero() {
             return other.clone();
         } else if beta.is_zero() {
@@ -63,7 +61,7 @@ impl<E: Pairing> Shard<E> {
 /// >
 /// > returns [`None`] if number of shards is not the same as the number of
 /// > coefficients or if no shards are provided.
-pub fn combine<E: Pairing>(shards: &[Shard<E>], coeffs: &[E::ScalarField]) -> Option<Shard<E>> {
+pub fn combine<F: PrimeField>(shards: &[Shard<F>], coeffs: &[F]) -> Option<Shard<F>> {
     if shards.len() != coeffs.len() {
         return None;
     }
@@ -76,7 +74,7 @@ pub fn combine<E: Pairing>(shards: &[Shard<E>], coeffs: &[E::ScalarField]) -> Op
         .zip(coeffs.iter())
         .skip(1)
         .fold((shards[0].clone(), coeffs[0]), |(acc_s, acc_c), (s, c)| {
-            (acc_s.combine(acc_c, s, *c), E::ScalarField::one())
+            (acc_s.combine(acc_c, s, *c), F::one())
         });
     Some(s)
 }
@@ -86,16 +84,16 @@ pub fn combine<E: Pairing>(shards: &[Shard<E>], coeffs: &[E::ScalarField]) -> Op
 /// > **Note**
 /// > the input data and the encoding matrix should have compatible shapes,
 /// > otherwise, an error might be thrown to the caller.
-pub fn encode<E: Pairing>(
+pub fn encode<F: PrimeField>(
     data: &[u8],
-    encoding_mat: &Matrix<E::ScalarField>,
-) -> Result<Vec<Shard<E>>, KomodoError> {
+    encoding_mat: &Matrix<F>,
+) -> Result<Vec<Shard<F>>, KomodoError> {
     let hash = Sha256::hash(data).to_vec();
 
     let k = encoding_mat.height;
 
     let source_shards = Matrix::from_vec_vec(
-        field::split_data_into_field_elements::<E>(data, k)
+        field::split_data_into_field_elements::<F>(data, k)
             .chunks(k)
             .map(|c| c.to_vec())
             .collect(),
@@ -124,7 +122,7 @@ pub fn encode<E: Pairing>(
 /// > this function might fail in a variety of cases
 /// > - if there are too few shards
 /// > - if there are linear dependencies between shards
-pub fn decode<E: Pairing>(shards: Vec<Shard<E>>) -> Result<Vec<u8>, KomodoError> {
+pub fn decode<F: PrimeField>(shards: Vec<Shard<F>>) -> Result<Vec<u8>, KomodoError> {
     let k = shards[0].k;
     let np = shards.len();
 
@@ -150,17 +148,15 @@ pub fn decode<E: Pairing>(shards: Vec<Shard<E>>) -> Result<Vec<u8>, KomodoError>
 
     let source_shards = encoding_mat.invert()?.mul(&shard_mat)?.transpose().elements;
 
-    let mut bytes = field::merge_elements_into_bytes::<E>(&source_shards);
+    let mut bytes = field::merge_elements_into_bytes::<F>(&source_shards);
     bytes.resize(shards[0].size, 0);
     Ok(bytes)
 }
 
 #[cfg(test)]
 mod tests {
-    use ark_bls12_381::Bls12_381;
-    use ark_ec::pairing::Pairing;
+    use ark_bls12_381::Fr;
     use ark_ff::PrimeField;
-    use ark_std::{One, Zero};
 
     use crate::{
         fec::{decode, encode, Shard},
@@ -174,27 +170,27 @@ mod tests {
         include_bytes!("../tests/dragoon_32x32.png").to_vec()
     }
 
-    fn to_curve<E: Pairing>(n: u128) -> E::ScalarField {
-        E::ScalarField::from_le_bytes_mod_order(&n.to_le_bytes())
+    fn to_curve<F: PrimeField>(n: u128) -> F {
+        F::from_le_bytes_mod_order(&n.to_le_bytes())
     }
 
-    fn end_to_end_template<E: Pairing>(data: &[u8], k: usize, n: usize) {
+    fn end_to_end_template<F: PrimeField>(data: &[u8], k: usize, n: usize) {
         let test_case = format!("TEST | data: {} bytes, k: {}, n: {}", data.len(), k, n);
         assert_eq!(
             data,
-            decode::<E>(encode(data, &Matrix::random(k, n)).unwrap()).unwrap(),
+            decode::<F>(encode(data, &Matrix::random(k, n)).unwrap()).unwrap(),
             "{test_case}"
         );
     }
 
     /// k should be at least 5
-    fn end_to_end_with_recoding_template<E: Pairing>(data: &[u8], k: usize, n: usize) {
+    fn end_to_end_with_recoding_template<F: PrimeField>(data: &[u8], k: usize, n: usize) {
         let mut shards = encode(data, &Matrix::random(k, n)).unwrap();
-        shards[1] = shards[2].combine(to_curve::<E>(7), &shards[4], to_curve::<E>(6));
-        shards[2] = shards[1].combine(to_curve::<E>(5), &shards[3], to_curve::<E>(4));
+        shards[1] = shards[2].combine(to_curve::<F>(7), &shards[4], to_curve::<F>(6));
+        shards[2] = shards[1].combine(to_curve::<F>(5), &shards[3], to_curve::<F>(4));
         assert_eq!(
             data,
-            decode::<E>(shards).unwrap(),
+            decode::<F>(shards).unwrap(),
             "TEST | data: {} bytes, k: {}, n: {}",
             data.len(),
             k,
@@ -204,15 +200,15 @@ mod tests {
 
     // NOTE: this is part of an experiment, to be honest, to be able to see how
     // much these tests could be refactored and simplified
-    fn run_template<E, F>(test: F)
+    fn run_template<F, Fun>(test: Fun)
     where
-        E: Pairing,
-        F: Fn(&[u8], usize, usize),
+        F: PrimeField,
+        Fun: Fn(&[u8], usize, usize),
     {
         let bytes = bytes();
         let (k, n) = (3, 5);
 
-        let modulus_byte_size = E::ScalarField::MODULUS_BIT_SIZE as usize / 8;
+        let modulus_byte_size = F::MODULUS_BIT_SIZE as usize / 8;
         // NOTE: starting at `modulus_byte_size * (k - 1) + 1` to include at least _k_ elements
         for b in (modulus_byte_size * (k - 1) + 1)..bytes.len() {
             test(&bytes[..b], k, n);
@@ -221,69 +217,64 @@ mod tests {
 
     #[test]
     fn end_to_end() {
-        run_template::<Bls12_381, _>(end_to_end_template::<Bls12_381>);
+        run_template::<Fr, _>(end_to_end_template::<Fr>);
     }
 
     #[test]
     fn end_to_end_with_recoding() {
-        run_template::<Bls12_381, _>(end_to_end_with_recoding_template::<Bls12_381>);
+        run_template::<Fr, _>(end_to_end_with_recoding_template::<Fr>);
     }
 
-    fn create_fake_shard<E: Pairing>(
-        linear_combination: &[E::ScalarField],
-        bytes: &[u8],
-    ) -> Shard<E> {
+    fn create_fake_shard<F: PrimeField>(linear_combination: &[F], bytes: &[u8]) -> Shard<F> {
         Shard {
             k: 2,
             linear_combination: linear_combination.to_vec(),
             hash: vec![],
-            data: field::split_data_into_field_elements::<E>(bytes, 1),
+            data: field::split_data_into_field_elements::<F>(bytes, 1),
             size: 0,
         }
     }
 
-    fn recoding_template<E: Pairing>() {
-        let a: Shard<E> =
-            create_fake_shard(&[E::ScalarField::one(), E::ScalarField::zero()], &[1, 2, 3]);
-        let b: Shard<E> =
-            create_fake_shard(&[E::ScalarField::zero(), E::ScalarField::one()], &[4, 5, 6]);
+    fn recoding_template<F: PrimeField>() {
+        let a: Shard<F> = create_fake_shard(&[F::one(), F::zero()], &[1, 2, 3]);
+        let b: Shard<F> = create_fake_shard(&[F::zero(), F::one()], &[4, 5, 6]);
 
-        let c = a.combine(to_curve::<E>(3), &b, to_curve::<E>(5));
+        let c = a.combine(to_curve::<F>(3), &b, to_curve::<F>(5));
 
         assert_eq!(
             c,
-            create_fake_shard(&[to_curve::<E>(3), to_curve::<E>(5),], &[23, 31, 39])
+            create_fake_shard(&[to_curve::<F>(3), to_curve::<F>(5),], &[23, 31, 39])
         );
 
         assert_eq!(
-            c.combine(to_curve::<E>(2), &a, to_curve::<E>(4),),
-            create_fake_shard(&[to_curve::<E>(10), to_curve::<E>(10),], &[50, 70, 90],)
+            c.combine(to_curve::<F>(2), &a, to_curve::<F>(4),),
+            create_fake_shard(&[to_curve::<F>(10), to_curve::<F>(10),], &[50, 70, 90],)
         );
     }
 
     #[test]
     fn recoding() {
-        recoding_template::<Bls12_381>();
+        recoding_template::<Fr>();
     }
 
-    fn combine_shards_template<E: Pairing>() {
-        let a = create_fake_shard::<E>(&[to_curve::<E>(1), to_curve::<E>(0)], &[1, 4, 7]);
-        let b = create_fake_shard::<E>(&[to_curve::<E>(0), to_curve::<E>(2)], &[2, 5, 8]);
-        let c = create_fake_shard::<E>(&[to_curve::<E>(3), to_curve::<E>(5)], &[3, 6, 9]);
+    fn combine_shards_template<F: PrimeField>() {
+        let a = create_fake_shard::<F>(&[to_curve::<F>(1), to_curve::<F>(0)], &[1, 4, 7]);
+        let b = create_fake_shard::<F>(&[to_curve::<F>(0), to_curve::<F>(2)], &[2, 5, 8]);
+        let c = create_fake_shard::<F>(&[to_curve::<F>(3), to_curve::<F>(5)], &[3, 6, 9]);
 
-        assert!(combine::<E>(&[], &[]).is_none());
-        assert!(combine::<E>(
+        assert!(combine::<F>(&[], &[]).is_none());
+        assert!(combine::<F>(
             &[a.clone(), b.clone(), c.clone()],
-            &[to_curve::<E>(1), to_curve::<E>(2)]
+            &[to_curve::<F>(1), to_curve::<F>(2)]
         )
         .is_none());
         assert_eq!(
-            combine::<E>(
+            combine::<F>(
                 &[a, b, c],
-                &[to_curve::<E>(1), to_curve::<E>(2), to_curve::<E>(3)]
+                &[to_curve::<F>(1), to_curve::<F>(2), to_curve::<F>(3)]
             ),
-            Some(create_fake_shard::<E>(
-                &[to_curve::<E>(10), to_curve::<E>(19)],
+            Some(create_fake_shard::<F>(
+                &[to_curve::<F>(10), to_curve::<F>(19)],
                 &[14, 32, 50]
             ))
         );
@@ -291,6 +282,6 @@ mod tests {
 
     #[test]
     fn combine_shards() {
-        combine_shards_template::<Bls12_381>();
+        combine_shards_template::<Fr>();
     }
 }
diff --git a/src/field.rs b/src/field.rs
index 7aaa6e9cc1dabc2d6a72cadc96fa3561b490a540..4bb055a9b54d415cf470081273d21fa1397cd805 100644
--- a/src/field.rs
+++ b/src/field.rs
@@ -1,28 +1,20 @@
 //! manipulate finite field elements
-use ark_ec::pairing::Pairing;
 use ark_ff::{BigInteger, PrimeField};
-use ark_std::One;
 
 /// split a sequence of raw bytes into valid field elements
 ///
 /// [`split_data_into_field_elements`] supports padding the output vector of
 /// elements by giving a number that needs to divide the length of the vector.
-pub fn split_data_into_field_elements<E: Pairing>(
-    bytes: &[u8],
-    modulus: usize,
-) -> Vec<E::ScalarField> {
-    let bytes_per_element = (E::ScalarField::MODULUS_BIT_SIZE as usize) / 8;
+pub fn split_data_into_field_elements<F: PrimeField>(bytes: &[u8], modulus: usize) -> Vec<F> {
+    let bytes_per_element = (F::MODULUS_BIT_SIZE as usize) / 8;
 
     let mut elements = Vec::new();
     for chunk in bytes.chunks(bytes_per_element) {
-        elements.push(E::ScalarField::from_le_bytes_mod_order(chunk));
+        elements.push(F::from_le_bytes_mod_order(chunk));
     }
 
     if elements.len() % modulus != 0 {
-        elements.resize(
-            (elements.len() / modulus + 1) * modulus,
-            E::ScalarField::one(),
-        );
+        elements.resize((elements.len() / modulus + 1) * modulus, F::one());
     }
 
     elements
@@ -31,7 +23,7 @@ pub fn split_data_into_field_elements<E: Pairing>(
 /// merges elliptic curve elements back into a sequence of bytes
 ///
 /// this is the inverse operation of [`split_data_into_field_elements`].
-pub(crate) fn merge_elements_into_bytes<E: Pairing>(elements: &[E::ScalarField]) -> Vec<u8> {
+pub(crate) fn merge_elements_into_bytes<F: PrimeField>(elements: &[F]) -> Vec<u8> {
     let mut bytes = vec![];
     for e in elements {
         let mut b = e.into_bigint().to_bytes_le();
@@ -44,10 +36,8 @@ pub(crate) fn merge_elements_into_bytes<E: Pairing>(elements: &[E::ScalarField])
 
 #[cfg(test)]
 mod tests {
-    use ark_bls12_381::Bls12_381;
-    use ark_ec::pairing::Pairing;
+    use ark_bls12_381::Fr;
     use ark_ff::PrimeField;
-    use ark_std::Zero;
 
     use crate::field::{self, merge_elements_into_bytes};
 
@@ -55,13 +45,17 @@ mod tests {
         include_bytes!("../tests/dragoon_32x32.png").to_vec()
     }
 
-    fn split_data_template<E: Pairing>(bytes: &[u8], modulus: usize, exact_length: Option<usize>) {
+    fn split_data_template<F: PrimeField>(
+        bytes: &[u8],
+        modulus: usize,
+        exact_length: Option<usize>,
+    ) {
         let test_case = format!(
             "TEST | modulus: {}, exact_length: {:?}",
             modulus, exact_length
         );
 
-        let elements = field::split_data_into_field_elements::<E>(bytes, modulus);
+        let elements = field::split_data_into_field_elements::<F>(bytes, modulus);
         assert!(
             elements.len() % modulus == 0,
             "number of elements should be divisible by {}, found {}\n{test_case}",
@@ -79,40 +73,39 @@ mod tests {
         }
 
         assert!(
-            !elements.iter().any(|&e| e == E::ScalarField::zero()),
+            !elements.iter().any(|&e| e == F::zero()),
             "elements should not contain any 0\n{test_case}"
         );
     }
 
     #[test]
     fn split_data() {
-        split_data_template::<Bls12_381>(&bytes(), 1, None);
-        split_data_template::<Bls12_381>(&bytes(), 8, None);
-        split_data_template::<Bls12_381>(&[], 1, None);
-        split_data_template::<Bls12_381>(&[], 8, None);
-
-        let nb_bytes = 11 * (<Bls12_381 as Pairing>::ScalarField::MODULUS_BIT_SIZE as usize / 8);
-        split_data_template::<Bls12_381>(&bytes()[..nb_bytes], 1, Some(11));
-        split_data_template::<Bls12_381>(&bytes()[..nb_bytes], 8, Some(16));
-
-        let nb_bytes =
-            11 * (<Bls12_381 as Pairing>::ScalarField::MODULUS_BIT_SIZE as usize / 8) - 10;
-        split_data_template::<Bls12_381>(&bytes()[..nb_bytes], 1, Some(11));
-        split_data_template::<Bls12_381>(&bytes()[..nb_bytes], 8, Some(16));
+        split_data_template::<Fr>(&bytes(), 1, None);
+        split_data_template::<Fr>(&bytes(), 8, None);
+        split_data_template::<Fr>(&[], 1, None);
+        split_data_template::<Fr>(&[], 8, None);
+
+        let nb_bytes = 11 * (Fr::MODULUS_BIT_SIZE as usize / 8);
+        split_data_template::<Fr>(&bytes()[..nb_bytes], 1, Some(11));
+        split_data_template::<Fr>(&bytes()[..nb_bytes], 8, Some(16));
+
+        let nb_bytes = 11 * (Fr::MODULUS_BIT_SIZE as usize / 8) - 10;
+        split_data_template::<Fr>(&bytes()[..nb_bytes], 1, Some(11));
+        split_data_template::<Fr>(&bytes()[..nb_bytes], 8, Some(16));
     }
 
-    fn split_and_merge_template<E: Pairing>(bytes: &[u8], modulus: usize) {
-        let elements = field::split_data_into_field_elements::<E>(bytes, modulus);
-        let mut actual = merge_elements_into_bytes::<E>(&elements);
+    fn split_and_merge_template<F: PrimeField>(bytes: &[u8], modulus: usize) {
+        let elements = field::split_data_into_field_elements::<F>(bytes, modulus);
+        let mut actual = merge_elements_into_bytes::<F>(&elements);
         actual.resize(bytes.len(), 0);
         assert_eq!(bytes, actual, "TEST | modulus: {modulus}");
     }
 
     #[test]
     fn split_and_merge() {
-        split_and_merge_template::<Bls12_381>(&bytes(), 1);
-        split_and_merge_template::<Bls12_381>(&bytes(), 8);
-        split_and_merge_template::<Bls12_381>(&bytes(), 64);
-        split_and_merge_template::<Bls12_381>(&bytes(), 4096);
+        split_and_merge_template::<Fr>(&bytes(), 1);
+        split_and_merge_template::<Fr>(&bytes(), 8);
+        split_and_merge_template::<Fr>(&bytes(), 64);
+        split_and_merge_template::<Fr>(&bytes(), 4096);
     }
 }
diff --git a/src/fs.rs b/src/fs.rs
index cea8ee549f41f95b52442b04433474cd08ac423b..77332c419061e51c8f6bacde91e1a407500cd0da 100644
--- a/src/fs.rs
+++ b/src/fs.rs
@@ -7,7 +7,8 @@ use std::{
 
 use anyhow::Result;
 
-use ark_ec::pairing::Pairing;
+use ark_ec::CurveGroup;
+use ark_ff::PrimeField;
 use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Compress, Validate};
 use rs_merkle::{algorithms::Sha256, Hasher};
 use tracing::info;
@@ -53,8 +54,8 @@ pub fn dump(
 
 /// dump a bunch of blocks to the disk and return a JSON / NUON compatible table
 /// of all the hashes that have been dumped
-pub fn dump_blocks<E: Pairing>(
-    blocks: &[Block<E>],
+pub fn dump_blocks<F: PrimeField, G: CurveGroup<ScalarField = F>>(
+    blocks: &[Block<F, G>],
     block_dir: &PathBuf,
     compress: Compress,
 ) -> Result<String> {
@@ -76,12 +77,12 @@ pub fn dump_blocks<E: Pairing>(
 }
 
 /// read blocks from a list of block hashes
-pub fn read_blocks<E: Pairing>(
+pub fn read_blocks<F: PrimeField, G: CurveGroup<ScalarField = F>>(
     block_hashes: &[String],
     block_dir: &Path,
     compress: Compress,
     validate: Validate,
-) -> Result<Vec<(String, Block<E>)>> {
+) -> Result<Vec<(String, Block<F, G>)>> {
     block_hashes
         .iter()
         .map(|f| {
@@ -89,7 +90,7 @@ pub fn read_blocks<E: Pairing>(
             let s = std::fs::read(filename)?;
             Ok((
                 f.clone(),
-                Block::<E>::deserialize_with_mode(&s[..], compress, validate)?,
+                Block::<F, G>::deserialize_with_mode(&s[..], compress, validate)?,
             ))
         })
         .collect()
diff --git a/src/lib.rs b/src/lib.rs
index 75cf78a3865e1ac405b713e0b5cd4084de5558e0..5188707b5ef75beb6e4af3ffab45e4e4f8f08fb0 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,36 +1,37 @@
 //! Komodo: Cryptographically-proven Erasure Coding
 use std::ops::Div;
 
-use ark_ec::pairing::Pairing;
+use ark_ec::CurveGroup;
+use ark_ff::PrimeField;
 use ark_poly::DenseUVPolynomial;
-use ark_poly_commit::kzg10::{Commitment, Powers, Randomness, KZG10};
 use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
-use ark_std::{UniformRand, Zero};
-use fec::combine;
 use tracing::{debug, info};
 
-mod error;
+pub mod error;
 pub mod fec;
 pub mod field;
 pub mod fs;
 pub mod linalg;
-pub mod setup;
+pub mod zk;
 
-use error::KomodoError;
-
-use crate::linalg::Matrix;
+use crate::{
+    error::KomodoError,
+    fec::combine,
+    linalg::Matrix,
+    zk::{Commitment, Powers},
+};
 
 /// representation of a block of proven data.
 ///
 /// this is a wrapper around a [`fec::Shard`] with some additional cryptographic
 /// information that allows to prove the integrity of said shard.
 #[derive(Debug, Default, Clone, PartialEq, CanonicalSerialize, CanonicalDeserialize)]
-pub struct Block<E: Pairing> {
-    pub shard: fec::Shard<E>,
-    pub commit: Vec<Commitment<E>>,
+pub struct Block<F: PrimeField, G: CurveGroup<ScalarField = F>> {
+    pub shard: fec::Shard<F>,
+    pub commit: Vec<Commitment<F, G>>,
 }
 
-impl<E: Pairing> std::fmt::Display for Block<E> {
+impl<F: PrimeField, G: CurveGroup<ScalarField = F>> std::fmt::Display for Block<F, G> {
     fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
         write!(f, "{{")?;
         write!(f, "shard: {{")?;
@@ -79,33 +80,32 @@ impl<E: Pairing> std::fmt::Display for Block<E> {
     }
 }
 
-/// compute the commitments and randomnesses of a set of polynomials
+/// compute the commitments of a set of polynomials
 ///
 /// this function uses the commit scheme of KZG.
 ///
 /// > **Note**
-/// > - `powers` can be generated with functions like [`setup::random`]
+/// > - `powers` can be generated with functions like [`zk::setup`]
 /// > - if `polynomials` has length `n`, then [`commit`] will generate `n`
-/// >   commits and `n` randomnesses.
+/// >   commits.
 #[allow(clippy::type_complexity)]
-pub fn commit<E, P>(
-    powers: &Powers<E>,
+pub fn commit<F, G, P>(
+    powers: &Powers<F, G>,
     polynomials: &[P],
-) -> Result<(Vec<Commitment<E>>, Vec<Randomness<E::ScalarField, P>>), ark_poly_commit::Error>
+) -> Result<Vec<Commitment<F, G>>, KomodoError>
 where
-    E: Pairing,
-    P: DenseUVPolynomial<E::ScalarField, Point = E::ScalarField>,
+    F: PrimeField,
+    G: CurveGroup<ScalarField = F>,
+    P: DenseUVPolynomial<F, Point = F>,
     for<'a, 'b> &'a P: Div<&'b P, Output = P>,
 {
     let mut commits = Vec::new();
-    let mut randomnesses = Vec::new();
     for polynomial in polynomials {
-        let (commit, randomness) = KZG10::<E, P>::commit(powers, polynomial, None, None)?;
+        let commit = zk::commit(powers, polynomial)?;
         commits.push(commit);
-        randomnesses.push(randomness);
     }
 
-    Ok((commits, randomnesses))
+    Ok(commits)
 }
 
 /// compute encoded and proven blocks of data from some data and an encoding
@@ -113,14 +113,15 @@ where
 ///
 /// > **Note**
 /// > this is a wrapper around [`fec::encode`].
-pub fn encode<E, P>(
+pub fn encode<F, G, P>(
     bytes: &[u8],
-    encoding_mat: &Matrix<E::ScalarField>,
-    powers: &Powers<E>,
-) -> Result<Vec<Block<E>>, ark_poly_commit::Error>
+    encoding_mat: &Matrix<F>,
+    powers: &Powers<F, G>,
+) -> Result<Vec<Block<F, G>>, KomodoError>
 where
-    E: Pairing,
-    P: DenseUVPolynomial<E::ScalarField, Point = E::ScalarField>,
+    F: PrimeField,
+    G: CurveGroup<ScalarField = F>,
+    P: DenseUVPolynomial<F, Point = F>,
     for<'a, 'b> &'a P: Div<&'b P, Output = P>,
 {
     info!("encoding and proving {} bytes", bytes.len());
@@ -128,7 +129,7 @@ where
     let k = encoding_mat.height;
 
     debug!("splitting bytes into polynomials");
-    let elements = field::split_data_into_field_elements::<E>(bytes, k);
+    let elements = field::split_data_into_field_elements::<F>(bytes, k);
     let polynomials = elements
         .chunks(k)
         .map(|c| P::from_coefficients_vec(c.to_vec()))
@@ -145,7 +146,7 @@ where
         .collect::<Vec<P>>();
 
     debug!("committing the polynomials");
-    let (commits, _) = commit(powers, &polynomials_to_commit)?;
+    let commits = commit(powers, &polynomials_to_commit)?;
 
     Ok(fec::encode(bytes, encoding_mat)
         .unwrap() // TODO: don't unwrap here
@@ -166,13 +167,12 @@ where
 ///
 /// > **Note**
 /// > this is a wrapper around [`fec::combine`].
-pub fn recode<E: Pairing>(blocks: &[Block<E>]) -> Result<Option<Block<E>>, KomodoError> {
+pub fn recode<F: PrimeField, G: CurveGroup<ScalarField = F>>(
+    blocks: &[Block<F, G>],
+) -> Result<Option<Block<F, G>>, KomodoError> {
     let mut rng = rand::thread_rng();
 
-    let coeffs = blocks
-        .iter()
-        .map(|_| E::ScalarField::rand(&mut rng))
-        .collect::<Vec<_>>();
+    let coeffs = blocks.iter().map(|_| F::rand(&mut rng)).collect::<Vec<_>>();
 
     for (i, (b1, b2)) in blocks.iter().zip(blocks.iter().skip(1)).enumerate() {
         if b1.shard.k != b2.shard.k {
@@ -215,116 +215,128 @@ pub fn recode<E: Pairing>(blocks: &[Block<E>]) -> Result<Option<Block<E>>, Komod
 }
 
 /// verify that a single block of encoded and proven data is valid
-pub fn verify<E, P>(
-    block: &Block<E>,
-    verifier_key: &Powers<E>,
-) -> Result<bool, ark_poly_commit::Error>
+pub fn verify<F, G, P>(
+    block: &Block<F, G>,
+    verifier_key: &Powers<F, G>,
+) -> Result<bool, KomodoError>
 where
-    E: Pairing,
-    P: DenseUVPolynomial<E::ScalarField, Point = E::ScalarField>,
+    F: PrimeField,
+    G: CurveGroup<ScalarField = F>,
+    P: DenseUVPolynomial<F, Point = F>,
     for<'a, 'b> &'a P: Div<&'b P, Output = P>,
 {
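+    // recompute the commitment of the shard's data and check that it matches
+    // the same linear combination of the original commitments as the one
+    // stored in the shard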
     let elements = block.shard.data.clone();
     let polynomial = P::from_coefficients_vec(elements);
-    let (commit, _) = KZG10::<E, P>::commit(verifier_key, &polynomial, None, None)?;
+    let commit = zk::commit(verifier_key, &polynomial)?;
 
     let rhs = block
         .shard
         .linear_combination
         .iter()
         .enumerate()
-        .map(|(i, w)| Into::<E::G1>::into(block.commit[i].0) * w)
+        .map(|(i, w)| Into::<G>::into(block.commit[i].0) * w)
         .sum();
-    Ok(Into::<E::G1>::into(commit.0) == rhs)
+    Ok(Into::<G>::into(commit.0) == rhs)
 }
 
 #[cfg(test)]
 mod tests {
-    use std::ops::{Div, Mul};
+    use std::ops::Div;
 
-    use ark_bls12_381::Bls12_381;
-    use ark_ec::pairing::Pairing;
+    use ark_bls12_381::{Fr, G1Projective};
+    use ark_ec::CurveGroup;
     use ark_ff::{Field, PrimeField};
     use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial};
-    use ark_poly_commit::kzg10::Commitment;
+    use ark_std::test_rng;
 
     use crate::{
         encode,
+        error::KomodoError,
         fec::{decode, Shard},
         linalg::Matrix,
-        recode, setup, verify,
+        recode, verify,
+        zk::{setup, Commitment},
     };
 
-    type UniPoly381 = DensePolynomial<<Bls12_381 as Pairing>::ScalarField>;
+    type UniPoly381 = DensePolynomial<Fr>;
 
     fn bytes() -> Vec<u8> {
         include_bytes!("../tests/dragoon_133x133.png").to_vec()
     }
 
-    fn verify_template<E, P>(
-        bytes: &[u8],
-        encoding_mat: &Matrix<E::ScalarField>,
-    ) -> Result<(), ark_poly_commit::Error>
+    fn verify_template<F, G, P>(bytes: &[u8], encoding_mat: &Matrix<F>) -> Result<(), KomodoError>
     where
-        E: Pairing,
-        P: DenseUVPolynomial<E::ScalarField, Point = E::ScalarField>,
+        F: PrimeField,
+        G: CurveGroup<ScalarField = F>,
+        P: DenseUVPolynomial<F, Point = F>,
         for<'a, 'b> &'a P: Div<&'b P, Output = P>,
     {
-        let powers = setup::random(bytes.len())?;
-        let blocks = encode::<E, P>(bytes, encoding_mat, &powers)?;
+        let rng = &mut test_rng();
+
+        let powers = setup(bytes.len(), rng)?;
+        let blocks = encode::<F, G, P>(bytes, encoding_mat, &powers)?;
 
         for block in &blocks {
-            assert!(verify::<E, P>(block, &powers)?);
+            assert!(verify::<F, G, P>(block, &powers)?);
         }
 
         Ok(())
     }
 
-    fn verify_with_errors_template<E, P>(
+    fn verify_with_errors_template<F, G, P>(
         bytes: &[u8],
-        encoding_mat: &Matrix<E::ScalarField>,
-    ) -> Result<(), ark_poly_commit::Error>
+        encoding_mat: &Matrix<F>,
+    ) -> Result<(), KomodoError>
     where
-        E: Pairing,
-        P: DenseUVPolynomial<E::ScalarField, Point = E::ScalarField>,
+        F: PrimeField,
+        G: CurveGroup<ScalarField = F>,
+        P: DenseUVPolynomial<F, Point = F>,
         for<'a, 'b> &'a P: Div<&'b P, Output = P>,
     {
-        let powers = setup::random(bytes.len())?;
-        let blocks = encode::<E, P>(bytes, encoding_mat, &powers)?;
+        let rng = &mut test_rng();
+
+        let powers = setup(bytes.len(), rng)?;
+        let blocks = encode::<F, G, P>(bytes, encoding_mat, &powers)?;
 
         for block in &blocks {
-            assert!(verify::<E, P>(block, &powers)?);
+            assert!(verify::<F, G, P>(block, &powers)?);
         }
 
         let mut corrupted_block = blocks[0].clone();
         // modify a field in the struct b to corrupt the block proof without corrupting the data serialization
-        let a = E::ScalarField::from_le_bytes_mod_order(&123u128.to_le_bytes());
-        let mut commits: Vec<E::G1> = corrupted_block.commit.iter().map(|c| c.0.into()).collect();
+        let a = F::from_le_bytes_mod_order(&123u128.to_le_bytes());
+        let mut commits: Vec<G> = corrupted_block.commit.iter().map(|c| c.0.into()).collect();
         commits[0] = commits[0].mul(a.pow([4321_u64]));
         corrupted_block.commit = commits.iter().map(|&c| Commitment(c.into())).collect();
 
-        assert!(!verify::<E, P>(&corrupted_block, &powers)?);
+        assert!(!verify::<F, G, P>(&corrupted_block, &powers)?);
 
         Ok(())
     }
 
-    fn verify_recoding_template<E, P>(
+    fn verify_recoding_template<F, G, P>(
         bytes: &[u8],
-        encoding_mat: &Matrix<E::ScalarField>,
-    ) -> Result<(), ark_poly_commit::Error>
+        encoding_mat: &Matrix<F>,
+    ) -> Result<(), KomodoError>
     where
-        E: Pairing,
-        P: DenseUVPolynomial<E::ScalarField, Point = E::ScalarField>,
+        F: PrimeField,
+        G: CurveGroup<ScalarField = F>,
+        P: DenseUVPolynomial<F, Point = F>,
         for<'a, 'b> &'a P: Div<&'b P, Output = P>,
     {
-        let powers = setup::random(bytes.len())?;
-        let blocks = encode::<E, P>(bytes, encoding_mat, &powers)?;
+        let rng = &mut test_rng();
+
+        let powers = setup(bytes.len(), rng)?;
+        let blocks = encode::<F, G, P>(bytes, encoding_mat, &powers)?;
 
-        assert!(verify::<E, P>(
+        assert!(verify::<F, G, P>(
             &recode(&blocks[2..=3]).unwrap().unwrap(),
             &powers
         )?);
-        assert!(verify::<E, P>(
+        assert!(verify::<F, G, P>(
             &recode(&[blocks[3].clone(), blocks[5].clone()])
                 .unwrap()
                 .unwrap(),
@@ -334,37 +343,43 @@ mod tests {
         Ok(())
     }
 
-    fn end_to_end_template<E, P>(
+    fn end_to_end_template<F, G, P>(
         bytes: &[u8],
-        encoding_mat: &Matrix<E::ScalarField>,
-    ) -> Result<(), ark_poly_commit::Error>
+        encoding_mat: &Matrix<F>,
+    ) -> Result<(), KomodoError>
     where
-        E: Pairing,
-        P: DenseUVPolynomial<E::ScalarField, Point = E::ScalarField>,
+        F: PrimeField,
+        G: CurveGroup<ScalarField = F>,
+        P: DenseUVPolynomial<F, Point = F>,
         for<'a, 'b> &'a P: Div<&'b P, Output = P>,
     {
-        let powers = setup::random(bytes.len())?;
-        let blocks: Vec<Shard<E>> = encode::<E, P>(bytes, encoding_mat, &powers)?
+        let rng = &mut test_rng();
+
+        let powers = setup(bytes.len(), rng)?;
+        let blocks: Vec<Shard<F>> = encode::<F, G, P>(bytes, encoding_mat, &powers)?
             .iter()
             .map(|b| b.shard.clone())
             .collect();
 
-        assert_eq!(bytes, decode::<E>(blocks).unwrap());
+        assert_eq!(bytes, decode::<F>(blocks).unwrap());
 
         Ok(())
     }
 
-    fn end_to_end_with_recoding_template<E, P>(
+    fn end_to_end_with_recoding_template<F, G, P>(
         bytes: &[u8],
-        encoding_mat: &Matrix<E::ScalarField>,
-    ) -> Result<(), ark_poly_commit::Error>
+        encoding_mat: &Matrix<F>,
+    ) -> Result<(), KomodoError>
     where
-        E: Pairing,
-        P: DenseUVPolynomial<E::ScalarField, Point = E::ScalarField>,
+        F: PrimeField,
+        G: CurveGroup<ScalarField = F>,
+        P: DenseUVPolynomial<F, Point = F>,
         for<'a, 'b> &'a P: Div<&'b P, Output = P>,
     {
-        let powers = setup::random(bytes.len())?;
-        let blocks = encode::<E, P>(bytes, encoding_mat, &powers)?;
+        let rng = &mut test_rng();
+
+        let powers = setup(bytes.len(), rng)?;
+        let blocks = encode::<F, G, P>(bytes, encoding_mat, &powers)?;
 
         let b_0_1 = recode(&blocks[0..=1]).unwrap().unwrap();
         let shards = vec![
@@ -372,7 +387,7 @@ mod tests {
             blocks[2].shard.clone(),
             blocks[3].shard.clone(),
         ];
-        assert_eq!(bytes, decode::<E>(shards).unwrap());
+        assert_eq!(bytes, decode::<F>(shards).unwrap());
 
         let b_0_1 = recode(&[blocks[0].clone(), blocks[1].clone()])
             .unwrap()
@@ -382,7 +397,7 @@ mod tests {
             blocks[1].shard.clone(),
             b_0_1.shard,
         ];
-        assert!(decode::<E>(shards).is_err());
+        assert!(decode::<F>(shards).is_err());
 
         let b_0_1 = recode(&blocks[0..=1]).unwrap().unwrap();
         let b_2_3 = recode(&blocks[2..=3]).unwrap().unwrap();
@@ -390,24 +405,24 @@ mod tests {
             .unwrap()
             .unwrap();
         let shards = vec![b_0_1.shard, b_2_3.shard, b_1_4.shard];
-        assert_eq!(bytes, decode::<E>(shards).unwrap());
+        assert_eq!(bytes, decode::<F>(shards).unwrap());
 
         let fully_recoded_shards = (0..3)
             .map(|_| recode(&blocks[0..=2]).unwrap().unwrap().shard)
             .collect();
-        assert_eq!(bytes, decode::<E>(fully_recoded_shards).unwrap());
+        assert_eq!(bytes, decode::<F>(fully_recoded_shards).unwrap());
 
         Ok(())
     }
 
     // NOTE: this is part of an experiment, to be honest, to be able to see how
     // much these tests could be refactored and simplified
-    fn run_template<E, T, P, F>(test: F)
+    fn run_template<F, T, P, Fun>(test: Fun)
     where
-        E: Pairing,
+        F: PrimeField,
         T: Field,
-        F: Fn(&[u8], &Matrix<T>) -> Result<(), ark_poly_commit::Error>,
-        P: DenseUVPolynomial<E::ScalarField, Point = E::ScalarField>,
+        Fun: Fn(&[u8], &Matrix<T>) -> Result<(), KomodoError>,
+        P: DenseUVPolynomial<F, Point = F>,
         for<'a, 'b> &'a P: Div<&'b P, Output = P>,
     {
         let (k, n) = (3, 6);
@@ -423,32 +438,32 @@ mod tests {
 
     #[test]
     fn verification() {
-        run_template::<Bls12_381, _, UniPoly381, _>(verify_template::<Bls12_381, UniPoly381>);
+        run_template::<Fr, _, UniPoly381, _>(verify_template::<Fr, G1Projective, UniPoly381>);
     }
 
     #[test]
     fn verify_with_errors() {
-        run_template::<Bls12_381, _, UniPoly381, _>(
-            verify_with_errors_template::<Bls12_381, UniPoly381>,
+        run_template::<Fr, _, UniPoly381, _>(
+            verify_with_errors_template::<Fr, G1Projective, UniPoly381>,
         );
     }
 
     #[test]
     fn verify_recoding() {
-        run_template::<Bls12_381, _, UniPoly381, _>(
-            verify_recoding_template::<Bls12_381, UniPoly381>,
+        run_template::<Fr, _, UniPoly381, _>(
+            verify_recoding_template::<Fr, G1Projective, UniPoly381>,
         );
     }
 
     #[test]
     fn end_to_end() {
-        run_template::<Bls12_381, _, UniPoly381, _>(end_to_end_template::<Bls12_381, UniPoly381>);
+        run_template::<Fr, _, UniPoly381, _>(end_to_end_template::<Fr, G1Projective, UniPoly381>);
     }
 
     #[test]
     fn end_to_end_with_recoding() {
-        run_template::<Bls12_381, _, UniPoly381, _>(
-            end_to_end_with_recoding_template::<Bls12_381, UniPoly381>,
+        run_template::<Fr, _, UniPoly381, _>(
+            end_to_end_with_recoding_template::<Fr, G1Projective, UniPoly381>,
         );
     }
 }
diff --git a/src/main.rs b/src/main.rs
index 06ce83332f2371c2d8e54f5546969ed9a144315f..4edf8d47cdb594414ba07b3aa8c4c8c51f4818d6 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -4,13 +4,15 @@ use std::process::exit;
 
 use anyhow::Result;
 
-use ark_bls12_381::Bls12_381;
-use ark_ec::pairing::Pairing;
+use ark_bls12_381::{Fr, G1Projective};
+use ark_ec::CurveGroup;
 use ark_ff::PrimeField;
 use ark_poly::univariate::DensePolynomial;
 use ark_poly::DenseUVPolynomial;
-use ark_poly_commit::kzg10::Powers;
 use ark_serialize::{CanonicalDeserialize, Compress, Validate};
+use ark_std::test_rng;
+use komodo::error::KomodoError;
+use komodo::zk::Powers;
 use tracing::{info, warn};
 
 use komodo::{
@@ -18,10 +20,10 @@ use komodo::{
     fec::{decode, Shard},
     fs,
     linalg::Matrix,
-    recode, setup, verify, Block,
+    recode, verify, zk, Block,
 };
 
-type UniPoly12_381 = DensePolynomial<<Bls12_381 as Pairing>::ScalarField>;
+type UniPoly12_381 = DensePolynomial<Fr>;
 
 const COMPRESS: Compress = Compress::Yes;
 const VALIDATE: Validate = Validate::Yes;
@@ -118,37 +120,41 @@ fn throw_error(code: i32, message: &str) {
     exit(code);
 }
 
-pub fn generate_random_powers<E, P>(
+pub fn generate_random_powers<F, G, P>(
     n: usize,
     powers_dir: &Path,
     powers_filename: Option<&str>,
 ) -> Result<()>
 where
-    E: Pairing,
-    P: DenseUVPolynomial<E::ScalarField, Point = E::ScalarField>,
+    F: PrimeField,
+    G: CurveGroup<ScalarField = F>,
+    P: DenseUVPolynomial<F, Point = F>,
     for<'a, 'b> &'a P: Div<&'b P, Output = P>,
 {
+    let rng = &mut test_rng();
+
     info!("generating new powers");
-    let powers = setup::random::<E, P>(n)?;
+    let powers = zk::setup::<_, F, G>(n, rng)?;
 
     fs::dump(&powers, powers_dir, powers_filename, COMPRESS)?;
 
     Ok(())
 }
 
-pub fn verify_blocks<E, P>(
-    blocks: &[(String, Block<E>)],
-    powers: Powers<E>,
-) -> Result<(), ark_poly_commit::Error>
+pub fn verify_blocks<F, G, P>(
+    blocks: &[(String, Block<F, G>)],
+    powers: Powers<F, G>,
+) -> Result<(), KomodoError>
 where
-    E: Pairing,
-    P: DenseUVPolynomial<E::ScalarField, Point = E::ScalarField>,
+    F: PrimeField,
+    G: CurveGroup<ScalarField = F>,
+    P: DenseUVPolynomial<F, Point = F>,
     for<'a, 'b> &'a P: Div<&'b P, Output = P>,
 {
     let res = blocks
         .iter()
-        .map(|(f, b)| Ok((f, verify::<E, P>(b, &powers)?)))
-        .collect::<Result<Vec<(&String, bool)>, ark_poly_commit::Error>>()?;
+        .map(|(f, b)| Ok((f, verify::<F, G, P>(b, &powers)?)))
+        .collect::<Result<Vec<(&String, bool)>, KomodoError>>()?;
 
     eprint!("[");
     for (f, v) in res {
@@ -183,7 +189,7 @@ fn main() {
     let powers_file = powers_dir.join(powers_filename);
 
     if do_generate_powers {
-        generate_random_powers::<Bls12_381, UniPoly12_381>(
+        generate_random_powers::<Fr, G1Projective, UniPoly12_381>(
             nb_bytes,
             &powers_dir,
             Some(powers_filename),
@@ -194,8 +200,8 @@ fn main() {
     }
 
     if do_reconstruct_data {
-        let blocks: Vec<Shard<Bls12_381>> =
-            fs::read_blocks::<Bls12_381>(&block_hashes, &block_dir, COMPRESS, VALIDATE)
+        let blocks: Vec<Shard<Fr>> =
+            fs::read_blocks::<Fr, G1Projective>(&block_hashes, &block_dir, COMPRESS, VALIDATE)
                 .unwrap_or_else(|e| {
                     throw_error(1, &format!("could not read blocks: {}", e));
                     unreachable!()
@@ -206,7 +212,7 @@ fn main() {
                 .collect();
         eprintln!(
             "{:?}",
-            decode::<Bls12_381>(blocks).unwrap_or_else(|e| {
+            decode::<Fr>(blocks).unwrap_or_else(|e| {
                 throw_error(1, &format!("could not decode: {}", e));
                 unreachable!()
             })
@@ -216,11 +222,12 @@ fn main() {
     }
 
     if do_combine_blocks {
-        let blocks = fs::read_blocks::<Bls12_381>(&block_hashes, &block_dir, COMPRESS, VALIDATE)
-            .unwrap_or_else(|e| {
-                throw_error(1, &format!("could not read blocks: {}", e));
-                unreachable!()
-            });
+        let blocks =
+            fs::read_blocks::<Fr, G1Projective>(&block_hashes, &block_dir, COMPRESS, VALIDATE)
+                .unwrap_or_else(|e| {
+                    throw_error(1, &format!("could not read blocks: {}", e));
+                    unreachable!()
+                });
 
         let formatted_output = fs::dump_blocks(
             &[
@@ -248,11 +255,12 @@ fn main() {
     }
 
     if do_inspect_blocks {
-        let blocks = fs::read_blocks::<Bls12_381>(&block_hashes, &block_dir, COMPRESS, VALIDATE)
-            .unwrap_or_else(|e| {
-                throw_error(1, &format!("could not read blocks: {}", e));
-                unreachable!()
-            });
+        let blocks =
+            fs::read_blocks::<Fr, G1Projective>(&block_hashes, &block_dir, COMPRESS, VALIDATE)
+                .unwrap_or_else(|e| {
+                    throw_error(1, &format!("could not read blocks: {}", e));
+                    unreachable!()
+                });
         eprint!("[");
         for (_, block) in &blocks {
             eprint!("{},", block);
@@ -265,7 +273,7 @@ fn main() {
     info!("reading powers from file `{:?}`", powers_file);
     let powers = if let Ok(serialized) = std::fs::read(&powers_file) {
         info!("deserializing the powers from `{:?}`", powers_file);
-        Powers::<Bls12_381>::deserialize_with_mode(&serialized[..], COMPRESS, VALIDATE)
+        Powers::<Fr, G1Projective>::deserialize_with_mode(&serialized[..], COMPRESS, VALIDATE)
             .unwrap_or_else(|e| {
                 throw_error(
                     1,
@@ -276,15 +284,17 @@ fn main() {
     } else {
         warn!("could not read powers from `{:?}`", powers_file);
         info!("regenerating temporary powers");
-        setup::random::<Bls12_381, UniPoly12_381>(nb_bytes).unwrap_or_else(|e| {
+        let rng = &mut test_rng();
+
+        zk::setup::<_, Fr, G1Projective>(nb_bytes, rng).unwrap_or_else(|e| {
             throw_error(1, &format!("could not generate powers: {}", e));
             unreachable!()
         })
     };
 
     if do_verify_blocks {
-        verify_blocks::<Bls12_381, UniPoly12_381>(
-            &fs::read_blocks::<Bls12_381>(&block_hashes, &block_dir, COMPRESS, VALIDATE)
+        verify_blocks::<Fr, G1Projective, UniPoly12_381>(
+            &fs::read_blocks::<Fr, G1Projective>(&block_hashes, &block_dir, COMPRESS, VALIDATE)
                 .unwrap_or_else(|e| {
                     throw_error(1, &format!("could not read blocks: {}", e));
                     unreachable!()
@@ -301,10 +311,8 @@ fn main() {
 
     let encoding_mat = match encoding_method.as_str() {
         "vandermonde" => {
-            let points: Vec<<Bls12_381 as Pairing>::ScalarField> = (0..n)
-                .map(|i| {
-                    <Bls12_381 as Pairing>::ScalarField::from_le_bytes_mod_order(&i.to_le_bytes())
-                })
+            let points: Vec<Fr> = (0..n)
+                .map(|i| Fr::from_le_bytes_mod_order(&i.to_le_bytes()))
                 .collect();
             Matrix::vandermonde(&points, k)
         }
@@ -316,10 +324,12 @@ fn main() {
     };
 
     let formatted_output = fs::dump_blocks(
-        &encode::<Bls12_381, UniPoly12_381>(&bytes, &encoding_mat, &powers).unwrap_or_else(|e| {
-            throw_error(1, &format!("could not encode: {}", e));
-            unreachable!()
-        }),
+        &encode::<Fr, G1Projective, UniPoly12_381>(&bytes, &encoding_mat, &powers).unwrap_or_else(
+            |e| {
+                throw_error(1, &format!("could not encode: {}", e));
+                unreachable!()
+            },
+        ),
         &block_dir,
         COMPRESS,
     )
diff --git a/src/setup.rs b/src/setup.rs
deleted file mode 100644
index 6145cf977ea829c65ac981a1b72b6a6b4ae0b68f..0000000000000000000000000000000000000000
--- a/src/setup.rs
+++ /dev/null
@@ -1,112 +0,0 @@
-//! create and interact with ZK trusted setups
-use std::ops::Div;
-
-use anyhow::Result;
-
-use ark_ec::pairing::Pairing;
-use ark_ff::PrimeField;
-use ark_poly::DenseUVPolynomial;
-use ark_poly_commit::kzg10::{Powers, UniversalParams, VerifierKey, KZG10};
-use ark_std::test_rng;
-
-/// Specializes the public parameters for a given maximum degree `d` for polynomials
-///
-/// `d` should be less that `pp.max_degree()`.
-///
-/// > see [`ark-poly-commit::kzg10::tests::KZG10`](https://github.com/jdetchart/poly-commit/blob/master/src/kzg10/mod.rs#L509)
-pub fn trim<E: Pairing>(
-    pp: UniversalParams<E>,
-    supported_degree: usize,
-) -> (Powers<'static, E>, VerifierKey<E>) {
-    let powers_of_g = pp.powers_of_g[..=supported_degree].to_vec();
-    let powers_of_gamma_g = (0..=supported_degree)
-        .map(|i| pp.powers_of_gamma_g[&i])
-        .collect();
-
-    let powers = Powers {
-        powers_of_g: ark_std::borrow::Cow::Owned(powers_of_g),
-        powers_of_gamma_g: ark_std::borrow::Cow::Owned(powers_of_gamma_g),
-    };
-    let vk = VerifierKey {
-        g: pp.powers_of_g[0],
-        gamma_g: pp.powers_of_gamma_g[&0],
-        h: pp.h,
-        beta_h: pp.beta_h,
-        prepared_h: pp.prepared_h.clone(),
-        prepared_beta_h: pp.prepared_beta_h.clone(),
-    };
-
-    (powers, vk)
-}
-
-/// build a random trusted setup for a given number of bytes
-///
-/// `nb_bytes` will be divided by the "_modulus size_" of the elliptic curve to
-/// get the number of powers of the secret to generate, e.g. creating a trusted
-/// setup for 10kib on BLS-12-381 requires 331 powers of $\tau$.
-///
-/// /!\ Should be used only for tests, not for any real world usage. /!\
-pub fn random<E, P>(nb_bytes: usize) -> Result<Powers<'static, E>, ark_poly_commit::Error>
-where
-    E: Pairing,
-    P: DenseUVPolynomial<E::ScalarField, Point = E::ScalarField>,
-    for<'a, 'b> &'a P: Div<&'b P, Output = P>,
-{
-    let degree = nb_bytes / (E::ScalarField::MODULUS_BIT_SIZE as usize / 8);
-
-    let rng = &mut test_rng();
-
-    let params = KZG10::<E, P>::setup(degree, false, rng)?;
-    let (powers, _) = trim(params, degree);
-
-    Ok(powers)
-}
-
-#[cfg(test)]
-mod tests {
-    use std::ops::Div;
-
-    use ark_bls12_381::Bls12_381;
-    use ark_ec::pairing::Pairing;
-    use ark_ff::PrimeField;
-    use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial};
-
-    use super::random;
-
-    type UniPoly381 = DensePolynomial<<Bls12_381 as Pairing>::ScalarField>;
-
-    /// computes $a / b$ rounded to the integer above
-    ///
-    /// > **Note**
-    /// > co-authored by ChatGPT
-    fn ceil_divide(a: usize, b: usize) -> usize {
-        (a + b - 1) / b
-    }
-
-    #[test]
-    fn test_ceil_divide() {
-        assert_eq!(ceil_divide(10, 2), 5);
-        assert_eq!(ceil_divide(10, 3), 4);
-        assert_eq!(ceil_divide(10, 6), 2);
-    }
-
-    fn random_setup_size_template<E, P>(nb_bytes: usize)
-    where
-        E: Pairing,
-        P: DenseUVPolynomial<E::ScalarField, Point = E::ScalarField>,
-        for<'a, 'b> &'a P: Div<&'b P, Output = P>,
-    {
-        let powers = random::<E, P>(nb_bytes);
-        assert!(powers.is_ok());
-
-        assert_eq!(
-            powers.unwrap().powers_of_g.to_vec().len(),
-            ceil_divide(nb_bytes, E::ScalarField::MODULUS_BIT_SIZE as usize / 8)
-        );
-    }
-
-    #[test]
-    fn random_setup_size() {
-        random_setup_size_template::<Bls12_381, UniPoly381>(10 * 1_024);
-    }
-}
diff --git a/src/zk.rs b/src/zk.rs
new file mode 100644
index 0000000000000000000000000000000000000000..65fbe503539f38da9d442a6a5b3f44a5d23067fe
--- /dev/null
+++ b/src/zk.rs
@@ -0,0 +1,215 @@
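+//! create and interact with ZK trusted setups and commitments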
+use ark_ec::{scalar_mul::fixed_base::FixedBase, CurveGroup, VariableBaseMSM};
+use ark_ff::PrimeField;
+use ark_poly::DenseUVPolynomial;
+use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
+use ark_std::{end_timer, rand::RngCore, start_timer};
+
+use crate::error::KomodoError;
+
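+/// the powers of a trusted setup: the successive powers of a secret scalar
+/// multiplied by a generator, stored as affine points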
+#[derive(Debug, Clone, Default, CanonicalSerialize, CanonicalDeserialize, PartialEq)]
+pub struct Powers<F: PrimeField, G: CurveGroup<ScalarField = F>>(Vec<G::Affine>);
+
+impl<F: PrimeField, G: CurveGroup<ScalarField = F>> Powers<F, G> {
+    fn len(&self) -> usize {
+        self.0.len()
+    }
+}
+
+impl<F: PrimeField, G: CurveGroup<ScalarField = F>> IntoIterator for Powers<F, G> {
+    type Item = G::Affine;
+    type IntoIter = std::vec::IntoIter<Self::Item>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.0.into_iter()
+    }
+}
+
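+/// a commitment to a polynomial, i.e. the evaluation of the polynomial at the
+/// secret point of the trusted setup, hidden in the curve group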
+#[derive(Debug, Clone, Copy, Default, CanonicalSerialize, CanonicalDeserialize, PartialEq)]
+pub struct Commitment<F: PrimeField, G: CurveGroup<ScalarField = F>>(pub G::Affine);
+
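+/// create a trusted setup of `max_degree + 1` powers of a random secret, i.e.
+/// $([\beta^0] G, [\beta^1] G, \dots, [\beta^d] G)$ with $d$ the maximum degree,
+/// for a random scalar $\beta$ and a random generator $G$
+///
+/// a minimal usage sketch, assuming the module is exposed as `komodo::zk` and
+/// reusing the BLS12-381 types from the tests below:
+/// ```rust
+/// use ark_bls12_381::{Fr, G1Projective};
+/// use ark_std::test_rng;
+///
+/// let rng = &mut test_rng();
+/// // 10 powers, enough to commit to polynomials of degree at most 9
+/// let powers = komodo::zk::setup::<_, Fr, G1Projective>(9, rng).unwrap();
+/// ```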
+pub fn setup<R: RngCore, F: PrimeField, G: CurveGroup<ScalarField = F>>(
+    max_degree: usize,
+    rng: &mut R,
+) -> Result<Powers<F, G>, KomodoError> {
+    if max_degree < 1 {
+        return Err(KomodoError::DegreeIsZero);
+    }
+    let setup_time = start_timer!(|| format!("setup with degree {}", max_degree));
+
+    let beta = F::rand(rng);
+    let g = G::rand(rng);
+
+    let mut powers_of_beta = vec![F::one()];
+    let mut cur = beta;
+    for _ in 0..max_degree {
+        powers_of_beta.push(cur);
+        cur *= &beta;
+    }
+
+    let window_size = FixedBase::get_mul_window_size(max_degree + 1);
+    let scalar_bits = F::MODULUS_BIT_SIZE as usize;
+
+    let g_time = start_timer!(|| "Generating powers of G");
+    let g_table = FixedBase::get_window_table(scalar_bits, window_size, g);
+    let powers_of_g = FixedBase::msm::<G>(scalar_bits, window_size, &g_table, &powers_of_beta);
+    end_timer!(g_time);
+
+    let powers_of_g: Vec<G::Affine> = G::normalize_batch(&powers_of_g);
+
+    end_timer!(setup_time);
+    Ok(Powers(powers_of_g))
+}
+
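+/// check that a polynomial of degree `degree`, i.e. with `degree + 1`
+/// coefficients, fits into a trusted setup of `num_powers` powers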
+fn check_degree_is_too_large(degree: usize, num_powers: usize) -> Result<(), KomodoError> {
+    let num_coefficients = degree + 1;
+    if num_coefficients > num_powers {
+        Err(KomodoError::TooFewPowersInTrustedSetup(
+            num_powers,
+            num_coefficients,
+        ))
+    } else {
+        Ok(())
+    }
+}
+
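+/// strip the leading zero coefficients of `p` and convert the remaining ones to
+/// their `BigInt` representation, returning the number of zeros skipped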
+fn skip_leading_zeros_and_convert_to_bigints<F: PrimeField, P: DenseUVPolynomial<F>>(
+    p: &P,
+) -> (usize, Vec<F::BigInt>) {
+    let mut num_leading_zeros = 0;
+    while num_leading_zeros < p.coeffs().len() && p.coeffs()[num_leading_zeros].is_zero() {
+        num_leading_zeros += 1;
+    }
+    let coeffs = convert_to_bigints(&p.coeffs()[num_leading_zeros..]);
+    (num_leading_zeros, coeffs)
+}
+
+fn convert_to_bigints<F: PrimeField>(p: &[F]) -> Vec<F::BigInt> {
+    let to_bigint_time = start_timer!(|| "Converting polynomial coeffs to bigints");
+    let coeffs = ark_std::cfg_iter!(p)
+        .map(|s| s.into_bigint())
+        .collect::<Vec<_>>();
+    end_timer!(to_bigint_time);
+    coeffs
+}
+
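+/// commit to a polynomial $P$ with the powers of a trusted setup, i.e. compute
+/// $[P(\beta)] G = \sum_i p_i [\beta^i] G$ with an MSM, skipping leading zero
+/// coefficients
+///
+/// a minimal usage sketch, under the same assumptions as the example of [`setup`]:
+/// ```rust
+/// use ark_bls12_381::{Fr, G1Projective};
+/// use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial};
+/// use ark_std::test_rng;
+///
+/// use komodo::zk::{commit, setup};
+///
+/// let rng = &mut test_rng();
+/// let powers = setup::<_, Fr, G1Projective>(9, rng).unwrap();
+/// let polynomial = DensePolynomial::<Fr>::rand(9, rng);
+/// let commitment = commit(&powers, &polynomial).unwrap();
+/// ```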
+pub fn commit<F, G, P>(
+    powers: &Powers<F, G>,
+    polynomial: &P,
+) -> Result<Commitment<F, G>, KomodoError>
+where
+    F: PrimeField,
+    G: CurveGroup<ScalarField = F>,
+    P: DenseUVPolynomial<F>,
+{
+    check_degree_is_too_large(polynomial.degree(), powers.len())?;
+
+    let commit_time = start_timer!(|| format!(
+        "Committing to polynomial of degree {}",
+        polynomial.degree()
+    ));
+
+    let (num_leading_zeros, plain_coeffs) = skip_leading_zeros_and_convert_to_bigints(polynomial);
+
+    let msm_time = start_timer!(|| "MSM to compute commitment to plaintext poly");
+    let commitment = <G as VariableBaseMSM>::msm_bigint(
+        &powers.0[num_leading_zeros..],
+        &plain_coeffs,
+    );
+    end_timer!(msm_time);
+
+    end_timer!(commit_time);
+    Ok(Commitment(commitment.into()))
+}
+
+#[cfg(test)]
+mod tests {
+    use ark_bls12_381::{Fr, G1Projective};
+    use ark_ec::CurveGroup;
+    use ark_ff::PrimeField;
+    use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial};
+    use ark_std::test_rng;
+
+    use crate::error::KomodoError;
+
+    use super::{commit as commit_to_test, setup};
+
+    fn generate_setup_template<F: PrimeField, G: CurveGroup<ScalarField = F>>(nb_bytes: usize) {
+        let degree = nb_bytes / (F::MODULUS_BIT_SIZE as usize / 8);
+
+        let rng = &mut test_rng();
+
+        let powers = setup::<_, F, G>(degree, rng).unwrap();
+
+        assert_eq!(
+            powers.len(),
+            degree + 1,
+            "number of powers in the trusted setup does not match the number of coefficients"
+        );
+    }
+
+    #[test]
+    fn generate_setup() {
+        for nb_kb in [1, 2, 4, 8, 16, 32, 64] {
+            generate_setup_template::<Fr, G1Projective>(nb_kb * 1024);
+        }
+    }
+
+    fn generate_invalid_setup_template<F: PrimeField, G: CurveGroup<ScalarField = F>>() {
+        let rng = &mut test_rng();
+
+        let powers = setup::<_, F, G>(0, rng);
+        assert!(
+            powers.is_err(),
+            "creating a trusted setup for a degree 0 polynomial should NOT work"
+        );
+        assert_eq!(
+            powers.err().unwrap(),
+            KomodoError::DegreeIsZero,
+            "message should say the degree is zero"
+        );
+        assert!(
+            setup::<_, F, G>(1, rng).is_ok(),
+            "creating a trusted setup for any polynomial with degree at least 1 should work"
+        );
+    }
+
+    #[test]
+    fn generate_invalid_setup() {
+        generate_invalid_setup_template::<Fr, G1Projective>();
+    }
+
+    fn commit_template<F, G, P>(nb_bytes: usize)
+    where
+        F: PrimeField,
+        G: CurveGroup<ScalarField = F>,
+        P: DenseUVPolynomial<F>,
+    {
+        let degree = nb_bytes / (F::MODULUS_BIT_SIZE as usize / 8);
+
+        let rng = &mut test_rng();
+
+        let powers = setup::<_, F, G>(degree, rng).unwrap();
+
+        assert!(
+            commit_to_test(&powers, &P::rand(degree - 1, rng)).is_ok(),
+            "committing a polynomial with less coefficients than there are powers in the trusted setup should work"
+        );
+        assert!(
+            commit_to_test(&powers, &P::rand(degree, rng)).is_ok(),
+            "committing a polynomial with as many coefficients as there are powers in the trusted setup should work"
+        );
+        assert!(
+            commit_to_test(&powers, &P::rand(degree + 1, rng)).is_err(),
+            "committing a polynomial with more coefficients than there are powers in the trusted setup should NOT work"
+        );
+    }
+
+    #[test]
+    fn commit() {
+        for nb_kb in [1, 2, 4, 8, 16, 32, 64] {
+            commit_template::<Fr, G1Projective, DensePolynomial<Fr>>(nb_kb * 1024);
+        }
+    }
+}