From 60a0fadd46045f9a4b6d4fa6751befdd80dfb355 Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Fri, 28 Mar 2025 11:01:31 +0100
Subject: [PATCH 01/56] fix bad link

---
 src/kzg.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/kzg.rs b/src/kzg.rs
index 0f5fe3b0..c90a23da 100644
--- a/src/kzg.rs
+++ b/src/kzg.rs
@@ -54,7 +54,7 @@ pub use crate::zk::ark_commit as commit;
 
 /// representation of a block of proven data.
 ///
-/// this is a wrapper around a [`fec::Shard`] with some additional cryptographic
+/// this is a wrapper around a [`crate::fec::Shard`] with some additional cryptographic
 /// information that allows to prove the integrity of said shard.
 #[derive(Debug, Clone, Default, PartialEq, CanonicalDeserialize, CanonicalSerialize)]
 pub struct Block<E: Pairing> {
-- 
GitLab


From 0bbd585a32bee02866220e5f5b3d8b64684b884b Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Fri, 28 Mar 2025 11:06:28 +0100
Subject: [PATCH 02/56] add `simple-mermaid` to TOML

---
 Cargo.toml | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/Cargo.toml b/Cargo.toml
index b2797fda..61488178 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -26,6 +26,7 @@ tracing = "0.1.40"
 tracing-subscriber = "0.3.17"
 ark-poly-commit = { git = "https://gitlab.isae-supaero.fr/a.stevan/poly-commit", version = "0.4.0", rev = "19fc0d4", optional = true }
 dragoonfri = { version = "0.1.0", optional = true}
+simple-mermaid = "0.2.0"
 
 [workspace]
 members = [
@@ -66,4 +67,4 @@ required-features = ["fri"]
 
 [[example]]
 name = "fec"
-required-features = ["fri"]
\ No newline at end of file
+required-features = ["fri"]
-- 
GitLab


From 2f45885c677327d3391136b1d72856f834691afe Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Fri, 28 Mar 2025 11:06:51 +0100
Subject: [PATCH 03/56] WIP: add KZG protocol as Mermaid sequence diagram

---
 src/kzg.mmd | 14 ++++++++++++++
 src/kzg.rs  |  2 ++
 2 files changed, 16 insertions(+)
 create mode 100644 src/kzg.mmd

diff --git a/src/kzg.mmd b/src/kzg.mmd
new file mode 100644
index 00000000..8ba5101b
--- /dev/null
+++ b/src/kzg.mmd
@@ -0,0 +1,14 @@
+sequenceDiagram
+    actor prover
+    actor verifier
+    Note over prover,verifier: generate KZG setup
+
+    %% Note left of prover: Text in note
+    %% Note right of verifier: Foo
+
+    %% user->>API: df add
+    %% API->>node: HTTP request: GET / POST
+    %% node-->>+lib: call
+    %% lib-->>-node: return
+    %% node->>API: HTTP response
+    %% API->>user: human friendly response
diff --git a/src/kzg.rs b/src/kzg.rs
index c90a23da..c3ba0865 100644
--- a/src/kzg.rs
+++ b/src/kzg.rs
@@ -4,6 +4,8 @@
 //! > - [Kate et al., 2010](https://link.springer.com/chapter/10.1007/978-3-642-17373-8_11)
 //! > - [Boneh et al., 2020](https://eprint.iacr.org/2020/081)
 //!
+#![doc = simple_mermaid::mermaid!("kzg.mmd")]
+//!
 //! # The protocol
 //! Here, we assume that the input data has been encoded with a _Reed-Solomon_ encoding, as can be
 //! done with the [crate::fec] module.
-- 
GitLab


From 2e9a349d329b85b23f832e83b96109c2fc13fb55 Mon Sep 17 00:00:00 2001
From: HEME Clement <clement.heme@student.isae-supaero.fr>
Date: Wed, 19 Mar 2025 09:35:22 +0100
Subject: [PATCH 04/56] Added LaTeX support for doc

---
 src/fec.rs | 32 ++++++++++++--------------------
 1 file changed, 12 insertions(+), 20 deletions(-)

diff --git a/src/fec.rs b/src/fec.rs
index 95149369..715ef2f8 100644
--- a/src/fec.rs
+++ b/src/fec.rs
@@ -31,6 +31,14 @@ pub struct Shard<F: PrimeField> {
 }
 
 impl<F: PrimeField> Shard<F> {
+    fn dual_combination(this: &Vec<F>, alpha: F, other: &Vec<F>, beta: F) -> Vec<F> {
+        this
+            .iter()
+            .zip(other.iter())
+            .map(|(s, o)| s.mul(alpha) + o.mul(beta))
+            .collect()
+    }
+ 
     /// compute the linear combination between two [`Shard`]s
     ///
     /// if we denote the [`Shard`] itself and the other [`Shard`] by $s$ and $o$ respectively, the
@@ -45,19 +53,9 @@ impl<F: PrimeField> Shard<F> {
 
         Shard {
             k: self.k,
-            linear_combination: self
-                .linear_combination
-                .iter()
-                .zip(other.linear_combination.iter())
-                .map(|(l, r)| l.mul(alpha) + r.mul(beta))
-                .collect(),
+            linear_combination: Self::dual_combination(&self.linear_combination, alpha, &other.linear_combination, beta),
             hash: self.hash.clone(),
-            data: self
-                .data
-                .iter()
-                .zip(other.data.iter())
-                .map(|(es, eo)| es.mul(alpha) + eo.mul(beta))
-                .collect(),
+            data: Self::dual_combination(&self.data, alpha, &other.data, beta),
             size: self.size,
         }
     }
@@ -104,10 +102,7 @@ pub fn recode_with_coeffs<F: PrimeField>(shards: &[Shard<F>], coeffs: &[F]) -> O
 /// > **Note**
 /// >
 /// > this is a wrapper around [`recode_with_coeffs`].
-pub fn recode_random<F: PrimeField>(
-    shards: &[Shard<F>],
-    rng: &mut impl RngCore,
-) -> Result<Option<Shard<F>>, KomodoError> {
+pub fn recode_random<F: PrimeField>(shards: &[Shard<F>], rng: &mut impl RngCore) -> Result<Option<Shard<F>>, KomodoError> {
     for (i, (s1, s2)) in shards.iter().zip(shards.iter().skip(1)).enumerate() {
         if s1.k != s2.k {
             return Err(KomodoError::IncompatibleShards(format!(
@@ -144,10 +139,7 @@ pub fn recode_random<F: PrimeField>(
 /// matrix. (see [`algebra::split_data_into_field_elements`])
 ///
 /// This is the inverse of [`decode`].
-pub fn encode<F: PrimeField>(
-    data: &[u8],
-    encoding_mat: &Matrix<F>,
-) -> Result<Vec<Shard<F>>, KomodoError> {
+pub fn encode<F: PrimeField>(data: &[u8], encoding_mat: &Matrix<F>) -> Result<Vec<Shard<F>>, KomodoError> {
     let hash = Sha256::hash(data).to_vec();
 
     let k = encoding_mat.height;
-- 
GitLab
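
The `dual_combination` helper introduced above computes the element-wise combination $\alpha s + \beta o$ that `Shard::combine` applies to both `linear_combination` and `data`. As a minimal, standalone sketch of that operation over any prime field (the name and signature are illustrative, not Komodo's API):

```rust
use ark_ff::PrimeField;

/// Element-wise linear combination `alpha * a + beta * b` of two equally long
/// vectors of field elements, mirroring the `dual_combination` helper above.
fn linear_combination<F: PrimeField>(a: &[F], alpha: F, b: &[F], beta: F) -> Vec<F> {
    assert_eq!(a.len(), b.len(), "both vectors must have the same length");
    a.iter()
        .zip(b.iter())
        .map(|(x, y)| *x * alpha + *y * beta)
        .collect()
}
```

With `alpha = beta = F::one()` this reduces to element-wise addition; PATCH 06 later reverts the helper, but the operation itself stays the same in `Shard::combine`.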


From 5dc85d4386a0f2c0695b4918e4aa6929d8edaadd Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Fri, 28 Mar 2025 11:20:41 +0100
Subject: [PATCH 05/56] format

---
 src/fec.rs | 22 ++++++++++++++++------
 1 file changed, 16 insertions(+), 6 deletions(-)

diff --git a/src/fec.rs b/src/fec.rs
index 715ef2f8..eb55e9f8 100644
--- a/src/fec.rs
+++ b/src/fec.rs
@@ -32,13 +32,12 @@ pub struct Shard<F: PrimeField> {
 
 impl<F: PrimeField> Shard<F> {
     fn dual_combination(this: &Vec<F>, alpha: F, other: &Vec<F>, beta: F) -> Vec<F> {
-        this
-            .iter()
+        this.iter()
             .zip(other.iter())
             .map(|(s, o)| s.mul(alpha) + o.mul(beta))
             .collect()
     }
- 
+
     /// compute the linear combination between two [`Shard`]s
     ///
     /// if we denote the [`Shard`] itself and the other [`Shard`] by $s$ and $o$ respectively, the
@@ -53,7 +52,12 @@ impl<F: PrimeField> Shard<F> {
 
         Shard {
             k: self.k,
-            linear_combination: Self::dual_combination(&self.linear_combination, alpha, &other.linear_combination, beta),
+            linear_combination: Self::dual_combination(
+                &self.linear_combination,
+                alpha,
+                &other.linear_combination,
+                beta,
+            ),
             hash: self.hash.clone(),
             data: Self::dual_combination(&self.data, alpha, &other.data, beta),
             size: self.size,
@@ -102,7 +106,10 @@ pub fn recode_with_coeffs<F: PrimeField>(shards: &[Shard<F>], coeffs: &[F]) -> O
 /// > **Note**
 /// >
 /// > this is a wrapper around [`recode_with_coeffs`].
-pub fn recode_random<F: PrimeField>(shards: &[Shard<F>], rng: &mut impl RngCore) -> Result<Option<Shard<F>>, KomodoError> {
+pub fn recode_random<F: PrimeField>(
+    shards: &[Shard<F>],
+    rng: &mut impl RngCore,
+) -> Result<Option<Shard<F>>, KomodoError> {
     for (i, (s1, s2)) in shards.iter().zip(shards.iter().skip(1)).enumerate() {
         if s1.k != s2.k {
             return Err(KomodoError::IncompatibleShards(format!(
@@ -139,7 +146,10 @@ pub fn recode_random<F: PrimeField>(shards: &[Shard<F>], rng: &mut impl RngCore)
 /// matrix. (see [`algebra::split_data_into_field_elements`])
 ///
 /// This is the inverse of [`decode`].
-pub fn encode<F: PrimeField>(data: &[u8], encoding_mat: &Matrix<F>) -> Result<Vec<Shard<F>>, KomodoError> {
+pub fn encode<F: PrimeField>(
+    data: &[u8],
+    encoding_mat: &Matrix<F>,
+) -> Result<Vec<Shard<F>>, KomodoError> {
     let hash = Sha256::hash(data).to_vec();
 
     let k = encoding_mat.height;
-- 
GitLab


From 7be56b532e6c18cce2191502e62313d465dc8611 Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Fri, 28 Mar 2025 11:21:15 +0100
Subject: [PATCH 06/56] revert "dual combination"

---
 src/fec.rs | 26 ++++++++++++--------------
 1 file changed, 12 insertions(+), 14 deletions(-)

diff --git a/src/fec.rs b/src/fec.rs
index eb55e9f8..95149369 100644
--- a/src/fec.rs
+++ b/src/fec.rs
@@ -31,13 +31,6 @@ pub struct Shard<F: PrimeField> {
 }
 
 impl<F: PrimeField> Shard<F> {
-    fn dual_combination(this: &Vec<F>, alpha: F, other: &Vec<F>, beta: F) -> Vec<F> {
-        this.iter()
-            .zip(other.iter())
-            .map(|(s, o)| s.mul(alpha) + o.mul(beta))
-            .collect()
-    }
-
     /// compute the linear combination between two [`Shard`]s
     ///
     /// if we denote the [`Shard`] itself and the other [`Shard`] by $s$ and $o$ respectively, the
@@ -52,14 +45,19 @@ impl<F: PrimeField> Shard<F> {
 
         Shard {
             k: self.k,
-            linear_combination: Self::dual_combination(
-                &self.linear_combination,
-                alpha,
-                &other.linear_combination,
-                beta,
-            ),
+            linear_combination: self
+                .linear_combination
+                .iter()
+                .zip(other.linear_combination.iter())
+                .map(|(l, r)| l.mul(alpha) + r.mul(beta))
+                .collect(),
             hash: self.hash.clone(),
-            data: Self::dual_combination(&self.data, alpha, &other.data, beta),
+            data: self
+                .data
+                .iter()
+                .zip(other.data.iter())
+                .map(|(es, eo)| es.mul(alpha) + eo.mul(beta))
+                .collect(),
             size: self.size,
         }
     }
-- 
GitLab


From 6725891566e70d15c50c98f79106936a6a6121c4 Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Fri, 28 Mar 2025 11:21:45 +0100
Subject: [PATCH 07/56] Makefile: drop RUSTDOCFLAGS and document private items

---
 Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Makefile b/Makefile
index bdc91117..e0a254ab 100644
--- a/Makefile
+++ b/Makefile
@@ -63,7 +63,7 @@ show:
 
 .PHONY: doc
 doc:
-	RUSTDOCFLAGS="--html-in-header katex.html" cargo doc --no-deps --open
+	cargo doc --document-private-items --no-deps --open
 
 .PHONY: build-examples
 build-examples:
-- 
GitLab


From b1c36d92990f681b5ce5edf5d57ffa104c15021f Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Fri, 28 Mar 2025 11:21:51 +0100
Subject: [PATCH 08/56] add Cargo config

---
 .cargo/config.toml | 2 ++
 1 file changed, 2 insertions(+)
 create mode 100644 .cargo/config.toml

diff --git a/.cargo/config.toml b/.cargo/config.toml
new file mode 100644
index 00000000..35cffad6
--- /dev/null
+++ b/.cargo/config.toml
@@ -0,0 +1,2 @@
+[build]
+rustdocflags = ["--html-in-header", "katex.html"]
-- 
GitLab


From 58da89b28c84cbeea97a6fdd7259956bf6e9cbdf Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Fri, 28 Mar 2025 11:59:56 +0100
Subject: [PATCH 09/56] done: add KZG protocol as Mermaid sequence diagram

---
 src/kzg.mmd | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)

diff --git a/src/kzg.mmd b/src/kzg.mmd
index 8ba5101b..660233a0 100644
--- a/src/kzg.mmd
+++ b/src/kzg.mmd
@@ -1,14 +1,18 @@
 sequenceDiagram
     actor prover
     actor verifier
-    Note over prover,verifier: generate KZG setup
 
-    %% Note left of prover: Text in note
-    %% Note right of verifier: Foo
+    Note over prover,verifier: generate trusted setup<br/>$$\ \text{TS} = ([\tau^j]_1)_{0 \leq j \leq k - 1}$$
 
-    %% user->>API: df add
-    %% API->>node: HTTP request: GET / POST
-    %% node-->>+lib: call
-    %% lib-->>-node: return
-    %% node->>API: HTTP response
-    %% API->>user: human friendly response
+    Note left of prover: split the data into $$\ m\ \text{polynomials of degree}\ k - 1$$
+    Note left of prover: compute commitments $$\ (c_i)_{0 \leq i \leq m - 1} \text{ as } [P_i(\tau)]_1$$
+    Note left of prover: compute shard $$\ s_{\alpha} = (P_i(\alpha))_{0 \leq i \leq m - 1}$$
+    Note left of prover: compute aggregation $$\ Q(X) = \sum\limits_{i=0}^{m - 1}r^i P_i(X) \text{ where } r = H(P_0(\alpha)|...|P_{m-1}(\alpha))$$
+    Note left of prover: compute proof $$\ \pi_{\alpha} = \left[ \frac{Q(\tau) - Q(\alpha)}{\tau - \alpha} \right]_1$$
+
+    prover->>verifier: $$\pi_{\alpha} \text{, } s_{\alpha} \text{, } (c_i)_{0 \leq i \leq m - 1}$$
+
+    Note right of verifier: compute $$\ r = H(s_0|...|s_{m - 1})$$
+    Note right of verifier: compute $$\ y = \sum\limits_{i = 0}^{m - 1} r^i s_i$$
+    Note right of verifier: compute $$\ c = \sum\limits_{i = 0}^{m - 1} r^i c_i$$
+    Note right of verifier: assert $$\ E(c - [y]_1, [1]_2) = E(\pi_{\alpha}, [\tau - \alpha]_2)$$
-- 
GitLab


From c035478121b4d40723d3f6d04ce513f632664aa2 Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Fri, 28 Mar 2025 12:04:04 +0100
Subject: [PATCH 10/56] clarify shard notation in the KZG diagram

---
 src/kzg.mmd | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/kzg.mmd b/src/kzg.mmd
index 660233a0..9d2831ea 100644
--- a/src/kzg.mmd
+++ b/src/kzg.mmd
@@ -6,13 +6,13 @@ sequenceDiagram
 
     Note left of prover: split the data into $$\ m\ \text{polynomials of degree}\ k - 1$$
     Note left of prover: compute commitments $$\ (c_i)_{0 \leq i \leq m - 1} \text{ as } [P_i(\tau)]_1$$
-    Note left of prover: compute shard $$\ s_{\alpha} = (P_i(\alpha))_{0 \leq i \leq m - 1}$$
-    Note left of prover: compute aggregation $$\ Q(X) = \sum\limits_{i=0}^{m - 1}r^i P_i(X) \text{ where } r = H(P_0(\alpha)|...|P_{m-1}(\alpha))$$
+    Note left of prover: compute shard $$\ s_{\alpha} = (s_{\alpha, i})_{0 \leq i \leq m - 1} = (P_i(\alpha))_{0 \leq i \leq m - 1}$$
+    Note left of prover: compute aggregation $$\ Q(X) = \sum\limits_{i = 0}^{m - 1}r^i P_i(X) \text{ where } r = H(P_0(\alpha)|...|P_{m-1}(\alpha))$$
     Note left of prover: compute proof $$\ \pi_{\alpha} = \left[ \frac{Q(\tau) - Q(\alpha)}{\tau - \alpha} \right]_1$$
 
     prover->>verifier: $$\pi_{\alpha} \text{, } s_{\alpha} \text{, } (c_i)_{0 \leq i \leq m - 1}$$
 
     Note right of verifier: compute $$\ r = H(s_0|...|s_{m - 1})$$
-    Note right of verifier: compute $$\ y = \sum\limits_{i = 0}^{m - 1} r^i s_i$$
+    Note right of verifier: compute $$\ y = \sum\limits_{i = 0}^{m - 1} r^i s_{\alpha, i}$$
     Note right of verifier: compute $$\ c = \sum\limits_{i = 0}^{m - 1} r^i c_i$$
     Note right of verifier: assert $$\ E(c - [y]_1, [1]_2) = E(\pi_{\alpha}, [\tau - \alpha]_2)$$
-- 
GitLab


From 28150e2746a0d6a8fc53f53b71e7156de15362eb Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Fri, 28 Mar 2025 12:08:34 +0100
Subject: [PATCH 11/56] specify ranges for i and j only once

---
 src/kzg.mmd | 14 +++++++-------
 src/kzg.rs  |  5 +++++
 2 files changed, 12 insertions(+), 7 deletions(-)

diff --git a/src/kzg.mmd b/src/kzg.mmd
index 9d2831ea..5f1537a1 100644
--- a/src/kzg.mmd
+++ b/src/kzg.mmd
@@ -2,17 +2,17 @@ sequenceDiagram
     actor prover
     actor verifier
 
-    Note over prover,verifier: generate trusted setup<br/>$$\ \text{TS} = ([\tau^j]_1)_{0 \leq j \leq k - 1}$$
+    Note over prover,verifier: generate trusted setup<br/>$$\ \text{TS} = ([\tau^j]_1)$$
 
     Note left of prover: split the data into $$\ m\ \text{polynomials of degree}\ k - 1$$
-    Note left of prover: compute commitments $$\ (c_i)_{0 \leq i \leq m - 1} \text{ as } [P_i(\tau)]_1$$
-    Note left of prover: compute shard $$\ s_{\alpha} = (s_{\alpha, i})_{0 \leq i \leq m - 1} = (P_i(\alpha))_{0 \leq i \leq m - 1}$$
-    Note left of prover: compute aggregation $$\ Q(X) = \sum\limits_{i = 0}^{m - 1}r^i P_i(X) \text{ where } r = H(P_0(\alpha)|...|P_{m-1}(\alpha))$$
+    Note left of prover: compute commitments $$\ (c_i) \text{ as } [P_i(\tau)]_1$$
+    Note left of prover: compute shard $$\ s_{\alpha} = (s_{\alpha, i}) = (P_i(\alpha))$$
+    Note left of prover: compute aggregation $$\ Q(X) = \sum\limits_i r^i P_i(X) \text{ where } r = H(P_0(\alpha)|...|P_{m-1}(\alpha))$$
     Note left of prover: compute proof $$\ \pi_{\alpha} = \left[ \frac{Q(\tau) - Q(\alpha)}{\tau - \alpha} \right]_1$$
 
-    prover->>verifier: $$\pi_{\alpha} \text{, } s_{\alpha} \text{, } (c_i)_{0 \leq i \leq m - 1}$$
+    prover->>verifier: $$\pi_{\alpha} \text{, } s_{\alpha} \text{, } (c_i)$$
 
     Note right of verifier: compute $$\ r = H(s_0|...|s_{m - 1})$$
-    Note right of verifier: compute $$\ y = \sum\limits_{i = 0}^{m - 1} r^i s_{\alpha, i}$$
-    Note right of verifier: compute $$\ c = \sum\limits_{i = 0}^{m - 1} r^i c_i$$
+    Note right of verifier: compute $$\ y = \sum\limits_i r^i s_{\alpha, i}$$
+    Note right of verifier: compute $$\ c = \sum\limits_i r^i c_i$$
     Note right of verifier: assert $$\ E(c - [y]_1, [1]_2) = E(\pi_{\alpha}, [\tau - \alpha]_2)$$
diff --git a/src/kzg.rs b/src/kzg.rs
index c3ba0865..fa644ea5 100644
--- a/src/kzg.rs
+++ b/src/kzg.rs
@@ -4,6 +4,11 @@
 //! > - [Kate et al., 2010](https://link.springer.com/chapter/10.1007/978-3-642-17373-8_11)
 //! > - [Boneh et al., 2020](https://eprint.iacr.org/2020/081)
 //!
+//! > **Note**
+//! >
+//! > in the following:
+//! > - $0 \leq i \leq m - 1$
+//! > - $0 \leq j \leq k - 1$
 #![doc = simple_mermaid::mermaid!("kzg.mmd")]
 //!
 //! # The protocol
-- 
GitLab
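
The last step of the diagram is the pairing check $E(c - [y]_1, [1]_2) = E(\pi_\alpha, [\tau - \alpha]_2)$. Below is a minimal sketch of that single check using arkworks' `Pairing` trait; the function and argument names are illustrative (this is not Komodo's `kzg::verify`), and all inputs are assumed to be supplied by the caller:

```rust
use ark_ec::pairing::Pairing;

/// Verifier's final check from the diagram:
///   E(c - [y]_1, [1]_2) == E(pi_alpha, [tau - alpha]_2)
/// `g1` and `g2` are the generators [1]_1 and [1]_2, and `tau_minus_alpha_g2`
/// is [tau - alpha]_2; all of them come from the trusted setup and the transcript.
fn check_kzg_proof<E: Pairing>(
    c: E::G1,          // aggregated commitment, sum of r^i * c_i
    y: E::ScalarField, // aggregated evaluation, sum of r^i * s_{alpha, i}
    proof: E::G1,      // pi_alpha
    g1: E::G1,
    g2: E::G2,
    tau_minus_alpha_g2: E::G2,
) -> bool {
    E::pairing(c - g1 * y, g2) == E::pairing(proof, tau_minus_alpha_g2)
}
```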


From b74dab5ea7d13a33fa2807ca0a4c7431117e55e2 Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Fri, 28 Mar 2025 16:59:45 +0100
Subject: [PATCH 12/56] document the `algebra` module

---
 src/algebra/mod.rs | 108 ++++++++++++++++++++++++++++++++-------------
 1 file changed, 77 insertions(+), 31 deletions(-)

diff --git a/src/algebra/mod.rs b/src/algebra/mod.rs
index 3ddf0219..3136aec2 100644
--- a/src/algebra/mod.rs
+++ b/src/algebra/mod.rs
@@ -13,17 +13,21 @@ use std::ops::{Div, Mul};
 
 pub mod linalg;
 
-/// split a sequence of raw bytes into valid field elements
+/// Splits a sequence of raw bytes into valid field elements in $\mathbb{F}_p$.
 ///
 /// [`split_data_into_field_elements`] supports padding the output vector of
-/// elements by giving a number that needs to divide the length of the vector.
+/// elements by giving a number that needs to divide the length of the vector, i.e. if we denote
+/// the number of output elements by $o$ and the desired length multiple by $m$ (`modulus`), then
+/// we have
+///
+/// $$ m | o $$
 ///
 /// # Example
-/// In the following example `Fp` is a small finite field with prime order $65537$ and which
+/// In the following example $\mathbb{F}_p$ is a small finite field with prime order $2^{16} + 1$ and which
 /// requires only two bytes to represent elements.
 ///
-/// 1. splitting `0x02000300`, which contains 4 bytes, will result in two elements of `Fp`, i.e. 2
-///    and 3
+/// 1. splitting `0x02000300`, which contains $4$ bytes, will result in two elements of $\mathbb{F}_p$, i.e. $2$
+///    and $3$
 /// ```
 /// # #[derive(ark_ff::MontConfig)]
 /// # #[modulus = "65537"]
@@ -40,9 +44,9 @@ pub mod linalg;
 /// );
 /// # }
 /// ```
-/// 2. splitting `0x0200030004000500`, which contains 8 bytes, and asking for a multiple of 3
-///    elements, will result in 6 elements of `Fp`, i.e. 2, 3, 4 and 5 which come from the data and
-///    two padding elements, set to 1.
+/// 2. splitting `0x0200030004000500`, which contains $8$ bytes, and asking for a multiple of $3$
+///    elements, will result in $6$ elements of $\mathbb{F}_p$, i.e. $2$, $3$, $4$ and $5$ which come from the data and
+///    two padding elements, set to $1$.
 /// ```
 /// # #[derive(ark_ff::MontConfig)]
 /// # #[modulus = "65537"]
@@ -81,9 +85,11 @@ pub fn split_data_into_field_elements<F: PrimeField>(bytes: &[u8], modulus: usiz
     elements
 }
 
-/// merges elliptic curve elements back into a sequence of bytes
+/// Merges elements of $\mathbb{F}_p$ back into a sequence of bytes.
 ///
-/// this is the inverse operation of [`split_data_into_field_elements`].
+/// > **Note**
+/// >
+/// > This is the inverse operation of [`split_data_into_field_elements`].
 pub(crate) fn merge_elements_into_bytes<F: PrimeField>(elements: &[F]) -> Vec<u8> {
     let mut bytes = vec![];
     for e in elements {
@@ -96,11 +102,16 @@ pub(crate) fn merge_elements_into_bytes<F: PrimeField>(elements: &[F]) -> Vec<u8
 }
 
 #[cfg(any(feature = "kzg", feature = "aplonk"))]
-/// compute the linear combination of polynomials
+/// Computes the linear combination of polynomials.
+///
+/// [`scalar_product_polynomial`] computes the linear combination $P$ of $n$
+/// polynomials $(P_i) \in \mathbb{F}_p\[X\]^n \sim \texttt{rhs}$ with
+/// coefficients $(c_i) \in \mathbb{F}_p^n \sim \texttt{lhs}$ as
+///
+/// $$P(X) = \sum\limits_{i = 0}^{n - 1} c_i P_i(X)$$
 ///
-/// if the _lhs_ are the coefficients, $(c_i)$ in a field $\mathbb{F}$, and the _rhs_ are the
-/// polynomials, $(p_i)$ with coefficients in $\mathbb{F}$, then the result of this is
-/// $$P(X) = \sum\limits_{i = 0}^{n - 1} c_i p_i(X)$$
+/// ## Preconditions
+/// - `lhs` and `rhs` should contain the same number of elements.
 pub(crate) fn scalar_product_polynomial<E, P>(lhs: &[E::ScalarField], rhs: &[P]) -> P
 where
     E: Pairing,
@@ -121,12 +132,16 @@ where
 }
 
 #[cfg(feature = "aplonk")]
-/// compute the scalar product between vectors of elements in $G_1$ and in $G_2$ respectively
+/// Computes the "_scalar product_" between vectors of elements in $\mathbb{G}_1$ and in $\mathbb{G}_2$ respectively.
+///
+/// [`scalar_product_pairing`] computes the "_pairing combination_" $c$ of $(a_i) \in \mathbb{G}_1^n \sim \texttt{lhs}$ and
+/// $(b_i) \in \mathbb{G}_2^n \sim \texttt{rhs}$ as
+///
+/// $$ c = \sum\limits_{i = 0}^{n - 1} E(a_i, b_i) $$
 ///
-/// if the _lhs_ are the elements of $G_1$, $(a_i)$, and the _rhs_ are the ones from $G_2$, $(b_i)$,
-/// then the result of this is
-/// $$c = \sum\limits_{i = 0}^{n - 1} E(a_i, b_i)$$
-/// where $E$ is a bilinear mapping from $G_1 \times G_2 \rightarrow G_T$
+/// where $E$ is a [bilinear mapping] from $\mathbb{G}_1 \times \mathbb{G}_2 \rightarrow \mathbb{G}_T$.
+///
+/// [bilinear mapping]: <https://en.wikipedia.org/wiki/Bilinear_map>
 pub(super) fn scalar_product_pairing<E: Pairing>(lhs: &[E::G1], rhs: &[E::G2]) -> PairingOutput<E> {
     lhs.iter()
         .zip(rhs.iter())
@@ -135,11 +150,16 @@ pub(super) fn scalar_product_pairing<E: Pairing>(lhs: &[E::G1], rhs: &[E::G2]) -
 }
 
 #[cfg(feature = "aplonk")]
-/// compute the scalar product between vectors of elements of a finite field $\mathbb{F}$
+/// Computes the [scalar product] between vectors of elements of a finite field $\mathbb{F}_p$
+/// associated with a "_pairing-friendly_" [elliptic curve] $(\mathbb{G}_1, \mathbb{G}_2, \mathbb{G}_T)$.
+///
+/// [`scalar_product`] computes the [scalar product] $c$ of $(a_i) \in \mathbb{F}_p^n \sim \texttt{lhs}$ and
+/// $(b_i) \in \mathbb{F}_p^n \sim \texttt{rhs}$ as
 ///
-/// if _lhs_ is the first vector, $(a_i)$, and _rhs_ is the second, $(b_i)$, then the result of this
-/// is
-/// $$c = \sum\limits_{i = 0}^{n - 1} a_i b_i$$
+/// $$ c = a \cdot b = \sum\limits_{i = 0}^{n - 1} a_i b_i $$
+///
+/// [scalar product]: <https://en.wikipedia.org/wiki/Dot_product>
+/// [elliptic curve]: <https://en.wikipedia.org/wiki/Elliptic_curve>
 pub(super) fn scalar_product<E: Pairing>(
     lhs: &[E::ScalarField],
     rhs: &[E::ScalarField],
@@ -148,13 +168,37 @@ pub(super) fn scalar_product<E: Pairing>(
 }
 
 #[cfg(feature = "aplonk")]
-/// see [`scalar_product`], but with _lhs_ a vector from $G_1$
+/// Computes a linear combination of elements of the group $\mathbb{G}_1$ of a "_pairing-friendly_"
+/// [elliptic curve] $(\mathbb{G}_1, \mathbb{G}_2, \mathbb{G}_T)$, with coefficients in the finite field $\mathbb{F}_p$.
+///
+/// [`scalar_product_g1`] computes the linear combination $c$ of the $(a_i) \in \mathbb{G}_1^n \sim \texttt{lhs}$
+/// with coefficients $(c_i) \in \mathbb{F}_p^n \sim \texttt{rhs}$ as
+///
+/// $$ c = \sum\limits_{i = 0}^{n - 1} c_i a_i $$
+///
+/// > **Note**
+/// >
+/// > [`scalar_product_g1`] is the same as [`scalar_product`], but with elements from $\mathbb{G}_1$.
+///
+/// [elliptic curve]: <https://en.wikipedia.org/wiki/Elliptic_curve>
 pub(super) fn scalar_product_g1<E: Pairing>(lhs: &[E::G1], rhs: &[E::ScalarField]) -> E::G1 {
     lhs.iter().zip(rhs.iter()).map(|(l, r)| l.mul(r)).sum()
 }
 
 #[cfg(feature = "aplonk")]
-/// see [`scalar_product`], but with _lhs_ a vector from $G_2$
+/// Computes a linear combination of elements of the group $\mathbb{G}_2$ of a "_pairing-friendly_"
+/// [elliptic curve] $(\mathbb{G}_1, \mathbb{G}_2, \mathbb{G}_T)$, with coefficients in the finite field $\mathbb{F}_p$.
+///
+/// [`scalar_product_g2`] computes the linear combination $c$ of the $(a_i) \in \mathbb{G}_2^n \sim \texttt{lhs}$
+/// with coefficients $(c_i) \in \mathbb{F}_p^n \sim \texttt{rhs}$ as
+///
+/// $$ c = \sum\limits_{i = 0}^{n - 1} c_i a_i $$
+///
+/// > **Note**
+/// >
+/// > [`scalar_product_g2`] is the same as [`scalar_product`], but with elements from $\mathbb{G}_2$.
+///
+/// [elliptic curve]: <https://en.wikipedia.org/wiki/Elliptic_curve>
 pub(super) fn scalar_product_g2<E: Pairing>(lhs: &[E::G2], rhs: &[E::ScalarField]) -> E::G2 {
     lhs.iter().zip(rhs.iter()).map(|(l, r)| l.mul(r)).sum()
 }
@@ -163,7 +207,7 @@ pub(super) fn scalar_product_g2<E: Pairing>(lhs: &[E::G2], rhs: &[E::ScalarField
 pub(super) mod vector {
     use ark_ff::Zero;
 
-    /// return [0, 0, ..., 0] of size *n* on some group
+    /// Returns $(0, ..., 0) \in \mathbb{F}_p^n$.
     pub fn zero<Z: Zero + Clone>(capacity: usize) -> Vec<Z> {
         let mut vector = Vec::with_capacity(capacity);
         vector.resize(capacity, Z::zero());
@@ -172,12 +216,14 @@ pub(super) mod vector {
     }
 }
 
-/// compute the successive powers of a scalar group element
+/// Computes the successive powers of a scalar $r$ in a field $\mathbb{F}_p$ associated with a
+/// "_pairing-friendly_" [elliptic curve] $(\mathbb{G}_1, \mathbb{G}_2, \mathbb{G}_T)$.
+///
+/// [`powers_of`] will compute a vector $R$ from a scalar $r \in \mathbb{F}_p$ as
+///
+/// $$ R = (1, r, r^2, ..., r^{n-1}) $$
 ///
-/// if the scalar number is called *r*, then [`powers_of`] will return the
-/// following vector:
-///         [1, r, r^2, ..., r^(n-1)]
-/// where *n* is the number of powers
+/// where $n$ is the desired number of powers.
 #[cfg(any(feature = "kzg", feature = "aplonk"))]
 pub(crate) fn powers_of<E: Pairing>(step: E::ScalarField, nb_powers: usize) -> Vec<E::ScalarField> {
     let mut powers = Vec::with_capacity(nb_powers);
-- 
GitLab
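
The new `powers_of` documentation describes the vector $R = (1, r, r^2, ..., r^{n-1})$. A minimal sketch of that computation over any field follows (the crate's version is feature-gated and generic over a `Pairing`, so this is only illustrative):

```rust
use ark_ff::Field;

/// Returns (1, r, r^2, ..., r^(n-1)), as described in the `powers_of` doc above.
fn powers_of<F: Field>(r: F, n: usize) -> Vec<F> {
    let mut powers = Vec::with_capacity(n);
    let mut current = F::one();
    for _ in 0..n {
        powers.push(current);
        current *= r;
    }
    powers
}
```

PATCH 14 below adds a length check on exactly this vector in the existing test.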


From 6ba520979159291d69866c7071f42981c94f2a54 Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Fri, 28 Mar 2025 16:59:58 +0100
Subject: [PATCH 13/56] compile doc with all features

---
 Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Makefile b/Makefile
index e0a254ab..213829e4 100644
--- a/Makefile
+++ b/Makefile
@@ -63,7 +63,7 @@ show:
 
 .PHONY: doc
 doc:
-	cargo doc --document-private-items --no-deps --open
+	cargo doc --all-features --document-private-items --no-deps --open
 
 .PHONY: build-examples
 build-examples:
-- 
GitLab


From a15ae137baf4d262278c1b7cee28cd532015576a Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Fri, 28 Mar 2025 17:08:12 +0100
Subject: [PATCH 14/56] check length of `algebra::powers_of`

---
 src/algebra/mod.rs | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/src/algebra/mod.rs b/src/algebra/mod.rs
index 3136aec2..ad3dc688 100644
--- a/src/algebra/mod.rs
+++ b/src/algebra/mod.rs
@@ -320,10 +320,9 @@ mod tests {
         const POWER: usize = 10;
         let r = E::ScalarField::rand(rng);
 
-        assert_eq!(
-            super::powers_of::<E>(r, POWER + 1).last().unwrap(),
-            &r.pow([POWER as u64])
-        );
+        let res = super::powers_of::<E>(r, POWER + 1);
+        assert_eq!(res.len(), POWER + 1);
+        assert_eq!(res.last().unwrap(), &r.pow([POWER as u64]));
     }
 
     #[cfg(any(feature = "kzg", feature = "aplonk"))]
-- 
GitLab


From f2934d2171e4fb37b800d9d02b9717cc0486ef68 Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Fri, 28 Mar 2025 17:26:20 +0100
Subject: [PATCH 15/56] update Katex to 0.16.21

---
 katex.html | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/katex.html b/katex.html
index 32ac35a4..bfd783cb 100644
--- a/katex.html
+++ b/katex.html
@@ -1,6 +1,7 @@
-<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/katex.min.css" integrity="sha384-9eLZqc9ds8eNjO3TmqPeYcDj8n+Qfa4nuSiGYa6DjLNcv9BtN69ZIulL9+8CqC9Y" crossorigin="anonymous">
-<script src="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/katex.min.js"                  integrity="sha384-K3vbOmF2BtaVai+Qk37uypf7VrgBubhQreNQe9aGsz9lB63dIFiQVlJbr92dw2Lx" crossorigin="anonymous"></script>
-<script src="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/contrib/auto-render.min.js"    integrity="sha384-kmZOZB5ObwgQnS/DuDg6TScgOiWWBiVt0plIRkZCmE6rDZGrEOQeHM5PcHi+nyqe" crossorigin="anonymous"></script>
+<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/katex@0.16.21/dist/katex.min.css" integrity="sha384-zh0CIslj+VczCZtlzBcjt5ppRcsAmDnRem7ESsYwWwg3m/OaJ2l4x7YBZl9Kxxib" crossorigin="anonymous">
+<script defer src="https://cdn.jsdelivr.net/npm/katex@0.16.21/dist/katex.min.js" integrity="sha384-Rma6DA2IPUwhNxmrB/7S3Tno0YY7sFu9WSYMCuulLhIqYSGZ2gKCJWIqhBWqMQfh" crossorigin="anonymous"></script>
+<script defer src="https://cdn.jsdelivr.net/npm/katex@0.16.21/dist/contrib/auto-render.min.js" integrity="sha384-hCXGrW6PitJEwbkoStFjeJxv+fSOOQKOPbJxSfM6G5sWZjAyWhXiTIIAmQqnlLlh" crossorigin="anonymous"
+    onload="renderMathInElement(document.body);"></script>
 <script>
     document.addEventListener("DOMContentLoaded", function() {
         renderMathInElement(document.body, {
-- 
GitLab


From ce35d2608897de92ed4b85ebf08d7de02f9b99d9 Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Fri, 28 Mar 2025 17:26:39 +0100
Subject: [PATCH 16/56] WIP: linalg

---
 src/algebra/linalg.rs | 47 +++++++++++++++++++++----------------------
 src/algebra/mod.rs    |  2 +-
 2 files changed, 24 insertions(+), 25 deletions(-)

diff --git a/src/algebra/linalg.rs b/src/algebra/linalg.rs
index 5293b2c2..2d1e8975 100644
--- a/src/algebra/linalg.rs
+++ b/src/algebra/linalg.rs
@@ -1,38 +1,39 @@
-//! some linear algebra fun
+//! Some linear algebra fun over elements in $\mathbb{F}_p$.
 //!
-//! this module mainly contains an implementation of matrices over a finite
-//! field.
+//! This module mainly contains an implementation of matrices over a finite
+//! field $\mathbb{F}_p$.
 use ark_ff::Field;
 use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
 use ark_std::rand::{Rng, RngCore};
 
 use crate::error::KomodoError;
 
-/// a matrix defined over a finite field
+/// A matrix defined over a finite field $\mathbb{F}_p$.
 ///
-/// internally, a matrix is just a vector of field elements whose length is
+/// Internally, a matrix is just a vector of field elements whose length is
 /// exactly the width times the height and where elements are organized row by
 /// row.
 #[derive(Clone, PartialEq, Default, Debug, CanonicalSerialize, CanonicalDeserialize)]
 pub struct Matrix<T: Field> {
+    /// $h \times w$ elements in $\mathbb{F}_p$.
     pub elements: Vec<T>,
+    /// the number of rows $h$.
     pub height: usize,
+    /// the number of columns $w$.
     pub width: usize,
 }
 
 impl<T: Field> Matrix<T> {
-    /// build a matrix from a diagonal of elements
+    /// Builds a matrix from a diagonal of elements in $\mathbb{F}_p$.
     ///
     /// # Example
-    /// building a diagonal matrix from the diagonal `[1, 2, 3, 4]` would give
-    /// ```text
-    /// [
-    ///     [1, 0, 0, 0],
-    ///     [0, 2, 0, 0],
-    ///     [0, 0, 3, 0],
-    ///     [0, 0, 0, 4],
-    /// ]
-    /// ```
+    /// Building a diagonal matrix from the diagonal $(1, 2, 3, 4)$ would give
+    /// $ \begin{pmatrix}
+    ///     1 & . & . & . \\\\
+    ///     . & 2 & . & . \\\\
+    ///     . & . & 3 & . \\\\
+    ///     . & . & . & 4 \\\\
+    /// \end{pmatrix} $.
     fn from_diagonal(diagonal: Vec<T>) -> Self {
         let size = diagonal.len();
 
@@ -49,17 +50,15 @@ impl<T: Field> Matrix<T> {
         }
     }
 
-    /// build the identity matrix of a given size
+    /// Builds the identity matrix $I_n$ of a given size $n$.
     ///
     /// # Example
-    /// the identity of size 3 is
-    /// ```text
-    /// [
-    ///     [1, 0, 0],
-    ///     [0, 1, 0],
-    ///     [0, 0, 1],
-    /// ]
-    /// ```
+    /// The identity of size $3$ is
+    /// $ I_3 = \begin{pmatrix}
+    ///     1 & . & . \\\\
+    ///     . & 1 & . \\\\
+    ///     . & . & 1 \\\\
+    /// \end{pmatrix} $.
     fn identity(size: usize) -> Self {
         Self::from_diagonal(vec![T::one(); size])
     }
diff --git a/src/algebra/mod.rs b/src/algebra/mod.rs
index ad3dc688..a2f3b9f1 100644
--- a/src/algebra/mod.rs
+++ b/src/algebra/mod.rs
@@ -1,4 +1,4 @@
-//! Manipulate finite field elements
+//! Manipulate elements from finite field $\mathbb{F}_p$.
 #[cfg(any(feature = "kzg", feature = "aplonk"))]
 use ark_ec::pairing::Pairing;
 #[cfg(feature = "aplonk")]
-- 
GitLab
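
The `Matrix` documentation above says the elements are stored row by row in a flat vector of length $h \times w$. A tiny standalone sketch of that indexing convention (not the crate's private `get`/`set`, which appear later in the series):

```rust
/// Row-major index of element (i, j) in a flat `height x width` matrix,
/// matching the layout described in the `Matrix` doc above.
fn index(width: usize, i: usize, j: usize) -> usize {
    i * width + j
}

fn main() {
    // the 2 x 3 matrix [[1, 2, 3], [4, 5, 6]] stored row by row
    let (height, width) = (2, 3);
    let elements = vec![1, 2, 3, 4, 5, 6];
    assert_eq!(elements.len(), height * width);
    assert_eq!(elements[index(width, 1, 2)], 6); // row 1, column 2
}
```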


From 20ef2e05d1886c9c8713f188996ae32b70ae3c42 Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Fri, 28 Mar 2025 17:36:23 +0100
Subject: [PATCH 17/56] add preconditions to "scalar product" functions

---
 src/algebra/mod.rs | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/src/algebra/mod.rs b/src/algebra/mod.rs
index a2f3b9f1..c72e584a 100644
--- a/src/algebra/mod.rs
+++ b/src/algebra/mod.rs
@@ -141,6 +141,9 @@ where
 ///
 /// where $E$ is a [bilinear mapping] from $\mathbb{G}_1 \times \mathbb{G}_2 \rightarrow \mathbb{G}_T$.
 ///
+/// ## Preconditions
+/// - `lhs` and `rhs` should contain the same number of elements.
+///
 /// [bilinear mapping]: <https://en.wikipedia.org/wiki/Bilinear_map>
 pub(super) fn scalar_product_pairing<E: Pairing>(lhs: &[E::G1], rhs: &[E::G2]) -> PairingOutput<E> {
     lhs.iter()
@@ -158,6 +161,9 @@ pub(super) fn scalar_product_pairing<E: Pairing>(lhs: &[E::G1], rhs: &[E::G2]) -
 ///
 /// $$ c = a \cdot b = \sum\limits_{i = 0}^{n - 1} a_i b_i $$
 ///
+/// ## Preconditions
+/// - `lhs` and `rhs` should contain the same number of elements.
+///
 /// [scalar product]: <https://en.wikipedia.org/wiki/Dot_product>
 /// [elliptic curve]: <https://en.wikipedia.org/wiki/Elliptic_curve>
 pub(super) fn scalar_product<E: Pairing>(
@@ -180,6 +186,9 @@ pub(super) fn scalar_product<E: Pairing>(
 /// >
 /// > [`scalar_product_g1`] is the same as [`scalar_product`], but with elements from $\mathbb{G}_1$.
 ///
+/// ## Preconditions
+/// - `lhs` and `rhs` should contain the same number of elements.
+///
 /// [elliptic curve]: <https://en.wikipedia.org/wiki/Elliptic_curve>
 pub(super) fn scalar_product_g1<E: Pairing>(lhs: &[E::G1], rhs: &[E::ScalarField]) -> E::G1 {
     lhs.iter().zip(rhs.iter()).map(|(l, r)| l.mul(r)).sum()
-- 
GitLab


From af5f9e4fbe897382a7b5d8ed36464e74b580e205 Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Tue, 1 Apr 2025 10:14:34 +0200
Subject: [PATCH 18/56] finish linalg

---
 src/algebra/linalg.rs | 87 ++++++++++++++++++++++++-------------------
 1 file changed, 48 insertions(+), 39 deletions(-)

diff --git a/src/algebra/linalg.rs b/src/algebra/linalg.rs
index 2d1e8975..f666aacf 100644
--- a/src/algebra/linalg.rs
+++ b/src/algebra/linalg.rs
@@ -63,20 +63,29 @@ impl<T: Field> Matrix<T> {
         Self::from_diagonal(vec![T::one(); size])
     }
 
-    /// build a Vandermonde matrix for some seed points
+    /// Builds a _Vandermonde_ matrix for some _seed points_.
     ///
-    /// actually, this is the transpose of the Vandermonde matrix defined in the
+    /// Actually, this is the transpose of the Vandermonde matrix defined in the
     /// [Wikipedia article][article], i.e. there are as many columns as there
-    /// are seed points and there are as many rows as there are powers of the
-    /// seed points.
+    /// are seed points, the $(\alpha_i)_{1 \leq i \leq m}$, and there are as
+    /// many rows, $n$, as there are powers of the seed points.
+    ///
+    /// $ M = V_n(\alpha_1, ..., \alpha_m)^T = \begin{pmatrix}
+    ///     1                & 1                & ...    & 1                \\\\
+    ///     \alpha_1         & \alpha_2         & ...    & \alpha_m         \\\\
+    ///     \alpha_1^2       & \alpha_2^2       & ...    & \alpha_m^2       \\\\
+    ///     \vdots           & \vdots           & \ddots & \vdots           \\\\
+    ///     \alpha_1^{n - 1} & \alpha_2^{n - 1} & ...    & \alpha_m^{n - 1} \\\\
+    /// \end{pmatrix} $
     ///
     /// > **Note**
     /// >
-    /// > if you are sure the points are distinct and don't want to perform any
+    /// > If you are sure the points are distinct and don't want to perform any
     /// > runtime check to ensure that condition, have a look at
     /// > [`Self::vandermonde_unchecked`].
     ///
     /// # Example
+    /// Let's compute $V_4(0, 1, 2, 3, 4)^T$:
     /// ```rust
     /// # use ark_ff::Field;
     /// # use komodo::algebra::linalg::Matrix;
@@ -120,7 +129,7 @@ impl<T: Field> Matrix<T> {
         Ok(Self::vandermonde_unchecked(points, height))
     }
 
-    /// the unchecked version of [`Self::vandermonde`]
+    /// The unchecked version of [`Self::vandermonde`].
     pub fn vandermonde_unchecked(points: &[T], height: usize) -> Self {
         let width = points.len();
 
@@ -142,7 +151,7 @@ impl<T: Field> Matrix<T> {
         }
     }
 
-    /// build a completely random matrix of shape $n \times m$
+    /// Builds a completely random matrix of shape $n \times m$.
     pub fn random<R: RngCore>(n: usize, m: usize, rng: &mut R) -> Self {
         Self {
             elements: (0..(n * m)).map(|_| T::from(rng.gen::<u128>())).collect(),
@@ -151,11 +160,11 @@ impl<T: Field> Matrix<T> {
         }
     }
 
-    /// build a matrix from a "_matrix_" of elements
+    /// Builds a matrix from a "_matrix_" of elements.
     ///
     /// > **Note**
     /// >
-    /// > if you are sure each row should have the same length and don't want to
+    /// > If you are sure each row should have the same length and don't want to
     /// > perform any runtime check to ensure that condition, have a look at
     /// > [`Self::from_vec_vec_unchecked`].
     ///
@@ -219,7 +228,7 @@ impl<T: Field> Matrix<T> {
         Ok(Self::from_vec_vec_unchecked(matrix))
     }
 
-    /// the unchecked version of [`Self::from_vec_vec`]
+    /// The unchecked version of [`Self::from_vec_vec`].
     pub fn from_vec_vec_unchecked(matrix: Vec<Vec<T>>) -> Self {
         let height = matrix.len();
         let width = matrix[0].len();
@@ -247,11 +256,11 @@ impl<T: Field> Matrix<T> {
         self.elements[i * self.width + j] = value;
     }
 
-    /// extract a single column from the matrix
+    /// Extracts a single column from the matrix.
     ///
     /// > **Note**
     /// >
-    /// > returns `None` if the provided index is out of bounds
+    /// > Returns `None` if the provided index is out of bounds.
     pub(crate) fn get_col(&self, j: usize) -> Option<Vec<T>> {
         if j >= self.width {
             return None;
@@ -260,14 +269,14 @@ impl<T: Field> Matrix<T> {
         Some((0..self.height).map(|i| self.get(i, j)).collect())
     }
 
-    /// compute _row = row / value_
+    /// Computes $\text{row} = \frac{\text{row}}{\text{value}}$.
     fn divide_row_by(&mut self, row: usize, value: T) {
         for j in 0..self.width {
             self.set(row, j, self.get(row, j) / value);
         }
     }
 
-    /// compute _destination = destination + source * value_
+    /// Computes $\text{destination} = \text{destination} + \text{source} \times \text{value}$.
     fn multiply_row_by_and_add_to_row(&mut self, source: usize, value: T, destination: usize) {
         for j in 0..self.width {
             self.set(
@@ -278,13 +287,11 @@ impl<T: Field> Matrix<T> {
         }
     }
 
-    /// compute the inverse of the matrix
+    /// Computes the inverse of the matrix.
     ///
-    /// > **Note**
-    /// >
-    /// > the matrix should be
-    /// > - square
-    /// > - invertible
+    /// If $M \in \mathcal{M}_{n \times n}(\mathbb{F}_p)$ is an invertible matrix,
+    /// then [`Self::invert`] computes $M^{-1}$ such that
+    /// $$ MM^{-1} = M^{-1}M = I_n$$
     pub fn invert(&self) -> Result<Self, KomodoError> {
         if self.height != self.width {
             return Err(KomodoError::NonSquareMatrix(self.height, self.width));
@@ -314,29 +321,30 @@ impl<T: Field> Matrix<T> {
         Ok(inverse)
     }
 
-    /// swap rows `i` and `j`, inplace
+    /// Swaps rows $i$ and $j$, inplace.
     ///
     /// > **Note**
     /// >
-    /// > this function assumes both `i` and `j` are in bounds, unexpected
-    /// > results are expected if `i` or `j` are out of bounds.
+    /// > This function assumes both $i$ and $j$ are in bounds, unexpected
+    /// > results are expected if $i$ or $j$ are out of bounds.
     fn swap_rows(&mut self, i: usize, j: usize) {
         for k in 0..self.width {
             self.elements.swap(i * self.width + k, j * self.width + k);
         }
     }
 
-    /// compute the rank of the matrix
+    /// Computes the rank of the matrix.
+    ///
+    /// Let $M \in \mathcal{M}_{n \times m}(\mathbb{F}_p)$ and $r(M)$ its rank:
+    /// - the rank is at most the minimum of the height and the width of the
+    ///   matrix, i.e. $r(M) \leq \min(n, m)$
+    /// - a square and invertible matrix has _full rank_, i.e. its rank equals
+    ///   its size: if $M$ is square and invertible, then $r(M) = n$
     ///
     /// > **Note**
     /// >
-    /// > see the [_Wikipedia article_](https://en.wikipedia.org/wiki/Rank_(linear_algebra))
+    /// > See the [_Wikipedia article_](https://en.wikipedia.org/wiki/Rank_(linear_algebra))
     /// > for more information
-    /// >
-    /// > - the rank is always smaller than the min between the height and the
-    /// >   width of any matrix.
-    /// > - a square and invertible matrix will have _full rank_, i.e. it will
-    /// >   be equal to its size.
     pub fn rank(&self) -> usize {
         let mut mat = self.clone();
         let mut i = 0;
@@ -376,15 +384,16 @@ impl<T: Field> Matrix<T> {
         nb_non_zero_rows
     }
 
-    /// compute the matrix multiplication with another matrix
+    /// Computes the matrix multiplication with another matrix.
     ///
-    /// if `lhs` represents a matrix $A$ and `rhs` is the representation of
-    /// another matrix $B$, then `lhs.mul(rhs)` will compute $A \times B$
+    /// Let $A \in \mathcal{M}_{a \times b}(\mathbb{F}_p) \sim \texttt{lhs}$ and
+    /// $B \in \mathcal{M}\_{c \times d}(\mathbb{F}_p) \sim \texttt{rhs}$ then
+    /// `lhs.mul(rhs)` will compute $A \times B$.
     ///
     /// > **Note**
     /// >
-    /// > both matrices should have compatible shapes, i.e. if `self` has shape
-    /// > `(n, m)` and `rhs` has shape `(p, q)`, then `m == p`.
+    /// > Both matrices should have compatible shapes, i.e. if `self` has shape
+    /// > `(a, b)` and `rhs` has shape `(c, d)`, then `b == c`.
     pub fn mul(&self, rhs: &Self) -> Result<Self, KomodoError> {
         if self.width != rhs.height {
             return Err(KomodoError::IncompatibleMatrixShapes(
@@ -415,7 +424,7 @@ impl<T: Field> Matrix<T> {
         })
     }
 
-    /// compute the transpose of the matrix
+    /// Computes the transpose of the matrix.
     ///
     /// > **Note**
     /// >
@@ -440,11 +449,11 @@ impl<T: Field> Matrix<T> {
         }
     }
 
-    /// truncate the matrix to the provided shape, from right and bottom
+    /// Truncates the matrix to the provided shape, from right and bottom.
     ///
     /// # Example
-    /// if a matrix has shape `(10, 11)` and is truncated to `(5, 7)`, the 5
-    /// bottom rows and 4 right columns will be removed.
+    /// If a matrix has shape $(10, 11)$ and is truncated to $(5, 7)$, the $5$
+    /// bottom rows and $4$ right columns will be removed.
     pub(crate) fn truncate(&self, rows: Option<usize>, cols: Option<usize>) -> Self {
         let width = if let Some(w) = cols {
             self.width - w
-- 
GitLab
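
The Vandermonde documentation above defines $M = V_n(\alpha_1, ..., \alpha_m)^T$, whose row $i$ holds the $i$-th power of every seed point. A minimal sketch of that construction as nested vectors (illustrative only; the crate's `vandermonde_unchecked` fills its flat row-major `Matrix` instead):

```rust
use ark_ff::Field;

/// Transposed Vandermonde matrix as rows of powers: row i contains
/// alpha_1^i, ..., alpha_m^i for i in 0..height, as in the doc above.
fn vandermonde_t<F: Field>(points: &[F], height: usize) -> Vec<Vec<F>> {
    (0..height)
        .map(|i| points.iter().map(|p| p.pow([i as u64])).collect())
        .collect()
}
```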


From 3095c620a2539fd8984c06eb253fd960ed2bc9c1 Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Tue, 1 Apr 2025 10:32:19 +0200
Subject: [PATCH 19/56] linalg: use exact matches and struct enums

---
 src/algebra/linalg.rs | 77 ++++++++++++++++++++++---------------------
 src/error.rs          | 28 +++++++++-------
 2 files changed, 56 insertions(+), 49 deletions(-)

diff --git a/src/algebra/linalg.rs b/src/algebra/linalg.rs
index f666aacf..3de62a38 100644
--- a/src/algebra/linalg.rs
+++ b/src/algebra/linalg.rs
@@ -117,11 +117,11 @@ impl<T: Field> Matrix<T> {
         for i in 0..points.len() {
             for j in (i + 1)..points.len() {
                 if points[i] == points[j] {
-                    return Err(KomodoError::InvalidVandermonde(
-                        i,
-                        j,
-                        format!("{}", points[i]),
-                    ));
+                    return Err(KomodoError::InvalidVandermonde {
+                        first_index: i,
+                        second_index: j,
+                        value_repr: format!("{}", points[i]),
+                    });
                 }
             }
         }
@@ -216,12 +216,11 @@ impl<T: Field> Matrix<T> {
         let width = matrix[0].len();
         for (i, row) in matrix.iter().enumerate() {
             if row.len() != width {
-                return Err(KomodoError::InvalidMatrixElements(format!(
-                    "expected rows to be of same length {}, found {} at row {}",
-                    width,
-                    row.len(),
-                    i
-                )));
+                return Err(KomodoError::InvalidMatrixElements {
+                    expected: width,
+                    found: row.len(),
+                    row: i,
+                });
             }
         }
 
@@ -396,12 +395,10 @@ impl<T: Field> Matrix<T> {
     /// > `(a, b)` and `rhs` has shape `(c, d)`, then `b == c`.
     pub fn mul(&self, rhs: &Self) -> Result<Self, KomodoError> {
         if self.width != rhs.height {
-            return Err(KomodoError::IncompatibleMatrixShapes(
-                self.height,
-                self.width,
-                rhs.height,
-                rhs.width,
-            ));
+            return Err(KomodoError::IncompatibleMatrixShapes {
+                left: (self.height, self.width),
+                right: (rhs.height, rhs.width),
+            });
         }
 
         let height = self.height;
@@ -622,10 +619,14 @@ mod tests {
 
         let matrix = Matrix::<Fr>::from_vec_vec(mat_to_elements(vec![vec![0], vec![0, 0]]));
         assert!(matrix.is_err());
-        assert!(matches!(
+        assert_eq!(
             matrix.err().unwrap(),
-            KomodoError::InvalidMatrixElements(..)
-        ));
+            KomodoError::InvalidMatrixElements {
+                expected: 1,
+                found: 2,
+                row: 1,
+            }
+        );
     }
 
     #[test]
@@ -667,10 +668,13 @@ mod tests {
         ]))
         .unwrap();
 
-        assert!(matches!(
+        assert_eq!(
             a.mul(&Matrix::<Fr>::from_vec_vec(mat_to_elements(vec![vec![1, 2]])).unwrap()),
-            Err(KomodoError::IncompatibleMatrixShapes(3, 3, 1, 2))
-        ));
+            Err(KomodoError::IncompatibleMatrixShapes {
+                left: (3, 3),
+                right: (1, 2)
+            })
+        );
 
         let product = a.mul(&b).unwrap();
         let expected = Matrix::<Fr>::from_vec_vec(mat_to_elements(vec![
@@ -721,17 +725,11 @@ mod tests {
                 .unwrap()
                 .invert();
         assert!(inverse.is_err());
-        assert!(matches!(
-            inverse.err().unwrap(),
-            KomodoError::NonSquareMatrix(..)
-        ));
+        assert_eq!(inverse.err().unwrap(), KomodoError::NonSquareMatrix(2, 3));
 
         let inverse = Matrix::<Fr>::from_diagonal(vec_to_elements(vec![0, 3, 4])).invert();
         assert!(inverse.is_err());
-        assert!(matches!(
-            inverse.err().unwrap(),
-            KomodoError::NonInvertibleMatrix(0)
-        ));
+        assert_eq!(inverse.err().unwrap(), KomodoError::NonInvertibleMatrix(0));
 
         let inverse = Matrix::<Fr>::from_vec_vec(mat_to_elements(vec![
             vec![1, 1, 0],
@@ -741,19 +739,22 @@ mod tests {
         .unwrap()
         .invert();
         assert!(inverse.is_err());
-        assert!(matches!(
-            inverse.err().unwrap(),
-            KomodoError::NonInvertibleMatrix(1)
-        ));
+        assert_eq!(inverse.err().unwrap(), KomodoError::NonInvertibleMatrix(1));
     }
 
     #[test]
     fn vandermonde() {
-        assert!(Matrix::<Fr>::vandermonde(&vec_to_elements(vec![0, 4, 2, 3, 4]), 4).is_err());
+        assert_eq!(
+            Matrix::<Fr>::vandermonde(&vec_to_elements(vec![0, 4, 2, 3, 4]), 4),
+            Err(KomodoError::InvalidVandermonde {
+                first_index: 1,
+                second_index: 4,
+                value_repr: "4".to_string()
+            }),
+        );
         assert!(Matrix::<Fr>::vandermonde(&vec_to_elements(vec![0, 1, 2, 3, 4]), 4).is_ok());
 
-        let actual =
-            Matrix::<Fr>::vandermonde_unchecked(&mat_to_elements(vec![vec![0, 1, 2, 3, 4]])[0], 4);
+        let actual = Matrix::<Fr>::vandermonde_unchecked(&vec_to_elements(vec![0, 1, 2, 3, 4]), 4);
         #[rustfmt::skip]
         let expected = Matrix::from_vec_vec(mat_to_elements(vec![
             vec![1, 1, 1,  1,  1],
diff --git a/src/error.rs b/src/error.rs
index cec1afd2..5c38d8ab 100644
--- a/src/error.rs
+++ b/src/error.rs
@@ -11,25 +11,31 @@ use thiserror::Error;
 /// - related to proving the shards
 #[derive(Clone, Debug, Error, PartialEq)]
 pub enum KomodoError {
-    /// `{0}` is a custom error message when a matrix is invalid.
-    #[error("Invalid matrix elements: {0}")]
-    InvalidMatrixElements(String),
+    #[error("expected rows to be of same length {expected}, found {found} at row {row}")]
+    InvalidMatrixElements {
+        expected: usize,
+        found: usize,
+        row: usize,
+    },
     /// `{0}` and `{1}` are the shape of the rectangular matrix.
     #[error("Matrix is not a square, ({0} x {1})")]
     NonSquareMatrix(usize, usize),
     /// `{0}` is the ID of the row where the matrix inversion failed.
     #[error("Matrix is not invertible at row {0}")]
     NonInvertibleMatrix(usize),
-    /// `{0}` and `{1}` are the shape of the left matrix and `{2}` and `{3}` are the shape of the
-    /// right matrix.
-    #[error("Matrices don't have compatible shapes: ({0} x {1}) and ({2} x {3})")]
-    IncompatibleMatrixShapes(usize, usize, usize, usize),
-    /// `{0}` and `{1}` are the IDs of the non-distinct _Vandermonde_ points and `{2}` is the list
-    /// of all the _Vandermonde_ points.
+    #[error("Matrices don't have compatible shapes: {left:?}, {right:?}")]
+    IncompatibleMatrixShapes {
+        left: (usize, usize),
+        right: (usize, usize),
+    },
     #[error(
-        "Seed points of a Vandermonde matrix should be distinct: {0} and {1} are the same ({2})"
+        "Seed points of a Vandermonde matrix should be distinct: {first_index} and {second_index} are the same ({value_repr})"
     )]
-    InvalidVandermonde(usize, usize, String),
+    InvalidVandermonde {
+        first_index: usize,
+        second_index: usize,
+        value_repr: String,
+    },
     /// `{0}` is the actual number of shards and `{1}` is the expected amount.
     #[error("Expected at least {1} shards, got {0}")]
     TooFewShards(usize, usize),
-- 
GitLab
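
The switch to struct variants above is what lets the tests assert exact errors with `assert_eq!` instead of the looser `matches!`. A minimal standalone sketch of the pattern with `thiserror` (the `Error` enum here is illustrative, not `KomodoError` itself):

```rust
use thiserror::Error;

#[derive(Debug, Error, PartialEq)]
enum Error {
    /// struct variant: named fields show up in the message and in exact test matches
    #[error("expected rows to be of same length {expected}, found {found} at row {row}")]
    InvalidRow { expected: usize, found: usize, row: usize },
}

fn main() {
    let err = Error::InvalidRow { expected: 1, found: 2, row: 1 };
    // exact match on every field, as in the updated `linalg` tests
    assert_eq!(err, Error::InvalidRow { expected: 1, found: 2, row: 1 });
    println!("{err}");
}
```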


From 86d04806d2c5b2704cf5e61c2b9ba9999c2e48ec Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Tue, 1 Apr 2025 10:55:48 +0200
Subject: [PATCH 20/56] doc error

---
 src/error.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/error.rs b/src/error.rs
index 5c38d8ab..304e3fd4 100644
--- a/src/error.rs
+++ b/src/error.rs
@@ -1,6 +1,6 @@
 //! Komodo-specific errors
 //!
-//! there are a few linear algebra errors and some related to [crate::zk].
+//! There are a few linear algebra errors and some related to [crate::zk].
 use thiserror::Error;
 
 /// An error that Komodo could end up producing.
-- 
GitLab


From f833c4640c6c0d64f53683444875e42e5a4649b1 Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Tue, 1 Apr 2025 10:59:24 +0200
Subject: [PATCH 21/56] doc fs

---
 src/fs.rs | 44 ++++++++++++++++++++++----------------------
 1 file changed, 22 insertions(+), 22 deletions(-)

diff --git a/src/fs.rs b/src/fs.rs
index 360a698c..af75bd42 100644
--- a/src/fs.rs
+++ b/src/fs.rs
@@ -1,4 +1,4 @@
-//! interact with the filesystem, read from and write to it
+//! Interact with the filesystem, read from and write to it.
 use std::{
     fs::File,
     io::prelude::*,
@@ -15,18 +15,18 @@ use tracing::info;
 
 use crate::semi_avid::Block;
 
-/// dump any serializable object to the disk
+/// Dumps any serializable object to the disk.
 ///
 /// - `dumpable` can be anything that is _serializable_
 /// - if `filename` is provided, then it will be used as the filename as is
 /// - otherwise, the hash of the _dumpable_ will be computed and used as the
 ///   filename
 ///
-/// this function will return the name of the file the _dumpable_ has been
+/// This function will return the name of the file the _dumpable_ has been
 /// dumped to.
 pub fn dump(
     dumpable: &impl CanonicalSerialize,
-    dump_dir: &Path,
+    directory: &Path,
     filename: Option<&str>,
     compress: Compress,
 ) -> Result<String> {
@@ -43,7 +43,7 @@ pub fn dump(
             .join(""),
     };
 
-    let dump_path = dump_dir.join(&filename);
+    let dump_path = directory.join(&filename);
 
     info!("dumping dumpable into `{:?}`", dump_path);
     let mut file = File::create(&dump_path)?;
@@ -52,29 +52,29 @@ pub fn dump(
     Ok(filename)
 }
 
-/// dump a bunch of blocks to the disk and return a JSON / NUON compatible list
-/// of all the hashes that have been dumped
+/// Dumps a bunch of blocks to the disk and returns a JSON / NUON compatible list
+/// of all the hashes that have been dumped.
 ///
 /// > **Note**
 /// >
-/// > this is a wrapper around [`dump`]
+/// > This is a wrapper around [`dump`].
 ///
 /// # Example
-/// let's say we give three blocks to [`dump_blocks`] and their hashes are `aaaa`, `bbbb` and
+/// Let's say we give three blocks to [`dump_blocks`] and their hashes are `aaaa`, `bbbb` and
 /// `cccc` respectively, then this function will return
 /// ```json
-/// '["aaaa", "bbbb", "cccc"]'
+/// ["aaaa", "bbbb", "cccc"]
 /// ```
 pub fn dump_blocks<F: PrimeField, G: CurveGroup<ScalarField = F>>(
     blocks: &[Block<F, G>],
-    block_dir: &PathBuf,
+    directory: &PathBuf,
     compress: Compress,
 ) -> Result<String> {
-    info!("dumping blocks to `{:?}`", block_dir);
+    info!("dumping blocks to `{:?}`", directory);
     let mut hashes = vec![];
-    std::fs::create_dir_all(block_dir)?;
+    std::fs::create_dir_all(directory)?;
     for block in blocks.iter() {
-        let hash = dump(block, block_dir, None, compress)?;
+        let hash = dump(block, directory, None, compress)?;
         hashes.push(hash);
     }
 
@@ -87,30 +87,30 @@ pub fn dump_blocks<F: PrimeField, G: CurveGroup<ScalarField = F>>(
     Ok(formatted_output)
 }
 
-/// read blocks from a list of block hashes
+/// Reads blocks from a list of block hashes.
 ///
 /// > **Note**
 /// >
-/// > this is a basically the inverse of [`dump_blocks`]
+/// > This is basically the inverse of [`dump_blocks`].
 ///
 /// # Example
-/// let's say we have three blocks `A`, `B` and `C` whose hashes are `aaaa`, `bbbb` and `cccc`
+/// Let's say we have three blocks `A`, `B` and `C` whose hashes are `aaaa`, `bbbb` and `cccc`
 /// respectively.
-/// if one calls [`read_blocks`] with `aaaa` and `cccc` as the queried block hashes, the output of
+/// If one calls [`read_blocks`] with `aaaa` and `cccc` as the queried block hashes, the output of
 /// this function will be
 /// ```ignore
 /// Ok(vec![("aaaa", A), ("cccc", C)])
 /// ```
 pub fn read_blocks<F: PrimeField, G: CurveGroup<ScalarField = F>>(
-    block_hashes: &[String],
-    block_dir: &Path,
+    hashes: &[String],
+    directory: &Path,
     compress: Compress,
     validate: Validate,
 ) -> Result<Vec<(String, Block<F, G>)>> {
-    block_hashes
+    hashes
         .iter()
         .map(|f| {
-            let filename = block_dir.join(f);
+            let filename = directory.join(f);
             let s = std::fs::read(filename)?;
             Ok((
                 f.clone(),
-- 
GitLab
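
For reference, the `dump` / `dump_blocks` / `read_blocks` trio documented in this patch follows one simple convention: serialize, pick a filename (the caller's, or one derived from the content), write, and return the name. Below is a minimal standalone sketch of that contract using only `std`; `dump_bytes` and its hex-encoded fallback name are illustrative stand-ins, not the crate's API, which accepts any `CanonicalSerialize` type and derives the fallback name from a hash of the object.

```rust
use std::{fs::File, io::Write, path::Path};

fn dump_bytes(bytes: &[u8], directory: &Path, filename: Option<&str>) -> std::io::Result<String> {
    // use the provided name, or derive one from the content (a plain hex encoding
    // here, standing in for the hash computed by the real `dump`)
    let filename = match filename {
        Some(name) => name.to_string(),
        None => bytes.iter().map(|b| format!("{b:02x}")).collect::<Vec<_>>().join(""),
    };
    let path = directory.join(&filename);
    File::create(&path)?.write_all(bytes)?;
    Ok(filename)
}

fn main() -> std::io::Result<()> {
    let dir = std::env::temp_dir();
    let name = dump_bytes(b"hello komodo", &dir, None)?;
    println!("dumped to {}", dir.join(name).display());
    Ok(())
}
```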


From cd9caae0ad8f53a559d689aa354a419df0cbcb92 Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Tue, 1 Apr 2025 11:54:14 +0200
Subject: [PATCH 22/56] doc zk

---
 src/error.rs |  6 ++---
 src/zk.rs    | 69 ++++++++++++++++++++++++++++------------------------
 2 files changed, 39 insertions(+), 36 deletions(-)

diff --git a/src/error.rs b/src/error.rs
index 304e3fd4..594b67f3 100644
--- a/src/error.rs
+++ b/src/error.rs
@@ -47,10 +47,8 @@ pub enum KomodoError {
     IncompatibleBlocks(String),
     #[error("Degree is zero")]
     DegreeIsZero,
-    /// `{0}` is the supported degree of the trusted setup and `{1}` is the actual requested
-    /// polynomial degree
-    #[error("too many coefficients: max is {0}, found {0}")]
-    TooFewPowersInTrustedSetup(usize, usize),
+    #[error("too many coefficients: max is {powers}, found {coefficients}")]
+    TooFewPowersInTrustedSetup { powers: usize, coefficients: usize },
     /// `{0}` is a custom error message.
     #[error("Another error: {0}")]
     Other(String),
diff --git a/src/zk.rs b/src/zk.rs
index 09640c6d..7c4452f1 100644
--- a/src/zk.rs
+++ b/src/zk.rs
@@ -1,9 +1,14 @@
-//! a replacement of Arkworks' KZG10 module
+//! A replacement for Arkworks' KZG10 module.
 //!
-//! this module mostly redefines [`ark_poly_commit::kzg10::KZG10::setup`] and
+//! This module mostly redefines [`ark_poly_commit::kzg10::KZG10::setup`] and
 //! [`ark_poly_commit::kzg10::KZG10::commit`] to be used with [`crate::semi_avid`].
 //!
-//! also defines some tool functions such as [`trim`] or [`nb_elements_in_setup`].
+//! Also defines some tool functions such as [`nb_elements_in_setup`] or [`trim`], the latter
+//! being gated behind the `kzg` or `aplonk` features.
+//!
+//! > **Note**
+//! >
+//! > Throughout this module, _ZK_ means _Zero-Knowledge_.
 use ark_ec::{scalar_mul::fixed_base::FixedBase, CurveGroup, VariableBaseMSM};
 use ark_ff::PrimeField;
 use ark_poly::DenseUVPolynomial;
@@ -17,13 +22,16 @@ use ark_poly_commit::kzg10;
 
 use crate::error::KomodoError;
 
-/// a ZK trusted setup
+/// A ZK trusted setup.
+///
+/// This is a simple wrapper around a sequence of elements of the curve, the first $d$ powers of a
+/// _toxic waste_ element $\tau$ on $\mathbb{G}_1$.
 ///
-/// this is a simple wrapper around a sequence of elements of the curve.
+/// $$ \text{TS} = ([\tau^j]_1)\_{0 \leq j \leq d - 1} $$
 ///
 /// > **Note**
 /// >
-/// > this is a simpler version of [`ark_poly_commit::kzg10::UniversalParams`]
+/// > This is a simpler version of [`ark_poly_commit::kzg10::UniversalParams`].
 #[derive(Debug, Clone, Default, CanonicalSerialize, CanonicalDeserialize, PartialEq)]
 pub struct Powers<F: PrimeField, G: CurveGroup<ScalarField = F>>(Vec<G::Affine>);
 
@@ -42,21 +50,25 @@ impl<F: PrimeField, G: CurveGroup<ScalarField = F>> IntoIterator for Powers<F, G
     }
 }
 
-/// a ZK commitment, i.e. an evaluation of a given polynomial on a secret element
+/// A ZK commitment, i.e. an evaluation of a given polynomial on a secret element.
 ///
-/// this is a simple wrapper around a single elemenf of the curve.
+/// This is a simple wrapper around a single element of the curve.
+///
+/// If $P = (p_j)$ is the polynomial to commit and $\tau$ is the secret, then [`Commitment`] will
+/// hold
+/// $$\text{com}(P) = [P(\tau)]_1 = \sum\limits\_{j = 0}^{\text{deg}(P) - 1} p_j [\tau^j]_1$$
 ///
 /// > **Note**
 /// >
-/// > this is a simpler version of [`ark_poly_commit::kzg10::Commitment`]
+/// > This is a simpler version of [`ark_poly_commit::kzg10::Commitment`].
 #[derive(Debug, Clone, Copy, Default, CanonicalSerialize, CanonicalDeserialize, PartialEq)]
 pub struct Commitment<F: PrimeField, G: CurveGroup<ScalarField = F>>(pub G::Affine);
 
-/// create a trusted setup of a given size, the expected maximum degree of the data
+/// Creates a trusted setup of a given size, the expected maximum degree of the data.
 ///
 /// > **Note**
 /// >
-/// > this is a simpler version of [`ark_poly_commit::kzg10::KZG10::setup`]
+/// > This is a simpler version of [`ark_poly_commit::kzg10::KZG10::setup`].
 pub fn setup<F: PrimeField, G: CurveGroup<ScalarField = F>>(
     max_degree: usize,
     rng: &mut impl RngCore,
@@ -90,18 +102,6 @@ pub fn setup<F: PrimeField, G: CurveGroup<ScalarField = F>>(
     Ok(Powers(powers_of_g))
 }
 
-fn check_degree_is_too_large(degree: usize, num_powers: usize) -> Result<(), KomodoError> {
-    let num_coefficients = degree + 1;
-    if num_coefficients > num_powers {
-        Err(KomodoError::TooFewPowersInTrustedSetup(
-            num_powers,
-            num_coefficients,
-        ))
-    } else {
-        Ok(())
-    }
-}
-
 fn skip_leading_zeros_and_convert_to_bigints<F: PrimeField, P: DenseUVPolynomial<F>>(
     p: &P,
 ) -> (usize, Vec<F::BigInt>) {
@@ -122,11 +122,11 @@ fn convert_to_bigints<F: PrimeField>(p: &[F]) -> Vec<F::BigInt> {
     coeffs
 }
 
-/// compute a commitment of a polynomial on a trusted setup
+/// Computes a commitment of a polynomial on a trusted setup.
 ///
 /// > **Note**
 /// >
-/// > this is a simpler version of [`ark_poly_commit::kzg10::KZG10::commit`]
+/// > This is a simpler version of [`ark_poly_commit::kzg10::KZG10::commit`].
 pub fn commit<F, G, P>(
     powers: &Powers<F, G>,
     polynomial: &P,
@@ -136,7 +136,12 @@ where
     G: CurveGroup<ScalarField = F>,
     P: DenseUVPolynomial<F>,
 {
-    check_degree_is_too_large(polynomial.degree(), powers.len())?;
+    if polynomial.degree() + 1 > powers.len() {
+        return Err(KomodoError::TooFewPowersInTrustedSetup {
+            powers: powers.len(),
+            coefficients: polynomial.degree() + 1,
+        });
+    }
 
     let commit_time =
         start_timer!(|| format!("Committing to polynomial of degree {}", polynomial.degree(),));
@@ -155,13 +160,13 @@ where
     Ok(Commitment(commitment.into()))
 }
 
-/// compute the commitments of a set of polynomials
+/// Computes the commitments of a set of polynomials.
 ///
-/// this function uses the commit scheme of KZG.
+/// This function uses the commit scheme of KZG and [`commit`].
 ///
 /// > **Note**
 /// > - `powers` can be generated with functions like [`setup`]
-/// > - if `polynomials` has length `m`, then [`batch_commit`] will generate `m` commits
+/// > - if `polynomials` has length `m`, then [`batch_commit`] will generate `m` commitments
 /// > - see [`commit`] for the individual _commit_ operations
 #[allow(clippy::type_complexity)]
 #[inline(always)]
@@ -184,8 +189,8 @@ where
     Ok(commits)
 }
 
-/// compute the number of elements that a _trusted setup_ should have for data of
-/// a certain expected size
+/// Computes the number of elements that a _trusted setup_ $TS$ should have for data of
+/// a certain expected size.
 pub fn nb_elements_in_setup<F: PrimeField>(nb_bytes: usize) -> usize {
     let bytes_per_element = (F::MODULUS_BIT_SIZE as usize) / 8;
     nb_bytes / bytes_per_element
@@ -223,7 +228,7 @@ pub fn trim<E: Pairing>(
 
 #[cfg(any(feature = "kzg", feature = "aplonk"))]
 #[allow(clippy::type_complexity)]
-/// same as [`batch_commit`] but uses [`ark_poly_commit::kzg10::KZG10::commit`] instead of [`commit`]
+/// Same as [`batch_commit`] but uses [`ark_poly_commit::kzg10::KZG10::commit`] instead of [`commit`].
 pub fn ark_commit<E, P>(
     powers: &kzg10::Powers<E>,
     polynomials: &[P],
-- 
GitLab
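
The check that this patch inlines into `commit` is worth restating on its own: a polynomial of degree $d$ has $d + 1$ coefficients, so the trusted setup must contain at least that many powers. A self-contained sketch of that logic, with `KomodoError` re-declared locally (only the variant introduced above) so the snippet compiles on its own:

```rust
#[derive(Debug, PartialEq)]
enum KomodoError {
    TooFewPowersInTrustedSetup { powers: usize, coefficients: usize },
}

/// A polynomial of degree d has d + 1 coefficients and needs at least that many
/// powers of tau in the trusted setup.
fn check_degree(degree: usize, powers: usize) -> Result<(), KomodoError> {
    let coefficients = degree + 1;
    if coefficients > powers {
        return Err(KomodoError::TooFewPowersInTrustedSetup { powers, coefficients });
    }
    Ok(())
}

fn main() {
    assert_eq!(check_degree(9, 10), Ok(())); // just enough powers
    assert_eq!(
        check_degree(10, 10),
        Err(KomodoError::TooFewPowersInTrustedSetup { powers: 10, coefficients: 11 })
    );
}
```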


From ba26e8c5faeaaa3c06c8874468697e00fba611b2 Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Tue, 1 Apr 2025 13:04:03 +0200
Subject: [PATCH 23/56] doc fec

---
 src/fec.rs | 73 ++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 52 insertions(+), 21 deletions(-)

diff --git a/src/fec.rs b/src/fec.rs
index 95149369..3d76a2b5 100644
--- a/src/fec.rs
+++ b/src/fec.rs
@@ -1,4 +1,20 @@
-//! a module to encode, recode and decode shards of data with FEC methods.
+//! A module to encode, recode and decode shards of data with [FEC] methods.
+//!
+//! In all the following, $(k, n)$ codes will be described, where $k$ is the number of source
+//! shards and $n$ is the number of encoded shards.
+//!
+//! The _code ratio_ is defined as $\rho = \frac{k}{n}$.
+//!
+//! ## Example
+//! In the following example, a file is encoded and decoded back.
+//!
+//! The dotted circle in between "_dissemination_" and "_gathering_" represents the "_life_" of the
+//! shards, e.g. them being shared between peers on a network, recoded or lost.
+#![doc = simple_mermaid::mermaid!("fec.mmd")]
+//! In the end, [FEC] methods guarantee that $F^* = F$, as long as at least $k$ linearly
+//! independent shards are gathered before decoding.
+//!
+//! [FEC]: https://en.wikipedia.org/wiki/Error_correction_code
 
 use ark_ff::PrimeField;
 use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
@@ -8,14 +24,16 @@ use rs_merkle::{algorithms::Sha256, Hasher};
 
 use crate::{algebra, algebra::linalg::Matrix, error::KomodoError};
 
-/// representation of a [FEC](https://en.wikipedia.org/wiki/Error_correction_code) shard of data.
+/// Representation of a [FEC] shard of data.
+///
+/// [FEC]: https://en.wikipedia.org/wiki/Error_correction_code
 #[derive(Debug, Default, Clone, PartialEq, CanonicalSerialize, CanonicalDeserialize)]
 pub struct Shard<F: PrimeField> {
     /// the code parameter, required to decode
     pub k: u32,
-    /// tells the decoder how the shard was constructed with respect to the original source shards
+    /// tells the decoder how the shard was constructed with respect to the original source shards.
     ///
-    /// this effectively allows support for _recoding_.
+    /// This effectively allows support for _recoding_.
     ///
     /// If we denote the $k$ source shards by $(s\_i)\_\{0 \le i \lt k\}$, the linear combination by $k$
     /// coefficients $(\alpha_i)_{0 \le i \lt k}$ and $s$ the shard itself, then
@@ -31,9 +49,9 @@ pub struct Shard<F: PrimeField> {
 }
 
 impl<F: PrimeField> Shard<F> {
-    /// compute the linear combination between two [`Shard`]s
+    /// Computes the linear combination between two [`Shard`]s.
     ///
-    /// if we denote the [`Shard`] itself and the other [`Shard`] by $s$ and $o$ respectively, the
+    /// If we denote the [`Shard`] itself and the other [`Shard`] by $s$ and $o$ respectively, the
     /// output is
     /// $$ \alpha s + \beta o $$
     pub fn recode_with(&self, alpha: F, other: &Self, beta: F) -> Self {
@@ -63,17 +81,17 @@ impl<F: PrimeField> Shard<F> {
     }
 }
 
-/// compute the linear combination between an arbitrary number of [`Shard`]s
+/// Computes the linear combination between an arbitrary number of [`Shard`]s.
 ///
 /// > **Note**
 /// >
-/// > this is basically a multi-[`Shard`] wrapper around [`Shard::recode_with`]
+/// > This is basically a multi-[`Shard`] wrapper around [`Shard::recode_with`].
 /// >
-/// > returns [`None`] if the number of shards is not the same as the number of
-/// > coefficients or if no shards are provided.
+/// > [`recode_with_coeffs`] will return [`None`] if the number of shards
+/// > is not the same as the number of coefficients or if no shards are provided.
 ///
-/// if the shards are the $(s \_i)\_\{1 \le i \le n\}$ and the coefficients the
-/// $(\alpha\_i)\_\{0 \le i \le n\}$, then the output will be
+/// If the shards are the $(s \_i)\_\{1 \le i \le n\}$ and the coefficients the
+/// $(\alpha\_i)\_\{1 \le i \le n\}$, then the output will be
 ///
 /// $$ \sum\limits_{i = 1}^{n} \alpha_i s_i$$
 pub fn recode_with_coeffs<F: PrimeField>(shards: &[Shard<F>], coeffs: &[F]) -> Option<Shard<F>> {
@@ -94,16 +112,16 @@ pub fn recode_with_coeffs<F: PrimeField>(shards: &[Shard<F>], coeffs: &[F]) -> O
     Some(s)
 }
 
-/// compute a recoded shard from an arbitrary set of shards
+/// Computes a recoded shard from an arbitrary set of shards.
 ///
-/// coefficients will be drawn at random, one for each shard.
+/// Coefficients will be drawn at random, one for each shard.
 ///
-/// if the shards appear to come from different data, e.g. if `k` is not the
+/// If the shards appear to come from different data, e.g. if $k$ is not the
 /// same or the hash of the data is different, an error will be returned.
 ///
 /// > **Note**
 /// >
-/// > this is a wrapper around [`recode_with_coeffs`].
+/// > This is a wrapper around [`recode_with_coeffs`].
 pub fn recode_random<F: PrimeField>(
     shards: &[Shard<F>],
     rng: &mut impl RngCore,
@@ -133,15 +151,21 @@ pub fn recode_random<F: PrimeField>(
     Ok(recode_with_coeffs(shards, &coeffs))
 }
 
-/// applies a given encoding matrix to some data to generate encoded shards
+/// Applies a given encoding matrix to some data to generate encoded shards.
+///
+/// We arrange the source shards to be encoded in an $m \times k$ matrix $S$, i.e. $k$ shards of
+/// length $m$. The encoding matrix $M$ is then a $k \times n$ matrix and the encoded shards are
+/// the $n$ columns of
+///
+/// $$E = S M$$
 ///
 /// > **Note**
 /// >
-/// > the input data and the encoding matrix should have compatible shapes,
+/// > The input data and the encoding matrix should have compatible shapes,
 /// > otherwise, an error might be thrown to the caller.
 ///
 /// Padding might be applied depending on the size of the data compared to the size of the encoding
-/// matrix. (see [`algebra::split_data_into_field_elements`])
+/// matrix, see [`algebra::split_data_into_field_elements`].
 ///
 /// This is the inverse of [`decode`].
 pub fn encode<F: PrimeField>(
@@ -175,11 +199,18 @@ pub fn encode<F: PrimeField>(
         .collect())
 }
 
-/// reconstruct the original data from a set of encoded, possibly recoded, shards
+/// Reconstructs the original data from a set of encoded, possibly recoded, shards.
+///
+/// Let's assume at least $k$ linearly independent shards have been retrieved and put in a matrix
+/// $\hat{E}$. We use the [linear combination][`Shard::linear_combination`] of each shard to
+/// reconstruct the columns of the square submatrix $\hat{M}$ that has been used to encode these
+/// shards. Then the reconstructed source shards $\hat{S}$ are given by
+///
+/// $$\hat{S} = \hat{M}^{-1} \hat{E}$$
 ///
 /// > **Note**
 /// >
-/// > this function might fail in a variety of cases
+/// > This function might fail in a variety of cases
 /// > - if there are too few shards
 /// > - if there are linear dependencies between shards
 ///
-- 
GitLab
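
The contract documented for `recode_with_coeffs` boils down to $\sum_i \alpha_i s_i$ plus a `None` guard. The toy sketch below mirrors that behaviour; integers modulo a small prime stand in for `F: PrimeField` and a bare `Vec<u64>` stands in for a `Shard` (the real type also carries `k`, the hash and the size, which `recode_random` checks before combining).

```rust
const P: u64 = 65537; // toy prime field

/// Same contract as the real `recode_with_coeffs`: `None` on a length mismatch or
/// empty input, otherwise the coefficient-weighted sum of the shards.
fn recode_with_coeffs(shards: &[Vec<u64>], coeffs: &[u64]) -> Option<Vec<u64>> {
    if shards.is_empty() || shards.len() != coeffs.len() {
        return None;
    }
    let mut out = vec![0u64; shards[0].len()];
    for (shard, &alpha) in shards.iter().zip(coeffs) {
        for (o, &x) in out.iter_mut().zip(shard) {
            *o = (*o + alpha * x) % P;
        }
    }
    Some(out)
}

fn main() {
    let (s1, s2) = (vec![1, 2, 3], vec![4, 5, 6]);
    // 2*s1 + 3*s2 = [14, 19, 24]
    assert_eq!(recode_with_coeffs(&[s1, s2], &[2, 3]), Some(vec![14, 19, 24]));
    assert_eq!(recode_with_coeffs(&[], &[]), None);
}
```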


From eea70d037f19bebb437f186fe2d6f9e637229d15 Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Tue, 1 Apr 2025 13:21:36 +0200
Subject: [PATCH 24/56] doc semi_avid

---
 src/semi_avid.rs | 32 +++++++++++++++++++++-----------
 1 file changed, 21 insertions(+), 11 deletions(-)

diff --git a/src/semi_avid.rs b/src/semi_avid.rs
index 59638ed8..45fa67cc 100644
--- a/src/semi_avid.rs
+++ b/src/semi_avid.rs
@@ -16,17 +16,27 @@
 //! used: _the commitment of a linear combination of polynomials is equal to the same linear
 //! combination of the commiments of the same polynomials_.
 //!
+//! $$\forall (\alpha_i) \in \mathbb{F}_p, (P_i) \in \mathbb{F}_p[X], \quad \text{com}\left(\sum\limits_i \alpha_i P_i\right) = \sum\limits_i \alpha_i \text{com}(P_i)$$
+//!
 //! This give us a simple, lightweight and fast commitment scheme.
 //!
+//! > **Note**
+//! >
+//! > in the following, the data is arranged in an $m \times k$ matrix and $i$ will denote the
+//! > number of a row and $j$ those of a column
+//! > - $0 \leq i \leq m - 1$
+//! > - $0 \leq j \leq k - 1$
+#![doc = simple_mermaid::mermaid!("semi_avid.mmd")]
+//!
 //! # Example
 //! > **Note**
 //! >
-//! > below, `F`, `G` and `DP<F>` are explicitely specified everywhere but, in _real_ code, i.e.
+//! > Below, `F`, `G` and `DP<F>` are explicitly specified everywhere but, in _real_ code, i.e.
 //! > using generic types as it's commonly done in Arkworks, it should be possible to specify them
 //! > once and Rust will take care of _carrying_ the types in the rest of the code. Also, `DP<F>`
 //! > will likely be its own generic type, usually written `P` in this code base.
 //! >
-//! > see the Semi-AVID example for a fully-typed code.
+//! > See the Semi-AVID example for fully-typed code.
 //!
 //! - first, let's import some types...
 //! ```
@@ -149,9 +159,9 @@ use crate::{
     zk::{self, Commitment, Powers},
 };
 
-/// representation of a block of proven data.
+/// Representation of a block of proven data.
 ///
-/// this is a wrapper around a [`fec::Shard`] with some additional cryptographic
+/// This is a wrapper around a [`fec::Shard`] with some additional cryptographic
 /// information that allows to prove the integrity of said shard.
 #[derive(Debug, Default, Clone, PartialEq, CanonicalSerialize, CanonicalDeserialize)]
 pub struct Block<F: PrimeField, G: CurveGroup<ScalarField = F>> {
@@ -208,16 +218,16 @@ impl<F: PrimeField, G: CurveGroup<ScalarField = F>> std::fmt::Display for Block<
     }
 }
 
-/// compute a recoded block from an arbitrary set of blocks
+/// Computes a recoded block from an arbitrary set of blocks.
 ///
-/// coefficients will be drawn at random, one for each block.
+/// Coefficients will be drawn at random, one for each block.
 ///
-/// if the blocks appear to come from different data, e.g. if the commits are
+/// If the blocks appear to come from different data, e.g. if the commits are
 /// different, an error will be returned.
 ///
 /// > **Note**
 /// >
-/// > this is a wrapper around [`fec::recode_random`].
+/// > This is a wrapper around [`fec::recode_random`].
 pub fn recode<F: PrimeField, G: CurveGroup<ScalarField = F>>(
     blocks: &[Block<F, G>],
     rng: &mut impl RngCore,
@@ -244,7 +254,7 @@ pub fn recode<F: PrimeField, G: CurveGroup<ScalarField = F>>(
     }))
 }
 
-/// compute the Semi-AVID proof for some data
+/// Computes the Semi-AVID proof for some data.
 pub fn prove<F, G, P>(
     bytes: &[u8],
     powers: &Powers<F, G>,
@@ -281,7 +291,7 @@ where
     Ok(commits)
 }
 
-/// attach a Semi-AVID proof to a collection of encoded shards
+/// Attaches a Semi-AVID proof to a collection of encoded shards.
 #[inline(always)]
 pub fn build<F, G, P>(shards: &[Shard<F>], proof: &[Commitment<F, G>]) -> Vec<Block<F, G>>
 where
@@ -299,7 +309,7 @@ where
         .collect::<Vec<_>>()
 }
 
-/// verify that a single block of encoded and proven data is valid
+/// Verifies that a single block of encoded and proven data is valid.
 pub fn verify<F, G, P>(
     block: &Block<F, G>,
     verifier_key: &Powers<F, G>,
-- 
GitLab
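
The linearity property quoted at the top of this patch is the whole trick behind Semi-AVID. The sketch below checks it with a deliberately stripped-down "commitment": the plain evaluation of the polynomial at a fixed secret point modulo a small prime, which keeps the linearity but none of the hiding or binding of the real $[P(\tau)]_1$ on a curve.

```rust
const P: u64 = 65537; // toy prime field
const TAU: u64 = 1234; // stand-in for the secret evaluation point

/// Evaluate a polynomial given by its coefficients (low degree first) at `x`, mod `P`.
fn eval(poly: &[u64], x: u64) -> u64 {
    poly.iter().rev().fold(0, |acc, &c| (acc * x + c) % P)
}

fn main() {
    let p0 = vec![3, 1, 4]; // 3 + X + 4X^2
    let p1 = vec![1, 5, 9]; // 1 + 5X + 9X^2
    let (a0, a1) = (7u64, 11u64);

    // a0*P0 + a1*P1, coefficient by coefficient
    let combo: Vec<u64> = p0
        .iter()
        .zip(&p1)
        .map(|(&c0, &c1)| (a0 * c0 + a1 * c1) % P)
        .collect();

    // "commitment" of the combination vs the same combination of the "commitments"
    let lhs = eval(&combo, TAU);
    let rhs = (a0 * eval(&p0, TAU) + a1 * eval(&p1, TAU)) % P;
    assert_eq!(lhs, rhs);
}
```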


From a3a3d6490551f3b5e6df6f3c05b1654b6fe3c3cc Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Tue, 1 Apr 2025 13:27:16 +0200
Subject: [PATCH 25/56] use struct error for `IncompatibleShards`

---
 src/error.rs | 10 +++++++---
 src/fec.rs   | 30 ++++++++++++++++++------------
 2 files changed, 25 insertions(+), 15 deletions(-)

diff --git a/src/error.rs b/src/error.rs
index 594b67f3..8715d7f8 100644
--- a/src/error.rs
+++ b/src/error.rs
@@ -39,9 +39,13 @@ pub enum KomodoError {
     /// `{0}` is the actual number of shards and `{1}` is the expected amount.
     #[error("Expected at least {1} shards, got {0}")]
     TooFewShards(usize, usize),
-    /// `{0}` is a custom error message when shards are incompatible.
-    #[error("Shards are incompatible: {0}")]
-    IncompatibleShards(String),
+    #[error("Shards are incompatible ({key} is not the same at {index}: {left} vs {right})")]
+    IncompatibleShards {
+        key: String,
+        index: usize,
+        left: String,
+        right: String,
+    },
     /// `{0}` is a custom error message when blocks are incompatible.
     #[error("Blocks are incompatible: {0}")]
     IncompatibleBlocks(String),
diff --git a/src/fec.rs b/src/fec.rs
index 3d76a2b5..79ff6a06 100644
--- a/src/fec.rs
+++ b/src/fec.rs
@@ -128,22 +128,28 @@ pub fn recode_random<F: PrimeField>(
 ) -> Result<Option<Shard<F>>, KomodoError> {
     for (i, (s1, s2)) in shards.iter().zip(shards.iter().skip(1)).enumerate() {
         if s1.k != s2.k {
-            return Err(KomodoError::IncompatibleShards(format!(
-                "k is not the same at {}: {} vs {}",
-                i, s1.k, s2.k
-            )));
+            return Err(KomodoError::IncompatibleShards {
+                key: "k".to_string(),
+                index: i,
+                left: s1.k.to_string(),
+                right: s2.k.to_string(),
+            });
         }
         if s1.hash != s2.hash {
-            return Err(KomodoError::IncompatibleShards(format!(
-                "hash is not the same at {}: {:?} vs {:?}",
-                i, s1.hash, s2.hash
-            )));
+            return Err(KomodoError::IncompatibleShards {
+                key: "hash".to_string(),
+                index: i,
+                left: format!("{:?}", s1.hash),
+                right: format!("{:?}", s2.hash),
+            });
         }
         if s1.size != s2.size {
-            return Err(KomodoError::IncompatibleShards(format!(
-                "size is not the same at {}: {} vs {}",
-                i, s1.size, s2.size
-            )));
+            return Err(KomodoError::IncompatibleShards {
+                key: "size".to_string(),
+                index: i,
+                left: s1.size.to_string(),
+                right: s2.size.to_string(),
+            });
         }
     }
 
-- 
GitLab
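
Moving from `IncompatibleShards(String)` to a struct variant lets callers match on the offending key and index instead of parsing a formatted message back apart. A sketch of that kind of usage, with the variant re-declared locally so the snippet stands alone:

```rust
#[derive(Debug)]
enum KomodoError {
    IncompatibleShards { key: String, index: usize, left: String, right: String },
}

fn explain(err: &KomodoError) -> String {
    match err {
        KomodoError::IncompatibleShards { key, index, left, right } if key == "k" => {
            format!("shard {index} was encoded with k = {right}, the previous ones with k = {left}")
        }
        KomodoError::IncompatibleShards { key, index, .. } => {
            format!("shard {index} disagrees with its neighbours on `{key}`")
        }
    }
}

fn main() {
    let err = KomodoError::IncompatibleShards {
        key: "k".to_string(),
        index: 3,
        left: "4".to_string(),
        right: "5".to_string(),
    };
    println!("{}", explain(&err));
}
```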


From dcc40ee963c9bac39e61dec8eb5599d0b728ccfd Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Tue, 1 Apr 2025 13:28:00 +0200
Subject: [PATCH 26/56] add missing MMD files

---
 src/fec.mmd       | 15 +++++++++++++++
 src/semi_avid.mmd | 14 ++++++++++++++
 2 files changed, 29 insertions(+)
 create mode 100644 src/fec.mmd
 create mode 100644 src/semi_avid.mmd

diff --git a/src/fec.mmd b/src/fec.mmd
new file mode 100644
index 00000000..ede87859
--- /dev/null
+++ b/src/fec.mmd
@@ -0,0 +1,15 @@
+flowchart LR
+    file@{ shape: rounded, label: "original file $$\\ F$$" }
+    source@{ shape: processes, label: "$$k\\ $$ source shards" }
+    encoded@{ shape: processes, label: "$$n\\ $$ encoded shards" }
+
+    gathered@{ shape: processes, label: "at least $$\\ k\\ $$ shards" }
+    decoded@{ shape: processes, label: "$$k\\ $$ decoded shards" }
+    reconstructed@{ shape: rounded, label: "reconstructed file $$\\ F^*$$" }
+
+    life_1@{ shape: framed-circle, label: "life" }
+    life_2@{ shape: framed-circle, label: "life" }
+
+    file --split--> source --"$$(k, n)\\ $$ encoding"--> encoded --disseminate--> life_1
+
+    life_2 --gather--> gathered --"$$(k, n)\\ $$ decoding"--> decoded --concat--> reconstructed
diff --git a/src/semi_avid.mmd b/src/semi_avid.mmd
new file mode 100644
index 00000000..876affd1
--- /dev/null
+++ b/src/semi_avid.mmd
@@ -0,0 +1,14 @@
+sequenceDiagram
+    actor prover
+    actor verifier
+
+    Note over prover,verifier: generate trusted setup<br/>$$\ \text{TS} = ([\tau^i]_1)$$
+
+    Note left of prover: split the data into $$\ k\ \text{polynomials of degree}\ m - 1$$
+    Note left of prover: commit the $$\ k \text{ polynomials and get the } (c_j)$$
+
+    prover->>verifier: $$s_{\alpha} \text{, } (c_j)$$
+
+    Note right of verifier: get the linear combination $$\ (\lambda_j) \text{ and polynomial } P_\alpha \text{ of shard } s_\alpha$$
+    Note right of verifier: compute the linear combination of the $$\ (c_j) \text{: }\hat{c} = \sum\limits \lambda_j c_j$$
+    Note right of verifier: assert $$\ \hat{c} = \text{com}(P_{\alpha})$$
-- 
GitLab


From 0df46a8346446b36777ba5527575f4e12a9cc410 Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Tue, 1 Apr 2025 13:30:02 +0200
Subject: [PATCH 27/56] use struct error for `IncompatibleBlocks`

---
 src/error.rs     | 10 +++++++---
 src/semi_avid.rs | 10 ++++++----
 2 files changed, 13 insertions(+), 7 deletions(-)

diff --git a/src/error.rs b/src/error.rs
index 8715d7f8..e7e94447 100644
--- a/src/error.rs
+++ b/src/error.rs
@@ -46,9 +46,13 @@ pub enum KomodoError {
         left: String,
         right: String,
     },
-    /// `{0}` is a custom error message when blocks are incompatible.
-    #[error("Blocks are incompatible: {0}")]
-    IncompatibleBlocks(String),
+    #[error("Blocks are incompatible ({key} is not the same at {index}: {left} vs {right})")]
+    IncompatibleBlocks {
+        key: String,
+        index: usize,
+        left: String,
+        right: String,
+    },
     #[error("Degree is zero")]
     DegreeIsZero,
     #[error("too many coefficients: max is {powers}, found {coefficients}")]
diff --git a/src/semi_avid.rs b/src/semi_avid.rs
index 45fa67cc..0929537c 100644
--- a/src/semi_avid.rs
+++ b/src/semi_avid.rs
@@ -234,10 +234,12 @@ pub fn recode<F: PrimeField, G: CurveGroup<ScalarField = F>>(
 ) -> Result<Option<Block<F, G>>, KomodoError> {
     for (i, (b1, b2)) in blocks.iter().zip(blocks.iter().skip(1)).enumerate() {
         if b1.proof != b2.proof {
-            return Err(KomodoError::IncompatibleBlocks(format!(
-                "proofs are not the same at {}: {:?} vs {:?}",
-                i, b1.proof, b2.proof
-            )));
+            return Err(KomodoError::IncompatibleBlocks {
+                key: "proof".to_string(),
+                index: i,
+                left: format!("{:?}", b1.proof),
+                right: format!("{:?}", b2.proof),
+            });
         }
     }
     let shard = match fec::recode_random(
-- 
GitLab


From 83b5df675465af1df699d16702b54112f89168ac Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Tue, 1 Apr 2025 14:45:33 +0200
Subject: [PATCH 28/56] doc lib

---
 src/lib.mmd | 11 +++++++++++
 src/lib.rs  |  8 ++++++--
 2 files changed, 17 insertions(+), 2 deletions(-)
 create mode 100644 src/lib.mmd

diff --git a/src/lib.mmd b/src/lib.mmd
new file mode 100644
index 00000000..bc6c6fb6
--- /dev/null
+++ b/src/lib.mmd
@@ -0,0 +1,11 @@
+sequenceDiagram
+    actor prover
+    actor verifier
+
+    Note over prover,verifier: generate public objects required<br/>for proving and verifying
+
+    Note left of prover: computes public commitments $$\ (c) \text{ and proof } \pi \text{ associated with } s$$
+
+    prover->>verifier: $$(c) \text{, } \pi$$
+
+    Note right of verifier: verify that $$\ (c) \text{, } \pi \text{ and } s\ $$ are consistent
diff --git a/src/lib.rs b/src/lib.rs
index abc68e21..a363d9c9 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,4 +1,4 @@
-//! Komodo: Cryptographically-proven Erasure Coding
+//! Komodo: Cryptographically-proven Erasure Coding.
 //!
 //! Komodo provides an easy-to-use Rust library and ecosystem that is composed of two main parts:
 //! - support for FEC encoding and decoding with the [`fec`] submodule
@@ -14,7 +14,11 @@
 //! mandatory to explore to understand the protocols.
 //!
 //! # Example
-//! Let's explain with a very simple example how things operate with Komodo.
+//! Let's explain with a very simple example how things operate with Komodo. The setup is that a
+//! _prover_ wants to show a _verifier_ that a shard of encoded data $s$ has indeed been generated
+//! with a linear combination of the $k$ source shards.
+//!
+#![doc = simple_mermaid::mermaid!("lib.mmd")]
 //!
 //! > **Note**
 //! >
-- 
GitLab


From 09e40eac9378210298d1a06a8b23187a7f6c7511 Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Tue, 1 Apr 2025 15:06:41 +0200
Subject: [PATCH 29/56] semi_avid precisions

---
 src/semi_avid.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/semi_avid.rs b/src/semi_avid.rs
index 0929537c..8ef7de09 100644
--- a/src/semi_avid.rs
+++ b/src/semi_avid.rs
@@ -22,8 +22,8 @@
 //!
 //! > **Note**
 //! >
-//! > in the following, the data is arranged in an $m \times k$ matrix and $i$ will denote the
-//! > number of a row and $j$ those of a column
+//! > In the following, the data $\Delta$ is arranged in an $m \times k$ matrix and $i$ will denote
+//! > the number of a row and $j$ those of a column
 //! > - $0 \leq i \leq m - 1$
 //! > - $0 \leq j \leq k - 1$
 #![doc = simple_mermaid::mermaid!("semi_avid.mmd")]
-- 
GitLab


From 7159b709887533680cebc1bc63a0c7e2aaa83321 Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Tue, 1 Apr 2025 15:13:12 +0200
Subject: [PATCH 30/56] simplify semi_avid

---
 src/semi_avid.mmd | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/src/semi_avid.mmd b/src/semi_avid.mmd
index 876affd1..35914fe3 100644
--- a/src/semi_avid.mmd
+++ b/src/semi_avid.mmd
@@ -4,11 +4,12 @@ sequenceDiagram
 
     Note over prover,verifier: generate trusted setup<br/>$$\ \text{TS} = ([\tau^i]_1)$$
 
-    Note left of prover: split the data into $$\ k\ \text{polynomials of degree}\ m - 1$$
-    Note left of prover: commit the $$\ k \text{ polynomials and get the } (c_j)$$
+    Note left of prover: $$(P_j) = \text{split}(\Delta)$$
+    Note left of prover: $$c_j = \text{com}(P_j)$$
 
-    prover->>verifier: $$s_{\alpha} \text{, } (c_j)$$
+    prover->>verifier: $$(c_j)$$
 
-    Note right of verifier: get the linear combination $$\ (\lambda_j) \text{ and polynomial } P_\alpha \text{ of shard } s_\alpha$$
-    Note right of verifier: compute the linear combination of the $$\ (c_j) \text{: }\hat{c} = \sum\limits \lambda_j c_j$$
+    Note right of verifier: $$(\lambda_j) = \text{lincomb}(s_\alpha)$$
+    Note right of verifier: $$P_\alpha = \text{poly}(s_\alpha)$$
+    Note right of verifier: $$\hat{c} = \sum\limits \lambda_j c_j$$
     Note right of verifier: assert $$\ \hat{c} = \text{com}(P_{\alpha})$$
-- 
GitLab


From a28085b2a22632d02bc1c476c7473b1274746271 Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Tue, 1 Apr 2025 15:17:58 +0200
Subject: [PATCH 31/56] simplify lib MMD

---
 src/lib.mmd | 3 ++-
 src/lib.rs  | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/lib.mmd b/src/lib.mmd
index bc6c6fb6..6f35a418 100644
--- a/src/lib.mmd
+++ b/src/lib.mmd
@@ -4,7 +4,8 @@ sequenceDiagram
 
     Note over prover,verifier: generate public objects required<br/>for proving and verifying
 
-    Note left of prover: computes public commitments $$\ (c) \text{ and proof } \pi \text{ associated with } s$$
+    Note left of prover: computes commitments $$\ (c) = \text{com}(\Delta)$$
+    Note left of prover: computes proof $$\ \pi = \text{prove}(s)$$
 
     prover->>verifier: $$(c) \text{, } \pi$$
 
diff --git a/src/lib.rs b/src/lib.rs
index a363d9c9..99f23367 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -16,7 +16,7 @@
 //! # Example
 //! Let's explain with a very simple example how things operate with Komodo. The setup is that a
 //! _prover_ wants to show a _verifier_ that a shard of encoded data $s$ has indeed been generated
-//! with a linear combination of the $k$ source shards.
+//! with a linear combination of the $k$ source shards from data $\Delta$.
 //!
 #![doc = simple_mermaid::mermaid!("lib.mmd")]
 //!
-- 
GitLab


From c2847298b0e57f4308b0312ca3a7f6e56479ba7c Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Tue, 1 Apr 2025 15:32:25 +0200
Subject: [PATCH 32/56] fix bad link in semi_avid

---
 src/semi_avid.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/semi_avid.rs b/src/semi_avid.rs
index 8ef7de09..d2c9d1e1 100644
--- a/src/semi_avid.rs
+++ b/src/semi_avid.rs
@@ -16,7 +16,7 @@
 //! used: _the commitment of a linear combination of polynomials is equal to the same linear
 //! combination of the commiments of the same polynomials_.
 //!
-//! $$\forall (\alpha_i) \in \mathbb{F}_p, (P_i) \in \mathbb{F}_p[X], \quad \text{com}\left(\sum\limits_i \alpha_i P_i\right) = \sum\limits_i \alpha_i \text{com}(P_i)$$
+//! $$\forall (\alpha_i) \in \mathbb{F}_p, (P_i) \in \mathbb{F}_p\[X\], \quad \text{com}\left(\sum\limits_i \alpha_i P_i\right) = \sum\limits_i \alpha_i \text{com}(P_i)$$
 //!
 //! This give us a simple, lightweight and fast commitment scheme.
 //!
-- 
GitLab


From 2305ea9030928d1b371988f43de2bed81c3c647c Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Tue, 1 Apr 2025 15:40:37 +0200
Subject: [PATCH 33/56] doc KZG

---
 src/kzg.mmd | 18 +++++++++---------
 src/kzg.rs  | 55 +++++++++++++++++++++++++++--------------------------
 2 files changed, 37 insertions(+), 36 deletions(-)

diff --git a/src/kzg.mmd b/src/kzg.mmd
index 5f1537a1..3951be93 100644
--- a/src/kzg.mmd
+++ b/src/kzg.mmd
@@ -4,15 +4,15 @@ sequenceDiagram
 
     Note over prover,verifier: generate trusted setup<br/>$$\ \text{TS} = ([\tau^j]_1)$$
 
-    Note left of prover: split the data into $$\ m\ \text{polynomials of degree}\ k - 1$$
-    Note left of prover: compute commitments $$\ (c_i) \text{ as } [P_i(\tau)]_1$$
-    Note left of prover: compute shard $$\ s_{\alpha} = (s_{\alpha, i}) = (P_i(\alpha))$$
-    Note left of prover: compute aggregation $$\ Q(X) = \sum\limits_ir^i P_i(X) \text{ where } r = H(P_0(\alpha)|...|P_{m-1}(\alpha))$$
-    Note left of prover: compute proof $$\ \pi_{\alpha} = \left[ \frac{Q(\tau) - Q(\alpha)}{\tau - \alpha} \right]_1$$
+    Note left of prover: $$(P_i) = \text{split}(\Delta)$$
+    Note left of prover: $$c_i = \text{com}(P_i, \text{TS})$$
+    Note left of prover: $$r = H(P_0(\alpha)|...|P_{m-1}(\alpha))$$
+    Note left of prover: $$Q(X) = \sum\limits_i r^i P_i(X)$$
+    Note left of prover: $$\pi_\alpha = \text{KZG.prove}(Q, \text{TS})$$
 
-    prover->>verifier: $$\pi_{\alpha} \text{, } s_{\alpha} \text{, } (c_i)$$
+    prover->>verifier: $$\pi_{\alpha} \text{, } (c_i)$$
 
-    Note right of verifier: compute $$\ r = H(s_0|...|s_{m - 1})$$
-    Note right of verifier: compute $$\ y = \sum\limits_i r^i s_{\alpha, i}$$
-    Note right of verifier: compute $$\ c = \sum\limits_i r^i c_i$$
+    Note right of verifier: $$\ r = H(s_0|...|s_{m - 1})$$
+    Note right of verifier: $$\ y = \sum\limits_i r^i s_{\alpha, i}$$
+    Note right of verifier: $$\ c = \sum\limits_i r^i c_i$$
     Note right of verifier: assert $$\ E(c - [y]_1, [1]_2) = E(\pi_{\alpha}, [\tau - \alpha]_2)$$
diff --git a/src/kzg.rs b/src/kzg.rs
index fa644ea5..3fa265f8 100644
--- a/src/kzg.rs
+++ b/src/kzg.rs
@@ -1,20 +1,21 @@
-//! KZG+: the multipolynomial and non-interactive extension of KZG
+//! $\text{KZG}^+$: the multipolynomial and non-interactive extension of $\text{KZG}$
 //!
 //! > references:
 //! > - [Kate et al., 2010](https://link.springer.com/chapter/10.1007/978-3-642-17373-8_11)
 //! > - [Boneh et al., 2020](https://eprint.iacr.org/2020/081)
 //!
+//! # The protocol
+//! Here, we assume that the input data has been encoded with a _Reed-Solomon_ encoding, as can be
+//! done with the [crate::fec] module.
+//!
 //! > **Note**
 //! >
-//! > in the following
+//! > In the following, the data $\Delta$ is arranged in an $m \times k$ matrix and $i$ will denote
+//! > the number of a row and $j$ those of a column
 //! > - $0 \leq i \leq m - 1$
 //! > - $0 \leq j \leq k - 1$
 #![doc = simple_mermaid::mermaid!("kzg.mmd")]
 //!
-//! # The protocol
-//! Here, we assume that the input data has been encoded with a _Reed-Solomon_ encoding, as can be
-//! done with the [crate::fec] module.
-//!
 //! Conveniently, each one of the $n$ encoded shards is a linear combination of the $k$ source
 //! shards. More precisely, it is the evaluation of the input data seen as a polynomial on some
 //! evalution point.
@@ -39,11 +40,11 @@
 //! - because $k$ is a fixed code parameter and the data can be of arbitrary size, the bytes are
 //!   arranged in an $m \times k$ matrix of finite field elements. Then, instead of computing $m$
 //!   proofs per shard, KZG+ will _aggregate_ the $m$ polynomials, one per row in the data, into a
-//!   single polynomial $P$. This is done by computing a random linear combination of the $m$ input
-//!   polynomials
+//!   single polynomial $Q$. This is done by computing a random linear combination of the $m$ input
+//!   polynomials.
 //!
 //! # Example
-//! see the KZG example.
+//! See the KZG example.
 use ark_ec::{pairing::Pairing, AffineRepr};
 use ark_ff::PrimeField;
 use ark_poly::DenseUVPolynomial;
@@ -59,9 +60,9 @@ use crate::fec::Shard;
 
 pub use crate::zk::ark_commit as commit;
 
-/// representation of a block of proven data.
+/// Representation of a block of proven data.
 ///
-/// this is a wrapper around a [`crate::fec::Shard`] with some additional cryptographic
+/// This is a wrapper around a [`crate::fec::Shard`] with some additional cryptographic
 /// information that allows to prove the integrity of said shard.
 #[derive(Debug, Clone, Default, PartialEq, CanonicalDeserialize, CanonicalSerialize)]
 pub struct Block<E: Pairing> {
@@ -70,7 +71,7 @@ pub struct Block<E: Pairing> {
     proof: kzg10::Proof<E>,
 }
 
-/// proves $n$ encoded shards by computing one proof for each of them and attaching the commitment
+/// Proves $n$ encoded shards by computing one proof for each of them and attaching the commitments.
 pub fn prove<E, P>(
     commits: Vec<kzg10::Commitment<E>>,
     polynomials: Vec<P>,
@@ -161,13 +162,13 @@ where
     (y, c)
 }
 
-/// for a given Block, verify that the data has been correctly generated
+/// For a given Block, verifies that the data has been correctly generated.
 ///
-/// First, transform data bytes into m polynomial evaluation
-/// compute the hash of the concatenation of these evaluations
-/// compute y as a combination of the shards: y = sum(r^i * Shard_i) for i=[0..m[
-/// compute c as a combination of the commitments: c = sum(r^i * Commit_i) for i=[0..m[
-/// Check if e(c - yG1,G2) == e(proof,(T-alpha)G2)
+/// - transforms data bytes into $m$ polynomial evaluations
+/// - computes the hash of the concatenation of these evaluations
+/// - computes $y$ as a combination of the shards: $$y = \sum(r^i s_i)$$
+/// - computes $c$ as a combination of the commitments: $$c = \sum(r^i c_i)$$
+/// - checks that $$E(c - [y]_1, [1]_2) = E(\pi\_\alpha, [\tau - \alpha]_2)$$
 pub fn verify<E, P>(
     block: &Block<E>,
     pt: E::ScalarField,
@@ -186,10 +187,10 @@ where
     E::pairing(p1, verifier_key.h) == E::pairing(block.proof.w, inner)
 }
 
-/// verify a bunch of blocks at once using a single elliptic pairing.
+/// Verifies a bunch of blocks at once using a single elliptic pairing.
 ///
 /// Rather than checking
-///     e(c - yG_1, G_2) = e(proof, (\tau - \alpha)G_2)
+///     $$E(c - [y]_1, [1]_2) = E(\pi\_\alpha, [\tau - \alpha]_2)$$
 /// for each block individually (see [`verify`]),
 /// we combine the blocks and perform one pairing as follows:
 ///
@@ -199,13 +200,13 @@ where
 /// > - $k$ as the number of blocks given
 ///
 /// 1. compute $r$ as the hash of all the proofs
-/// 2. for each block b_i:
-///    - compute y_i = sum_{j=[0..m[}(r^j * Shard_j)
-///    - compute c_i = sum_{j=[0..m[}(r^j * Commit_j)
-/// 3. combine a combination of proofs and (y, c, \alpha) such as :
-///    proof_agg = sum_{i=[0..k[}(r^i * proof_i)
-///    inner_agg = sum_{i=[0..k[}(r^i * (c_i - y_i G_1 + alpha_i * proof_i))
-/// 4. check e(proof_agg, \tau G_2) = e(inner_agg, G_2)
+/// 2. for each block $b_j$:
+///    - compute $y_j = \sum_{i = 0}^m r^i s_i$
+///    - compute $c_j = \sum_{i = 0}^m r^i c_i$
+/// 3. aggregate the proofs and the $(y_j, c_j, \alpha_j)$ as follows:
+///    - $\Pi = \sum_{j = 0}^k r^j \pi_j$
+///    - $\Alpha = \sum_{j = 0}^k r^j (c_j - [y_j]_1 + \alpha_j \pi_j)$
+/// 4. check $E(\Pi, [\tau]_2) = E(\Alpha, [1]_2)$
 pub fn batch_verify<E, P>(
     blocks: &[Block<E>],
     pts: &[E::ScalarField],
-- 
GitLab
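
The aggregation step described in this patch, $Q(X) = \sum_i r^i P_i(X)$, is what keeps KZG+ at a single proof per shard. A toy version over integers modulo a small prime (the real code works on Arkworks polynomials over the pairing's scalar field, and $r$ comes from hashing the evaluations):

```rust
const P: u64 = 65537; // toy prime field

/// Fold the m row polynomials into Q(X) = sum_i r^i P_i(X), coefficient by coefficient.
fn aggregate(polynomials: &[Vec<u64>], r: u64) -> Vec<u64> {
    let len = polynomials.iter().map(|p| p.len()).max().unwrap_or(0);
    let mut q = vec![0u64; len];
    let mut r_i = 1u64; // r^0
    for poly in polynomials {
        for (qc, &c) in q.iter_mut().zip(poly) {
            *qc = (*qc + r_i * c) % P;
        }
        r_i = r_i * r % P;
    }
    q
}

fn main() {
    // three rows of the data matrix, seen as polynomials (low degree first)
    let rows = vec![vec![1, 2, 3], vec![4, 5, 6], vec![7, 8, 9]];
    let r = 10; // in the protocol, r = H(P_0(alpha) | ... | P_{m-1}(alpha))
    // Q = P_0 + r*P_1 + r^2*P_2
    assert_eq!(aggregate(&rows, r), vec![741, 852, 963]);
}
```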


From cd741521fc1066aea23bf808c792572e4a0368eb Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Tue, 1 Apr 2025 15:50:48 +0200
Subject: [PATCH 34/56] fix bad links in KZG

---
 src/kzg.rs | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/kzg.rs b/src/kzg.rs
index 3fa265f8..5a850073 100644
--- a/src/kzg.rs
+++ b/src/kzg.rs
@@ -168,7 +168,7 @@ where
 /// - computes the hash of the concatenation of these evaluations
 /// - computes $y$ as a combination of the shards: $$y = \sum(r^i s_i)$$
 /// - computes $c$ as a combination of the commitments: $$c = \sum(r^i c_i)$$
-/// - checks that $$E(c - [y]_1, [1]_2) = E(\pi\_\alpha, [\tau - \alpha]_2)$$
+/// - checks that $$E(c - \[y\]_1, \[1\]_2) = E(\pi\_\alpha, \[\tau - \alpha\]_2)$$
 pub fn verify<E, P>(
     block: &Block<E>,
     pt: E::ScalarField,
@@ -190,7 +190,7 @@ where
 /// Verifies a bunch of blocks at once using a single elliptic pairing.
 ///
 /// Rather than checking
-///     $$E(c - [y]_1, [1]_2) = E(\pi\_\alpha, [\tau - \alpha]_2)$$
+///     $$E(c - \[y\]_1, \[1\]_2) = E(\pi\_\alpha, \[\tau - \alpha\]_2)$$
 /// for each block individually (see [`verify`]),
 /// we combine the blocks and perform one pairing as follows:
 ///
@@ -205,8 +205,8 @@ where
 ///    - compute $c_j = \sum_{i = 0}^m r^i c_i$
 /// 3. aggregate the proofs and the $(y_j, c_j, \alpha_j)$ as follows:
 ///    - $\Pi = \sum_{j = 0}^k r^j \pi_j$
-///    - $\Alpha = \sum_{j = 0}^k r^j (c_j - [y_j]_1 + \alpha_j \pi_j)$
-/// 4. check $E(\Pi, [\tau]_2) = E(\Alpha, [1]_2)$
+///    - $\Alpha = \sum_{j = 0}^k r^j (c_j - \[y_j\]_1 + \alpha_j \pi_j)$
+/// 4. check $E(\Pi, \[\tau\]_2) = E(\Alpha, \[1\]_2)$
 pub fn batch_verify<E, P>(
     blocks: &[Block<E>],
     pts: &[E::ScalarField],
-- 
GitLab


From e596852270bbcb09fb496cd06637ebf5e09ba768 Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Tue, 1 Apr 2025 16:08:58 +0200
Subject: [PATCH 35/56] remove unused `VerifierKey` and `Commitment`

---
 src/aplonk/mod.rs | 21 ---------------------
 1 file changed, 21 deletions(-)

diff --git a/src/aplonk/mod.rs b/src/aplonk/mod.rs
index 9abb3517..8a757430 100644
--- a/src/aplonk/mod.rs
+++ b/src/aplonk/mod.rs
@@ -16,7 +16,6 @@ use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Compress};
 use ark_std::{test_rng, One, UniformRand};
 use rs_merkle::algorithms::Sha256;
 use rs_merkle::Hasher;
-use std::marker::PhantomData;
 use std::ops::{Div, Mul};
 
 use crate::{
@@ -41,18 +40,6 @@ pub struct Block<E: Pairing> {
     aplonk_proof: E::G2,
 }
 
-/// /!\ [`Commitment`] is not [`CanonicalDeserialize`] because `P` is not [`Send`].
-#[derive(Debug, Clone, Default, PartialEq, CanonicalSerialize)]
-pub struct Commitment<E, P>
-where
-    E: Pairing,
-    P: DenseUVPolynomial<E::ScalarField, Point = E::ScalarField>,
-    for<'a, 'b> &'a P: Div<&'b P, Output = P>,
-{
-    _engine: PhantomData<E>,
-    _poly: PhantomData<P>,
-}
-
 /// /!\ [`SetupParams`] is not [`Default`] because [`kzg10::UniversalParams`] is not [`Default`].
 #[derive(Debug, Clone, PartialEq, CanonicalSerialize, CanonicalDeserialize)]
 pub struct SetupParams<E: Pairing> {
@@ -60,14 +47,6 @@ pub struct SetupParams<E: Pairing> {
     pub ipa: ipa::Params<E>,
 }
 
-#[derive(Debug, Clone, Default, PartialEq, CanonicalSerialize, CanonicalDeserialize)]
-pub struct VerifierKey<E: Pairing> {
-    pub vk_psi: kzg10::VerifierKey<E>,
-    pub tau_1: E::G1,
-    pub g1: E::G1,
-    pub g2: E::G2,
-}
-
 /// creates a combination of a trusted KZG and an IPA setup for [[aPlonk]]
 ///
 /// > **Note**  
-- 
GitLab


From e616e1ef0216a0673e53211f4707b800413f28ce Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Tue, 1 Apr 2025 16:37:16 +0200
Subject: [PATCH 36/56] fix KZG pairing formulae

---
 src/kzg.mmd | 2 +-
 src/kzg.rs  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/kzg.mmd b/src/kzg.mmd
index 3951be93..aa78608d 100644
--- a/src/kzg.mmd
+++ b/src/kzg.mmd
@@ -15,4 +15,4 @@ sequenceDiagram
     Note right of verifier: $$\ r = H(s_0|...|s_{m - 1})$$
     Note right of verifier: $$\ y = \sum\limits_i r^i s_{\alpha, i}$$
     Note right of verifier: $$\ c = \sum\limits_i r^i c_i$$
-    Note right of verifier: assert $$\ E(c - [y]_1, [1]_2) = E(\pi_{\alpha}, [\tau - \alpha]_2)$$
+    Note right of verifier: assert $$\ E(c - [y]_1, [1]_2) = E(\pi_{\alpha}, [\tau]_2 - [\alpha]_2)$$
diff --git a/src/kzg.rs b/src/kzg.rs
index 5a850073..b5d8d747 100644
--- a/src/kzg.rs
+++ b/src/kzg.rs
@@ -168,7 +168,7 @@ where
 /// - computes the hash of the concatenation of these evaluations
 /// - computes $y$ as a combination of the shards: $$y = \sum(r^i s_i)$$
 /// - computes $c$ as a combination of the commitments: $$c = \sum(r^i c_i)$$
-/// - checks that $$E(c - \[y\]_1, \[1\]_2) = E(\pi\_\alpha, \[\tau - \alpha\]_2)$$
+/// - checks that $$E(c - \[y\]_1, \[1\]_2) = E(\pi\_\alpha, \[\tau\]_2 - \[\alpha\]_2)$$
 pub fn verify<E, P>(
     block: &Block<E>,
     pt: E::ScalarField,
-- 
GitLab
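
A quick sanity check of why the corrected right-hand side is equivalent, assuming the usual KZG relations (with $c = \sum_i r^i c_i = [Q(\tau)]_1$, $y = Q(\alpha)$ and $\pi_\alpha = [(Q(\tau) - Q(\alpha)) / (\tau - \alpha)]_1$, which the patch does not restate):

```latex
\begin{aligned}
E(c - [y]_1, [1]_2)
  &= e(g_1, g_2)^{Q(\tau) - Q(\alpha)} \\
E(\pi_\alpha, [\tau]_2 - [\alpha]_2)
  &= e(g_1, g_2)^{\frac{Q(\tau) - Q(\alpha)}{\tau - \alpha} \cdot (\tau - \alpha)}
   = e(g_1, g_2)^{Q(\tau) - Q(\alpha)}
\end{aligned}
```

Both sides reduce to $e(g_1, g_2)^{Q(\tau) - Q(\alpha)}$, and $[\tau]_2 - [\alpha]_2$ is exactly what the verifier can form from the trusted setup and the public evaluation point.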


From 2aa2c7a93279f5e8c7c1dc3be320df47da3490db Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Tue, 1 Apr 2025 17:03:33 +0200
Subject: [PATCH 37/56] doc aplonk

---
 src/aplonk/ipa.rs        | 14 ++++-----
 src/aplonk/mod.mmd       | 15 ++++++++++
 src/aplonk/mod.rs        | 50 ++++++++++++++++++++++++++++++--
 src/aplonk/polynomial.rs | 62 ++++++++++++++++++++--------------------
 src/aplonk/transcript.rs |  6 ++--
 5 files changed, 103 insertions(+), 44 deletions(-)
 create mode 100644 src/aplonk/mod.mmd

diff --git a/src/aplonk/ipa.rs b/src/aplonk/ipa.rs
index 5ea33833..74b3a25e 100644
--- a/src/aplonk/ipa.rs
+++ b/src/aplonk/ipa.rs
@@ -11,30 +11,30 @@ use crate::aplonk::polynomial;
 use crate::aplonk::transcript;
 use crate::error::KomodoError;
 
-/// holds the setup parameters of the IPA stage of [aPlonk from [Ambrona et al.]][aPlonK]
+/// Holds the setup parameters of the IPA stage of [aPlonk from [Ambrona et al.]][aPlonK].
 ///
-/// this can be found in [aPlonk from [Ambrona et al.]][aPlonK] in
+/// This can be found in [aPlonk from [Ambrona et al.]][aPlonK] in
 /// - page **13**. in Setup.1
 /// - page **13**. in Setup.3
 ///
 /// [aPlonk]: https://eprint.iacr.org/2022/1352.pdf
 #[derive(Debug, Clone, PartialEq, Default, CanonicalSerialize, CanonicalDeserialize)]
 pub struct Params<E: Pairing> {
-    /// *[\tau]_1* in the paper
+    /// $\[\tau\]_1$ in the paper
     pub tau_1: E::G1,
-    /// *ck_\tau* in the paper
+    /// $\text{ck}_\tau$ in the paper
     pub ck_tau: Vec<E::G2>,
 }
 
 /// holds all the necessary pieces to prove the IPA stage of [aPlonk from [Ambrona et al.]][aPlonK]
 /// this can be found in [aPlonk from [Ambrona et al.]][aPlonK] as
-/// *\pi = ({L_G^j, R_G^j, L_r^j, R_r^j}_{j \in [\kappa]}, \mu^0, G^0)* in
+/// $\pi = ({L_G^j, R_G^j, L_r^j, R_r^j}_{j \in [\kappa]}, \mu^0, G^0)$ in
 /// - page **15**. in IPA.Prove.10
 ///
 /// > **Note**  
 /// > the notations are the same as in the paper, only with all letters in lower
-/// > case and the powers at the bottom, e.g. `l_g_j` instead of *L_G^j*, and
-/// > with *G* rename as `ck_tau`.
+/// > case and the powers at the bottom, e.g. `l_g_j` instead of $L_G^j$, and
+/// > with $G$ renamed as `ck_tau`.
 ///
 /// [aPlonk]: https://eprint.iacr.org/2022/1352.pdf
 #[derive(Debug, Clone, Default, PartialEq, CanonicalSerialize, CanonicalDeserialize)]
diff --git a/src/aplonk/mod.mmd b/src/aplonk/mod.mmd
new file mode 100644
index 00000000..75764d2d
--- /dev/null
+++ b/src/aplonk/mod.mmd
@@ -0,0 +1,15 @@
+sequenceDiagram
+    actor prover
+    actor verifier
+
+    Note over prover,verifier: generate trusted setup<br/>$$\ \text{TS} = (([\sigma^j]_1), [\tau]_1, \text{ck}_\tau)$$
+    Note over prover,verifier: $$\tau \leftarrow \mathbb{F}_p$$
+    Note over prover,verifier: $$\text{ck}_\tau = ([\tau^i]_2)$$
+
+    Note left of prover: $$(P_i) = \text{split}(\Delta)$$
+    Note left of prover: $$c = \text{com}(P_i, \text{TS})$$
+    Note left of prover: $$\pi_\alpha = \text{prove}(c, P_i, s_\alpha, \text{TS})$$
+
+    prover->>verifier: $$\pi_{\alpha} \text{, } (c_i)$$
+
+    Note right of verifier: $$\text{verify}(s_\alpha, \pi_\alpha, \text{TS})$$
diff --git a/src/aplonk/mod.rs b/src/aplonk/mod.rs
index 8a757430..475ba2bc 100644
--- a/src/aplonk/mod.rs
+++ b/src/aplonk/mod.rs
@@ -2,6 +2,8 @@
 //!
 //! > references:
 //! > - [Ambrona et al., 2022](https://link.springer.com/chapter/10.1007/978-3-031-41326-1_11)
+//!
+#![doc = simple_mermaid::mermaid!("mod.mmd")]
 use ark_ec::{
     pairing::{Pairing, PairingOutput},
     AffineRepr,
@@ -30,16 +32,27 @@ mod polynomial;
 mod transcript;
 
 #[derive(Debug, Clone, Default, PartialEq, CanonicalSerialize, CanonicalDeserialize)]
+/// Representation of an _aPlonK_ block.
 pub struct Block<E: Pairing> {
     pub shard: Shard<E::ScalarField>,
+    /// $\text{com}_f \in \mathbb{G}_T$
     com_f: PairingOutput<E>,
+    /// $\hat{v} \in \mathbb{F}_p$
     v_hat: E::ScalarField,
+    /// $\hat{\mu} \in \mathbb{G}_1$
     mu_hat: E::G1,
+    /// $\pi_\text{KZG} \in \mathbb{G}_1$
     kzg_proof: kzg10::Proof<E>,
+    /// $\pi_\text{IPA}$
     ipa_proof: ipa::Proof<E>,
+    /// $\pi_{\text{aPlonK}} \in \mathbb{G}_2$
     aplonk_proof: E::G2,
 }
 
+/// Representation of _aPlonK_'s parameters.
+///
+/// This is a wrapper around $\text{KZG}^+$ and IPA parameters.
+///
 /// /!\ [`SetupParams`] is not [`Default`] because [`kzg10::UniversalParams`] is not [`Default`].
 #[derive(Debug, Clone, PartialEq, CanonicalSerialize, CanonicalDeserialize)]
 pub struct SetupParams<E: Pairing> {
@@ -47,10 +60,11 @@ pub struct SetupParams<E: Pairing> {
     pub ipa: ipa::Params<E>,
 }
 
-/// creates a combination of a trusted KZG and an IPA setup for [[aPlonk]]
+/// Creates a combination of a trusted KZG and an IPA setup for [[aPlonk]].
 ///
-/// > **Note**  
-/// > this is an almost perfect translation of the *Setup* algorithm in page
+/// > **Note**
+/// >
+/// > This is an almost perfect translation of the *Setup* algorithm in page
 /// > **13** of [aPlonk from [Ambrona et al.]][aPlonK]
 ///
 /// [aPlonk]: https://eprint.iacr.org/2022/1352.pdf
@@ -84,6 +98,9 @@ where
     })
 }
 
+/// Commits the polynomials.
+///
+/// [`commit`] actually computes $\mu$ and $\text{com}_f$.
 pub fn commit<E, P>(
     polynomials: Vec<P>,
     setup: SetupParams<E>,
@@ -130,6 +147,20 @@ where
     Ok((mu, com_f))
 }
 
+/// Proves the whole data $\Delta$.
+///
+/// For each shard $s_\alpha$:
+/// - $r = \text{hash}(\text{com}_f, \alpha)$
+/// - $f = \sum r^i P_i$
+/// - $\hat{\mu} = \sum r^i \mu_i$
+/// - $\hat{v} = \sum r^i P_i(\alpha)$
+/// - $\pi_{\text{KZG}} = \text{KZG.prove}(\text{TS}, f, \alpha)$
+/// - $(\pi_{\text{IPA}}, u) = \text{IPA.prove}(\text{TS}, \text{com}_f, r, \hat{\mu}, \mu)$
+/// - $\kappa = \log_2(m)$
+/// - $G(X) = G(\kappa, u, u^{-1})$
+/// - $\rho = \text{hash}(\pi_{\text{IPA}})$
+/// - $H = \text{witness}(G, \rho)$
+/// - $\pi_{\text{aPlonK}} = \sum \[\tau^i\]_2 H_i$
 pub fn prove<E, P>(
     commit: (Vec<E::G1>, PairingOutput<E>),
     polynomials: Vec<P>,
@@ -260,6 +291,19 @@ where
     Ok(proofs)
 }
 
+/// Verifies that a block is valid.
+///
+/// For a given shard $s_\alpha$:
+/// - $r = \text{hash}(\text{com}_f, \alpha)$
+/// - $\text{ok}_{\text{KZG}} = E(\hat{\mu} - \[\hat{v}\]_1, \[1\]_2) = E(\pi\_{\text{KZG}}, \[\sigma\]_2 - \[\alpha\]_2)$
+/// - $\text{ok}_{\text{IPA}} = \text{IPA.verify'}(\text{com}_f, r, \hat{\mu}, \pi\_{\text{IPA}})$
+/// - $\rho = \text{hash}(\pi_{\text{IPA}})$
+/// - $\kappa = \log_2(m)$
+/// - $u = \text{replay}(\text{com}_f, r, \hat{\mu})$
+/// - $G(X) = G(\kappa, u, u^{-1})$
+/// - $v_{\rho} = G(\rho)$
+/// - $\text{ok}_{\text{aPlonK}} = E(\[\tau\]_1 - \[\rho\]_1, \pi\_{\text{aPlonK}}) = E(\[1\]_1, \pi\_{\text{IPA}}.\text{ck}\_{\tau,0})$
+/// - assert $\text{ok}_{\text{KZG}}$, $\text{ok}\_{\text{IPA}}$ and $\text{ok}\_{\text{aPlonK}}$ are true
 pub fn verify<E, P>(
     block: &Block<E>,
     pt: E::ScalarField,
diff --git a/src/aplonk/polynomial.rs b/src/aplonk/polynomial.rs
index e51508e0..175aaabf 100644
--- a/src/aplonk/polynomial.rs
+++ b/src/aplonk/polynomial.rs
@@ -12,48 +12,48 @@ fn to_binary(number: usize, width: usize) -> Vec<u8> {
         .collect()
 }
 
-/// compute the polynomial *g(X)* in [aPlonk from [Ambrona et al.]][aPlonk]
+/// compute the polynomial $g(X)$ in [aPlonk from [Ambrona et al.]][aPlonk]
 ///
-/// *g(X)* can be found, at
+/// $g(X)$ can be found, at
 /// - page **13**. in *open.7*
 /// - page **13**. in *check.5*
 /// - page **15**. in *IPA.verify.4*
 ///
 /// it's theoretical formula is the following (modified version):  
-/// *g(X) = \Pi_{j=1}^{\kappa = log_2(k)}(u_j^{-1} + u_j X^{2^j})*
+/// $g(X) = \Pi_{j=1}^{\kappa = log_2(k)}(u_j^{-1} + u_j X^{2^j})$
 ///
 /// however this formula is not very convenient, so let's expand this and
 /// compute all the coefficients!
 /// when we do that on small examples:
-/// - *\kappa = 1*: *
-///     g(X) = (u_0^{-1} + u_0 X)
-///          = u_0^{-1} +
-///            u_0 X
-/// *
-/// - *\kappa = 2*: *
-///     g(X) = (u_0^{-1} + u_0 X)(u_1^{-1} + u_1 X^2)
-///          = u_1^{-1} u_0^{-1}     +
-///            u_1^{-1} u_0        X +
-///            u_1      u_0^{-1} X^2 +
-///            u_1      u_0      X^3
-/// *
-/// - *\kappa = 3*: *
-///     g(X) = (u_0^{-1} + u_0 X)(u_1^{-1} + u_1 X^2)(u_2^{-1} + u_2 X^2)
-///          = u_2^{-1} u_1^{-1} u_0^{-1}     +
-///            u_2^{-1} u_1^{-1} u_0        X +
-///            u_2^{-1} u_1      u_0^{-1} X^2 +
-///            u_2^{-1} u_1      u_0      X^3 +
-///            u_2      u_1^{-1} u_0^{-1} X^4 +
-///            u_2      u_1^{-1} u_0      X^5 +
-///            u_2      u_1      u_0^{-1} X^6 +
-///            u_2      u_1      u_0      X^7
-/// *
+/// - $\kappa = 1$: \begin{align} \begin{split}
+///     g(X) &= (u_0^{-1} + u_0 X) \\\\
+///          &=\quad u_0^{-1} \\\\
+///          &\quad+ u_0 X
+/// \end{split} \end{align}
+/// - $\kappa = 2$: \begin{align} \begin{split}
+///     g(X) &= (u_0^{-1} + u_0 X)(u_1^{-1} + u_1 X^2) \\\\
+///          &=\quad u_1^{-1} u_0^{-1}      \\\\
+///          &\quad+  u_1^{-1} u_0        X \\\\
+///          &\quad+  u_1      u_0^{-1} X^2 \\\\
+///          &\quad+  u_1      u_0      X^3
+/// \end{split} \end{align}
+/// - $\kappa = 3$: \begin{align} \begin{split}
+///     g(X) &= (u_0^{-1} + u_0 X)(u_1^{-1} + u_1 X^2)(u_2^{-1} + u_2 X^4) \\\\
+///          &=\quad u_2^{-1} u_1^{-1} u_0^{-1}     \\\\
+///          &\quad+ u_2^{-1} u_1^{-1} u_0        X \\\\
+///          &\quad+ u_2^{-1} u_1      u_0^{-1} X^2 \\\\
+///          &\quad+ u_2^{-1} u_1      u_0      X^3 \\\\
+///          &\quad+ u_2      u_1^{-1} u_0^{-1} X^4 \\\\
+///          &\quad+ u_2      u_1^{-1} u_0      X^5 \\\\
+///          &\quad+ u_2      u_1      u_0^{-1} X^6 \\\\
+///          &\quad+ u_2      u_1      u_0      X^7
+/// \end{split} \end{align}
 ///
-/// we can see that the *j*-the coefficient of *g(X)* for a given *\kappa* is
-/// a product of a combination of *(u_i)* and their inverse elements directly
-/// related to the binary representation of the *j* polynomial power, e.g.
-/// - with *\kappa = 3* and *j = 6*, the binary is *110* and the coefficient is
-///   *u_0 \times u_1 \times u_2^{-1}*
+/// we can see that the $j$-th coefficient of $g(X)$ for a given $\kappa$ is
+/// a product of a combination of $(u_i)$ and their inverse elements directly
+/// related to the binary representation of the polynomial power $j$, e.g.
+/// - with $\kappa = 3$ and $j = 6$, the binary is $110$ and the coefficient is
+///   $u_0^{-1} \times u_1 \times u_2$
 ///
 /// [aPlonk]: https://eprint.iacr.org/2022/1352.pdf
 pub(super) fn compute_g<E, P>(
diff --git a/src/aplonk/transcript.rs b/src/aplonk/transcript.rs
index 8b0b8e14..aca7373d 100644
--- a/src/aplonk/transcript.rs
+++ b/src/aplonk/transcript.rs
@@ -5,7 +5,7 @@ use rs_merkle::{algorithms::Sha256, Hasher};
 
 /// initialize the transcript of IPA
 ///
-/// this can be found in [aPlonk from [Ambrona et al.]][aPlonK] as *ts := (C_G, r, P)* in
+/// this can be found in [aPlonk from [Ambrona et al.]][aPlonK] as $ts := (C_G, r, P)$ in
 /// - page **15**. in IPA.Prove.1.
 ///
 /// [aPlonk]: https://eprint.iacr.org/2022/1352.pdf
@@ -24,7 +24,7 @@ pub(super) fn initialize<E: Pairing>(
 
 /// reset the transcript of IPA
 ///
-/// this can be found in [aPlonk from [Ambrona et al.]][aPlonK] as *ts := u_j* in
+/// this can be found in [aPlonk from [Ambrona et al.]][aPlonK] as $ts := u_j$ in
 /// - page **15**. in IPA.Prove.5.
 /// - page **15**. in IPA.Verify.3.
 ///
@@ -39,7 +39,7 @@ pub(super) fn reset<E: Pairing>(u: E::ScalarField) -> Result<Vec<u8>, Serializat
 /// hash curve elements into the transcript of IPA
 ///
 /// this can be found in [aPlonk from [Ambrona et al.]][aPlonK] as
-/// *ts := Hash(L_G^j, R_G^j, L_r^j, R_r^j, ts)* in
+/// $ts := \text{hash}(L_G^j, R_G^j, L_r^j, R_r^j, ts)$ in
 /// - page **15**. in IPA.Prove.5.
 /// - page **15**. in IPA.Verify.3.
 ///
-- 
GitLab
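
To make the bit-selection rule above concrete, here is a minimal sketch, assuming a toy prime field with $p = 2^{16} + 1$ and plain integers instead of arkworks field types (illustrative only, not the crate's `compute_g`):

```rust
// Illustrative sketch: coefficient of X^j in g(X) = prod_i (u_i^{-1} + u_i X^{2^i}),
// over the toy prime field F_p with p = 2^16 + 1 (NOT the crate's arkworks types).
const P: u64 = 65_537;

fn pow_mod(mut base: u64, mut exp: u64) -> u64 {
    let mut acc = 1;
    base %= P;
    while exp > 0 {
        if exp & 1 == 1 {
            acc = acc * base % P;
        }
        base = base * base % P;
        exp >>= 1;
    }
    acc
}

// Fermat's little theorem: u^{p - 2} = u^{-1} mod p
fn inv_mod(u: u64) -> u64 {
    pow_mod(u, P - 2)
}

/// coefficient of X^j in g(X), given the challenges (u_i) for 0 <= i < kappa
fn coefficient_of_g(j: usize, us: &[u64]) -> u64 {
    us.iter().enumerate().fold(1, |acc, (i, &u_i)| {
        // the i-th bit of j selects u_i when set, u_i^{-1} otherwise
        let factor = if (j >> i) & 1 == 1 { u_i } else { inv_mod(u_i) };
        acc * factor % P
    })
}

fn main() {
    let us = [3, 5, 7]; // kappa = 3, with (u_0, u_1, u_2) = (3, 5, 7)
    // j = 6 = 0b110: bits (0, 1, 1) -> u_0^{-1} * u_1 * u_2
    let expected = inv_mod(3) * 5 % P * 7 % P;
    assert_eq!(coefficient_of_g(6, &us), expected);
}
```

With `us = [u_0, u_1, u_2]`, the assertion reproduces the $X^6$ row of the $\kappa = 3$ expansion above.
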


From 57ba97916e2b77fc5f29c3938b12491950976a5c Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Tue, 1 Apr 2025 17:37:05 +0200
Subject: [PATCH 38/56] add trusted setup to semi_avid.commit

---
 src/semi_avid.mmd | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/semi_avid.mmd b/src/semi_avid.mmd
index 35914fe3..2f617c1a 100644
--- a/src/semi_avid.mmd
+++ b/src/semi_avid.mmd
@@ -5,11 +5,11 @@ sequenceDiagram
     Note over prover,verifier: generate trusted setup<br/>$$\ \text{TS} = ([\tau^i]_1)$$
 
     Note left of prover: $$(P_j) = \text{split}(\Delta)$$
-    Note left of prover: $$c_j = \text{com}(P_j)$$
+    Note left of prover: $$c_j = \text{com}(P_j, \text{TS})$$
 
     prover->>verifier: $$(c_j)$$
 
     Note right of verifier: $$(\lambda_j) = \text{lincomb}(s_\alpha)$$
     Note right of verifier: $$P_\alpha = \text{poly}(s_\alpha)$$
     Note right of verifier: $$\hat{c} = \sum\limits \lambda_j c_j$$
-    Note right of verifier: assert $$\ \hat{c} = \text{com}(P_{\alpha})$$
+    Note right of verifier: assert $$\ \hat{c} = \text{com}(P_{\alpha}, \text{TS})$$
-- 
GitLab


From d552d7cfb595a355789fd35d606219e536c36b88 Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Tue, 1 Apr 2025 17:39:16 +0200
Subject: [PATCH 39/56] add code markdown things

---
 src/kzg.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/kzg.rs b/src/kzg.rs
index b5d8d747..299eda36 100644
--- a/src/kzg.rs
+++ b/src/kzg.rs
@@ -6,7 +6,7 @@
 //!
 //! # The protocol
 //! Here, we assume that the input data has been encoded with a _Reed-Solomon_ encoding, as can be
-//! done with the [crate::fec] module.
+//! done with the [`crate::fec`] module.
 //!
 //! > **Note**
 //! >
-- 
GitLab


From cd60037af6abedb85e43b1a6cc07cf07e0eaf0ad Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Thu, 3 Apr 2025 13:04:06 +0200
Subject: [PATCH 40/56] remove RUSTDOCFLAGS from make.rs

---
 make.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/make.rs b/make.rs
index 6fa31a67..baaada36 100755
--- a/make.rs
+++ b/make.rs
@@ -116,7 +116,7 @@ fn main() {
             if *open { cmd.push("--open") }
             if *private { cmd.push("--document-private-items") }
             if *features { cmd.push("--all-features") }
-            nob::run_cmd_as_vec_and_fail!(cmd ; "RUSTDOCFLAGS" => "--html-in-header katex.html");
+            nob::run_cmd_as_vec_and_fail!(cmd);
         },
         None => {}
     }
-- 
GitLab


From 987acbda398a53070c4a0c64754798e791245f44 Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Wed, 16 Apr 2025 14:58:47 +0200
Subject: [PATCH 41/56] change wording in `split_data_into_field_elements`

---
 src/algebra/mod.rs | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/src/algebra/mod.rs b/src/algebra/mod.rs
index c72e584a..dc2292b1 100644
--- a/src/algebra/mod.rs
+++ b/src/algebra/mod.rs
@@ -15,12 +15,9 @@ pub mod linalg;
 
 /// Splits a sequence of raw bytes into valid field elements in $\mathbb{F}_p$.
 ///
-/// [`split_data_into_field_elements`] supports padding the output vector of
-/// elements by giving a number that needs to divide the length of the vector, i.e. if we denote
-/// the number of output elements by $o$ and the desired length multiple by $m$ (`modulus`), then
-/// we have
+/// The size of the output vector is a multiple of the provided `modulus` argument.
 ///
-/// $$ m | o $$
+/// If necessary, the output vector is padded with $1$ in $\mathbb{F}_p$.
 ///
 /// # Example
 /// In the following example $\mathbb{F}_p$ is a small finite field with prime order $2^{16} + 1$ and which
-- 
GitLab
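
The padding rule described in the reworded documentation above can be sketched as follows, with plain integers standing in for field elements (a minimal illustration, not the crate's `split_data_into_field_elements`):

```rust
// Illustrative sketch of the padding rule (NOT the crate's implementation):
// the output length is rounded up to the next multiple of `modulus` and the
// extra slots are filled with 1, the multiplicative identity of F_p.
fn pad_to_multiple(mut elements: Vec<u64>, modulus: usize) -> Vec<u64> {
    let remainder = elements.len() % modulus;
    if remainder != 0 {
        elements.resize(elements.len() + modulus - remainder, 1);
    }
    elements
}

fn main() {
    // 5 field elements with modulus 4 -> padded to 8, the last 3 being 1
    let padded = pad_to_multiple(vec![42, 7, 13, 99, 3], 4);
    assert_eq!(padded.len(), 8);
    assert_eq!(&padded[5..], &[1, 1, 1]);
}
```
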


From db658d603a90ce7201b6399ce652723fc6ccd503 Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Wed, 16 Apr 2025 15:02:11 +0200
Subject: [PATCH 42/56] impl "compatible shards" MR review

---
 src/fec.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/fec.rs b/src/fec.rs
index 79ff6a06..2c491ac6 100644
--- a/src/fec.rs
+++ b/src/fec.rs
@@ -112,7 +112,7 @@ pub fn recode_with_coeffs<F: PrimeField>(shards: &[Shard<F>], coeffs: &[F]) -> O
     Some(s)
 }
 
-/// Computes a recoded shard from an arbitrary set of shards.
+/// Computes a recoded shard from an arbitrary set of compatible shards.
 ///
 /// Coefficients will be drawn at random, one for each shard.
 ///
-- 
GitLab


From b017dec5b394d8a2369b6b3941e33d0a3de289ae Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Wed, 16 Apr 2025 15:18:08 +0200
Subject: [PATCH 43/56] impl "degree d" MR review

---
 src/zk.rs | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/src/zk.rs b/src/zk.rs
index 7c4452f1..d68bc4d9 100644
--- a/src/zk.rs
+++ b/src/zk.rs
@@ -24,10 +24,13 @@ use crate::error::KomodoError;
 
 /// A ZK trusted setup.
 ///
-/// This is a simple wrapper around a sequence of elements of the curve, the first $d$ powers of a
+/// This is a simple wrapper around a sequence of elements of the curve, the first $t$ powers of a
 /// _toxic waste_ element $\tau$ on $\mathbb{G}_1$.
 ///
-/// $$ \text{TS} = ([\tau^j]_1)\_{0 \leq j \leq d - 1} $$
+/// $$ \text{TS} = ([\tau^j]_1)\_{0 \leq j \leq t - 1} $$
+///
+/// Usually, a trusted setup is used to commit a polynomial of degree $d = \text{deg}(P)$.
+/// In that case, the trusted setup must contain $d + 1$ elements.
 ///
 /// > **Note**
 /// >
@@ -56,7 +59,7 @@ impl<F: PrimeField, G: CurveGroup<ScalarField = F>> IntoIterator for Powers<F, G
 ///
 /// If $P = (p_j)$ is the polynomial to commit and $\tau$ is the secret, then [`Commitment`] will
 /// hold
-/// $$\text{com}(P) = [P(\tau)]_1 = \sum\limits\_{j = 0}^{\text{deg}(P) - 1} p_j [\tau^j]_1$$
+/// $$\text{com}(P) = [P(\tau)]_1 = \sum\limits\_{j = 0}^{\text{deg}(P)} p_j [\tau^j]_1$$
 ///
 /// > **Note**
 /// >
@@ -64,7 +67,8 @@ impl<F: PrimeField, G: CurveGroup<ScalarField = F>> IntoIterator for Powers<F, G
 #[derive(Debug, Clone, Copy, Default, CanonicalSerialize, CanonicalDeserialize, PartialEq)]
 pub struct Commitment<F: PrimeField, G: CurveGroup<ScalarField = F>>(pub G::Affine);
 
-/// Creates a trusted setup of a given size, the expected maximum degree of the data.
+/// Creates a trusted setup of a given size, the expected maximum degree of the data viewed as a
+/// polynomial.
 ///
 /// > **Note**
 /// >
-- 
GitLab
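
The relation between the size of a trusted setup and the commitment sum above can be sketched in a toy model, assuming $\mathbb{G}_1$ is replaced by integers modulo a small prime so that $[x]_1$ is simply $x \bmod p$ (illustrative only, not the crate's curve-based `zk` module):

```rust
// Illustrative sketch (NOT the crate's curve-based code): the group G_1 is
// modelled by integers modulo a small prime, so "[x]_1" is simply x mod p.
const P: u64 = 65_537;

fn pow_mod(mut base: u64, mut exp: u64) -> u64 {
    let mut acc = 1;
    base %= P;
    while exp > 0 {
        if exp & 1 == 1 {
            acc = acc * base % P;
        }
        base = base * base % P;
        exp >>= 1;
    }
    acc
}

/// com(P) = sum_{j=0}^{deg(P)} p_j [tau^j]_1: a degree-d polynomial needs
/// d + 1 elements of the trusted setup
fn commit(poly: &[u64], trusted_setup: &[u64]) -> u64 {
    assert!(trusted_setup.len() >= poly.len(), "trusted setup too small");
    poly.iter()
        .zip(trusted_setup)
        .fold(0, |acc, (p_j, tau_j)| (acc + p_j * tau_j) % P)
}

fn main() {
    let tau = 13; // the "toxic waste", known only during setup
    let t = 4;
    // TS = ([tau^j]_1) for 0 <= j <= t - 1
    let trusted_setup: Vec<u64> = (0..t).map(|j| pow_mod(tau, j)).collect();

    // P(X) = 3 + X + 4 X^2 + X^3 has degree 3 and needs d + 1 = 4 elements
    let poly = [3, 1, 4, 1];
    // committing amounts to evaluating P at tau "in the exponent"
    let eval_at_tau = poly.iter().rev().fold(0, |acc, &p| (acc * tau + p) % P);
    assert_eq!(commit(&poly, &trusted_setup), eval_at_tau);
}
```
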


From d87f3b20d287f3522cc30b46c95992aeff96581c Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Wed, 16 Apr 2025 15:23:42 +0200
Subject: [PATCH 44/56] impl "math notation" MR review

---
 src/semi_avid.rs | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/src/semi_avid.rs b/src/semi_avid.rs
index d2c9d1e1..5c9b8184 100644
--- a/src/semi_avid.rs
+++ b/src/semi_avid.rs
@@ -16,6 +16,11 @@
 //! used: _the commitment of a linear combination of polynomials is equal to the same linear
 //! combination of the commitments of the same polynomials_.
 //!
+//! > **Note**
+//! >
+//! > In the following, we denote by $\text{com}$ the commitment operation and by
+//! > $\mathbb{F}_p\[X\]$ the ring of all polynomials of one variable over $\mathbb{F}_p$.
+//!
 //! $$\forall (\alpha_i) \in \mathbb{F}_p, (P_i) \in \mathbb{F}_p\[X\], \quad \text{com}\left(\sum\limits_i \alpha_i P_i\right) = \sum\limits_i \alpha_i \text{com}(P_i)$$
 //!
 //! This gives us a simple, lightweight and fast commitment scheme.
-- 
GitLab
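
A minimal check of that homomorphism, in a simplified model where integers modulo a small prime stand in for $\mathbb{G}_1$ (illustrative only, not the crate's implementation):

```rust
// Illustrative check of the homomorphism (NOT the crate's curve-based code):
// integers modulo a small prime stand in for G_1, so commitments are plain
// linear forms in the polynomial coefficients.
const P: u64 = 65_537;

fn commit(poly: &[u64], trusted_setup: &[u64]) -> u64 {
    // com(P) = sum_j p_j [tau^j]_1
    poly.iter()
        .zip(trusted_setup)
        .fold(0, |acc, (p, t)| (acc + p * t) % P)
}

fn main() {
    // trusted setup for tau = 13: [tau^0, tau^1, tau^2, tau^3]
    let trusted_setup: Vec<u64> = (0u32..4).map(|j| 13u64.pow(j) % P).collect();

    let (p_0, p_1) = ([1, 2, 3, 4], [5, 6, 7, 8]);
    let (alpha_0, alpha_1) = (9u64, 11u64);

    // alpha_0 P_0 + alpha_1 P_1, coefficient-wise
    let combination: Vec<u64> = p_0
        .iter()
        .zip(&p_1)
        .map(|(x, y)| (alpha_0 * x + alpha_1 * y) % P)
        .collect();

    // com(alpha_0 P_0 + alpha_1 P_1) == alpha_0 com(P_0) + alpha_1 com(P_1)
    let lhs = commit(&combination, &trusted_setup);
    let rhs = (alpha_0 * commit(&p_0, &trusted_setup)
        + alpha_1 * commit(&p_1, &trusted_setup))
        % P;
    assert_eq!(lhs, rhs);
}
```

The check passes because, in this model, committing is a linear form in the polynomial coefficients, which is exactly the property Semi-AVID relies on.
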


From 66ce3d603245971febe7d1b5c597da49e8660bf4 Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Wed, 16 Apr 2025 15:24:25 +0200
Subject: [PATCH 45/56] impl "number of row/col" MR review

---
 src/semi_avid.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/semi_avid.rs b/src/semi_avid.rs
index 5c9b8184..941ad1a1 100644
--- a/src/semi_avid.rs
+++ b/src/semi_avid.rs
@@ -28,7 +28,7 @@
 //! > **Note**
 //! >
 //! > In the following, the data $\Delta$ is arranged in an $m \times k$ matrix and $i$ will denote
-//! the number of a row and $j$ those of a column
+//! the number of a row and $j$ the number of a column
 //! > - $0 \leq i \leq m - 1$
 //! > - $0 \leq j \leq k - 1$
 #![doc = simple_mermaid::mermaid!("semi_avid.mmd")]
-- 
GitLab


From be432d19e769a88280dd977b23957298903a8311 Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Wed, 16 Apr 2025 15:29:10 +0200
Subject: [PATCH 46/56] impl "s alpha" MR review

---
 src/semi_avid.rs | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/src/semi_avid.rs b/src/semi_avid.rs
index 941ad1a1..ab1cb072 100644
--- a/src/semi_avid.rs
+++ b/src/semi_avid.rs
@@ -31,6 +31,12 @@
 //! the number of a row and $j$ the number of a column
 //! > - $0 \leq i \leq m - 1$
 //! > - $0 \leq j \leq k - 1$
+//!
+//! Let’s explain with a very simple example how Semi-AVID operates. The setup is that a prover
+//! wants to show a verifier that a shard of encoded data $s_\alpha$ has indeed been generated as
+//! a linear combination of the $k$ source shards of data $\Delta$. Here, $\alpha$ is the index
+//! that identifies shard $s_\alpha$ and $\text{lincomb}(s_\alpha)$ is the linear combination used
+//! to compute $s_\alpha$ from the $k$ source shards.
 #![doc = simple_mermaid::mermaid!("semi_avid.mmd")]
 //!
 //! # Example
-- 
GitLab


From 4e824bd5604851e2b5d6a72b006115277511d7e3 Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Wed, 16 Apr 2025 15:30:01 +0200
Subject: [PATCH 47/56] impl "note consistency" MR review

---
 src/semi_avid.rs | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/src/semi_avid.rs b/src/semi_avid.rs
index ab1cb072..b647953d 100644
--- a/src/semi_avid.rs
+++ b/src/semi_avid.rs
@@ -233,8 +233,10 @@ impl<F: PrimeField, G: CurveGroup<ScalarField = F>> std::fmt::Display for Block<
 ///
 /// Coefficients will be drawn at random, one for each block.
 ///
-/// If the blocks appear to come from different data, e.g. if the commits are
-/// different, an error will be returned.
+/// > **Note**
+/// >
+/// > If the blocks appear to come from different data, e.g. if the commits are
+/// > different, an error will be returned.
 ///
 /// > **Note**
 /// >
-- 
GitLab


From d3233b6869f82b3303946527f796190642729586 Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Wed, 16 Apr 2025 15:31:07 +0200
Subject: [PATCH 48/56] impl "col row" MR review

---
 src/kzg.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/kzg.rs b/src/kzg.rs
index 299eda36..79438297 100644
--- a/src/kzg.rs
+++ b/src/kzg.rs
@@ -11,7 +11,7 @@
 //! > **Note**
 //! >
 //! > In the following, the data $\Delta$ is arranged in an $m \times k$ matrix and $i$ will denote
-//! > the number of a row and $j$ those of a column
+//! > the number of a row and $j$ the number of a column
 //! > - $0 \leq i \leq m - 1$
 //! > - $0 \leq j \leq k - 1$
 #![doc = simple_mermaid::mermaid!("kzg.mmd")]
-- 
GitLab


From 2c05b02fc4442b67a5f55e8a9221300c4c384d83 Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Wed, 16 Apr 2025 15:35:53 +0200
Subject: [PATCH 49/56] impl "H and E" MR review

---
 src/kzg.rs | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/src/kzg.rs b/src/kzg.rs
index 79438297..7e335d02 100644
--- a/src/kzg.rs
+++ b/src/kzg.rs
@@ -14,6 +14,11 @@
 //! > the number of a row and $j$ the number of a column
 //! > - $0 \leq i \leq m - 1$
 //! > - $0 \leq j \leq k - 1$
+//! >
+//! > Also, $H$ is a secure hash function and
+//! > $E: \mathbb{G}_1 \times \mathbb{G}_2 \mapsto \mathbb{G}_T$ is the bilinear pairing mapping
+//! > defined on _pairing-friendly_ elliptic curves $(\mathbb{G}_1, \mathbb{G}_2, \mathbb{G}_T)$
+//! > such as BLS12-381.
 #![doc = simple_mermaid::mermaid!("kzg.mmd")]
 //!
 //! Conveniently, each one of the $n$ encoded shards is a linear combination of the $k$ source
-- 
GitLab


From 244e7ce1afdb4e816d40b33d7fbb1d645989fcda Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Wed, 16 Apr 2025 15:40:09 +0200
Subject: [PATCH 50/56] impl "example" MR review

---
 src/lib.rs | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/src/lib.rs b/src/lib.rs
index 99f23367..a8ee0778 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -23,7 +23,11 @@
 //! > **Note**
 //! >
 //! > the following example uses some syntax of Rust but is NOT valid Rust code and omits a lot of
-//! > details for both Rust and Komodo
+//! > details for both Rust and Komodo.
+//! >
+//! > Real, complete examples can be found in the
+//! > [`examples/`](https://gitlab.isae-supaero.fr/dragoon/komodo/-/tree/main/examples)
+//! > directory in the repository.
 //!
 //! 1. choose an _encoding matrix_ to encode the _input data_
 //! ```ignore
-- 
GitLab


From f0b22ca31c4057b16207e42a99f89d3d255e408c Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Wed, 16 Apr 2025 15:41:06 +0200
Subject: [PATCH 51/56] impl "_usize" MR review

---
 src/semi_avid.rs | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/semi_avid.rs b/src/semi_avid.rs
index b647953d..829ac342 100644
--- a/src/semi_avid.rs
+++ b/src/semi_avid.rs
@@ -59,7 +59,7 @@
 //! # fn main() {
 //! let mut rng = ark_std::test_rng();
 //!
-//! let (k, n) = (3, 6_usize);
+//! let (k, n) = (3, 6);
 //! let bytes = include_bytes!("../assets/dragoon_133x133.png").to_vec();
 //! # }
 //! ```
@@ -69,7 +69,7 @@
 //! # fn main() {
 //! # let mut rng = ark_std::test_rng();
 //! #
-//! # let (k, n) = (3, 6_usize);
+//! # let (k, n) = (3, 6);
 //! # let bytes = include_bytes!("../assets/dragoon_133x133.png").to_vec();
 //! #
 //! let powers = komodo::zk::setup::<F, G>(bytes.len(), &mut rng).unwrap();
@@ -85,7 +85,7 @@
 //! # fn main() {
 //! # let mut rng = ark_std::test_rng();
 //! #
-//! # let (k, n) = (3, 6_usize);
+//! # let (k, n) = (3, 6);
 //! # let bytes = include_bytes!("../assets/dragoon_133x133.png").to_vec();
 //! #
 //! # let powers = komodo::zk::setup::<F, G>(bytes.len(), &mut rng).unwrap();
@@ -106,7 +106,7 @@
 //! # fn main() {
 //! # let mut rng = ark_std::test_rng();
 //! #
-//! # let (k, n) = (3, 6_usize);
+//! # let (k, n) = (3, 6);
 //! # let bytes = include_bytes!("../assets/dragoon_133x133.png").to_vec();
 //! #
 //! # let powers = komodo::zk::setup::<F, G>(bytes.len(), &mut rng).unwrap();
@@ -131,7 +131,7 @@
 //! # fn main() {
 //! # let mut rng = ark_std::test_rng();
 //! #
-//! # let (k, n) = (3, 6_usize);
+//! # let (k, n) = (3, 6);
 //! # let bytes = include_bytes!("../assets/dragoon_133x133.png").to_vec();
 //! #
 //! # let powers = komodo::zk::setup::<F, G>(bytes.len(), &mut rng).unwrap();
-- 
GitLab


From 6b9258fc92f643bc2690b512ec7883429bddb6ec Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Wed, 16 Apr 2025 15:43:59 +0200
Subject: [PATCH 52/56] impl "bytes len" MR review

---
 src/semi_avid.rs | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/src/semi_avid.rs b/src/semi_avid.rs
index 829ac342..37053396 100644
--- a/src/semi_avid.rs
+++ b/src/semi_avid.rs
@@ -63,7 +63,9 @@
 //! let bytes = include_bytes!("../assets/dragoon_133x133.png").to_vec();
 //! # }
 //! ```
-//! - then, Semi-AVID requires a trusted setup to prove and verify
+//! - then, Semi-AVID requires a trusted setup to prove and verify. Enough trusted setup will be
+//! created to support data as big as $10 \times 1024$ elements of $\mathbb{F}_p$, to allow users
+//! to reuse it with multiple files of varying lengths.
 //! ```
 //! # use ark_bls12_381::{Fr as F, G1Projective as G};
 //! # fn main() {
@@ -72,7 +74,7 @@
 //! # let (k, n) = (3, 6);
 //! # let bytes = include_bytes!("../assets/dragoon_133x133.png").to_vec();
 //! #
-//! let powers = komodo::zk::setup::<F, G>(bytes.len(), &mut rng).unwrap();
+//! let powers = komodo::zk::setup::<F, G>(10 * 1_024, &mut rng).unwrap();
 //! # }
 //! ```
 //! - we can now build an encoding matrix, encode the data, prove the shards and build [`Block`]s
@@ -88,7 +90,7 @@
 //! # let (k, n) = (3, 6);
 //! # let bytes = include_bytes!("../assets/dragoon_133x133.png").to_vec();
 //! #
-//! # let powers = komodo::zk::setup::<F, G>(bytes.len(), &mut rng).unwrap();
+//! # let powers = komodo::zk::setup::<F, G>(10 * 1_024, &mut rng).unwrap();
 //! #
 //! let encoding_mat = &komodo::algebra::linalg::Matrix::random(k, n, &mut rng);
 //! let shards = komodo::fec::encode(&bytes, encoding_mat).unwrap();
@@ -109,7 +111,7 @@
 //! # let (k, n) = (3, 6);
 //! # let bytes = include_bytes!("../assets/dragoon_133x133.png").to_vec();
 //! #
-//! # let powers = komodo::zk::setup::<F, G>(bytes.len(), &mut rng).unwrap();
+//! # let powers = komodo::zk::setup::<F, G>(10 * 1_024, &mut rng).unwrap();
 //! #
 //! # let encoding_mat = &komodo::algebra::linalg::Matrix::random(k, n, &mut rng);
 //! # let shards = komodo::fec::encode(&bytes, encoding_mat).unwrap();
@@ -134,7 +136,7 @@
 //! # let (k, n) = (3, 6);
 //! # let bytes = include_bytes!("../assets/dragoon_133x133.png").to_vec();
 //! #
-//! # let powers = komodo::zk::setup::<F, G>(bytes.len(), &mut rng).unwrap();
+//! # let powers = komodo::zk::setup::<F, G>(10 * 1_024, &mut rng).unwrap();
 //! #
 //! # let encoding_mat = &komodo::algebra::linalg::Matrix::random(k, n, &mut rng);
 //! # let shards = komodo::fec::encode(&bytes, encoding_mat).unwrap();
-- 
GitLab


From c1ec59fe1aafb3e482a63a8cc79a262ff43bccad Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Wed, 16 Apr 2025 15:47:04 +0200
Subject: [PATCH 53/56] impl "first k shards" MR review

---
 src/semi_avid.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/semi_avid.rs b/src/semi_avid.rs
index 37053396..4efbf974 100644
--- a/src/semi_avid.rs
+++ b/src/semi_avid.rs
@@ -123,7 +123,7 @@
 //! }
 //! # }
 //! ```
-//! - and decoded using any $k$ of the shards
+//! - and decoded using any $k$ of the shards, here the first $k$
 //! ```
 //! # use ark_bls12_381::{Fr as F, G1Projective as G};
 //! # use ark_poly::univariate::DensePolynomial as DP;
-- 
GitLab


From 231f7d0c98af581a741876987fae7775be0f1f03 Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Wed, 16 Apr 2025 15:49:03 +0200
Subject: [PATCH 54/56] impl "same TS" MR review

---
 src/semi_avid.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/semi_avid.rs b/src/semi_avid.rs
index 4efbf974..2812f108 100644
--- a/src/semi_avid.rs
+++ b/src/semi_avid.rs
@@ -98,7 +98,7 @@
 //! let blocks = build::<F, G, DP<F>>(&shards, &proof);
 //! # }
 //! ```
-//! - finally, each [`Block`] can be verified individually
+//! - finally, each [`Block`] can be verified individually, using the same trusted setup
 //! ```
 //! # use ark_bls12_381::{Fr as F, G1Projective as G};
 //! # use ark_poly::univariate::DensePolynomial as DP;
-- 
GitLab


From 670908832e7fb6b635ad029ddfa359ba315838b3 Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Tue, 22 Apr 2025 09:47:25 +0200
Subject: [PATCH 55/56] impl "zk top-level" MR review

---
 src/zk.rs | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/zk.rs b/src/zk.rs
index d68bc4d9..b61098b1 100644
--- a/src/zk.rs
+++ b/src/zk.rs
@@ -1,4 +1,5 @@
-//! A replacement for Arkworks' KZG10 module.
+//! A replacement for Arkworks' KZG10 module, providing tools to build _trusted setups_ and
+//! commit polynomials.
 //!
 //! This module mostly redefines [`ark_poly_commit::kzg10::KZG10::setup`] and
 //! [`ark_poly_commit::kzg10::KZG10::commit`] to be used with [`crate::semi_avid`].
-- 
GitLab


From 2ca29bfd9aa53f52da05b11d001e3d3e5deec783 Mon Sep 17 00:00:00 2001
From: "a.stevan" <antoine.stevan@isae-supaero.fr>
Date: Tue, 22 Apr 2025 12:19:17 +0200
Subject: [PATCH 56/56] impl "bytes len" MR review 2

---
 src/semi_avid.rs | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/semi_avid.rs b/src/semi_avid.rs
index 2812f108..f581b0d8 100644
--- a/src/semi_avid.rs
+++ b/src/semi_avid.rs
@@ -63,9 +63,9 @@
 //! let bytes = include_bytes!("../assets/dragoon_133x133.png").to_vec();
 //! # }
 //! ```
-//! - then, Semi-AVID requires a trusted setup to prove and verify. Enough trusted setup will be
-//! created to support data as big as $10 \times 1024$ elements of $\mathbb{F}_p$, to allow users
-//! to reuse it with multiple files of varying lengths.
+//! - then, Semi-AVID requires a trusted setup to prove and verify. This example creates a trusted
+//! setup large enough to support data of up to $10 \times 1024$ elements of $\mathbb{F}_p$,
+//! allowing users to reuse it with multiple files of varying lengths.
 //! ```
 //! # use ark_bls12_381::{Fr as F, G1Projective as G};
 //! # fn main() {
-- 
GitLab