/// measures the time it takes to apply a function to a set of arguments and returns the result
/// of the call
///
/// ```rust
/// fn add(a: i32, b: i32) -> i32 { a + b }
/// let (res, time) = timeit!(add, 1, 2);
/// ```
/// will be the same as
/// ```rust
/// fn add(a: i32, b: i32) -> i32 { a + b }
/// let (res, time) = {
/// let start = std::time::Instant::now();
/// let res = add(1, 2);
/// let time = start.elapsed();
/// (res, time)
/// };
/// ```
#[macro_export]
macro_rules! timeit {
($func:expr, $( $args:expr ),*) => {{
let start = std::time::Instant::now();
let res = $func( $( $args ),* );
let time = start.elapsed();
(res, time)
}};
}
/// same as [`timeit`] but prints a name and the time at the end directly
///
/// ```rust
/// fn add(a: i32, b: i32) -> i32 { a + b }
/// let res = timeit_and_print!("addition", add, 1, 2);
/// ```
/// will be the same as
/// ```rust
/// fn add(a: i32, b: i32) -> i32 { a + b }
/// let res = {
/// print!("addition: ");
/// let start = std::time::Instant::now();
/// let res = add(1, 2);
/// let time = start.elapsed();
/// println!("{}", time.as_nanos());
/// res
/// };
/// ```
#[macro_export]
macro_rules! timeit_and_print {
($name: expr, $func:expr, $( $args:expr ),*) => {{
print!("{}: ", $name);
let (res, time) = timeit!($func, $($args),*);
println!("{}", time.as_nanos());
res
}};
}
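// a minimal usage sketch of both macros (hedged: assumes they are imported where used):
//
//     fn add(a: i32, b: i32) -> i32 { a + b }
//
//     let (res, time) = timeit!(add, 1, 2); // res == 3, time is a `std::time::Duration`
//     let res = timeit_and_print!("addition", add, 1, 2); // prints "addition: <nanoseconds>"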
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/katex@0.16.21/dist/katex.min.css" integrity="sha384-zh0CIslj+VczCZtlzBcjt5ppRcsAmDnRem7ESsYwWwg3m/OaJ2l4x7YBZl9Kxxib" crossorigin="anonymous">
<script defer src="https://cdn.jsdelivr.net/npm/katex@0.16.21/dist/katex.min.js" integrity="sha384-Rma6DA2IPUwhNxmrB/7S3Tno0YY7sFu9WSYMCuulLhIqYSGZ2gKCJWIqhBWqMQfh" crossorigin="anonymous"></script>
<script defer src="https://cdn.jsdelivr.net/npm/katex@0.16.21/dist/contrib/auto-render.min.js" integrity="sha384-hCXGrW6PitJEwbkoStFjeJxv+fSOOQKOPbJxSfM6G5sWZjAyWhXiTIIAmQqnlLlh" crossorigin="anonymous"
onload="renderMathInElement(document.body);"></script>
<script>
document.addEventListener("DOMContentLoaded", function() {
renderMathInElement(document.body, {
delimiters: [
{left: "$$", right: "$$", display: true},
{left: "\\(", right: "\\)", display: false},
{left: "$", right: "$", display: false},
{left: "\\[", right: "\\]", display: true}
]
});
});
</script>
#!/usr/bin/env run-cargo-script
//! ```cargo
//! [package]
//! name = "komodo-make"
//! version = "1.0.0"
//! edition = "2021"
//!
//! [dependencies]
//! nob = { git = "https://gitlab.isae-supaero.fr/a.stevan/nob.rs", rev = "7ea6be855cf5600558440def6e59a83f78b8b543" }
//! clap = { version = "4.5.17", features = ["derive"] }
//!
//! # for `container --list`
//! serde = { version = "1.0", features = ["derive"] }
//! serde_json = "1.0"
//! prettytable = "0.10.0"
//! ```
use clap::{Parser, Subcommand};
use prettytable::{format, Cell, Row, Table};
use serde_json::Value;
const REGISTRY: &str = "gitlab-registry.isae-supaero.fr";
const MIRROR_REGISTRY: &str = "ghcr.io/dragoon-rs";
const IMAGE: &str = "dragoon/komodo";
const DOCKERFILE: &str = ".env.dockerfile";
#[derive(Parser)]
#[command(version, about, long_about = None)]
struct Cli {
#[command(subcommand)]
command: Option<Commands>,
}
#[derive(Subcommand)]
enum Commands {
/// Formats the code.
Fmt {
/// Only checks instead of really formatting.
#[arg(short, long)]
check: bool,
},
/// Checks the code.
Check,
/// Runs Clippy.
Clippy,
/// Runs the tests.
Test {
/// Be extra verbose with the output of the tests.
#[arg(short, long)]
verbose: bool,
/// Run the examples instead of regular tests.
#[arg(short, long)]
examples: bool,
},
/// Shows the version of all the tools used.
Version,
/// Builds the documentation.
Doc {
/// Open the documentation in the browser.
#[arg(short, long)]
open: bool,
/// Document private items.
#[arg(short, long)]
private: bool,
/// Document all features.
#[arg(short, long)]
features: bool,
},
/// Run all that is needed for the Continuous Integration of the project.
CI {
/// Run the "fmt" stage of the Continuous Integration.
#[arg(short, long)]
fmt: bool,
/// Run the "test" stage of the Continuous Integration.
#[arg(short, long)]
test: bool,
/// Be extra verbose with the output of the Continuous Integration.
#[arg(short, long)]
verbose: bool,
},
/// Builds the container.
#[command(subcommand)]
Container(ContainerCommands),
}
#[derive(Subcommand)]
enum ContainerCommands {
/// Build the current dockerfile.
Build,
/// List the local images.
List {
/// Print the output table as NDJSON instead of pretty table.
#[arg(long)]
json: bool,
},
/// Log into the registry instead of building.
Login,
/// Push to the registry instead of building.
Push,
}
fn fmt(check: bool) {
if check {
nob::run_cmd_and_fail!("cargo", "fmt", "--all", "--", "--check");
} else {
nob::run_cmd_and_fail!("cargo", "fmt", "--all");
}
}
fn check() {
let cmd = vec!["cargo", "check", "--workspace", "--all-targets"];
extend_and_run(&cmd, &[]);
extend_and_run(&cmd, &["--features", "kzg"]);
extend_and_run(&cmd, &["--features", "aplonk"]);
extend_and_run(&cmd, &["--all-features"]);
}
fn clippy() {
nob::run_cmd_and_fail!(
"cargo",
"clippy",
"--workspace",
"--all-targets",
"--all-features",
"--",
"-D",
"warnings"
);
}
fn test(verbose: bool, examples: bool) {
let mut cmd = vec!["cargo", "test"];
if verbose {
cmd.push("--verbose")
}
if examples {
cmd.push("--examples");
} else {
cmd.push("--workspace");
cmd.push("--all-features");
}
nob::run_cmd_as_vec_and_fail!(cmd);
}
fn version() {
nob::run_cmd_and_fail!(@"rustup", "--version", "2>", "/dev/null");
nob::run_cmd_and_fail!(@"rustup", "show", "active-toolchain");
nob::run_cmd_and_fail!(@"rustc", "--version");
nob::run_cmd_and_fail!(@"cargo", "--version");
nob::run_cmd_and_fail!(@"cargo", "clippy", "--version");
}
fn doc(open: bool, private: bool, features: bool) {
let mut cmd = vec!["cargo", "doc", "--no-deps"];
if open {
cmd.push("--open")
}
if private {
cmd.push("--document-private-items")
}
if features {
cmd.push("--all-features")
}
nob::run_cmd_as_vec_and_fail!(cmd);
}
fn main() {
let cli = Cli::parse();
match &cli.command {
Some(Commands::Fmt { check }) => fmt(*check),
Some(Commands::Check) => check(),
Some(Commands::Clippy) => clippy(),
Some(Commands::Test { verbose, examples }) => test(*verbose, *examples),
Some(Commands::Version) => version(),
Some(Commands::Doc {
open,
private,
features,
}) => doc(*open, *private, *features),
Some(Commands::CI {
fmt: fmt_stage,
test: test_stage,
verbose,
}) => match (fmt_stage, test_stage) {
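// no stage flag, or both of them, means running the full pipeline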
(false, false) | (true, true) => {
fmt(true);
version();
check();
clippy();
test(*verbose, false);
test(*verbose, true);
}
(true, false) => fmt(true),
(false, true) => {
version();
check();
clippy();
test(*verbose, false);
test(*verbose, true);
}
},
Some(Commands::Container(container_cmd)) => {
let res = nob::run_cmd_and_fail!(@+"git", "rev-parse", "HEAD");
let sha = String::from_utf8(res.stdout).expect("Invalid UTF-8 string");
let repo = format!("{}/{}", REGISTRY, IMAGE);
let image = format!("{}:{}", repo, sha.trim());
let mirror_repo = format!("{}/{}", MIRROR_REGISTRY, IMAGE);
let mirror_image = format!("{}:{}", mirror_repo, sha.trim());
match container_cmd {
ContainerCommands::Login => {
nob::run_cmd_and_fail!("docker", "login", REGISTRY);
nob::run_cmd_and_fail!("docker", "login", MIRROR_REGISTRY);
}
ContainerCommands::Build => {
let cmd = vec!["docker", "build", ".", "--file", DOCKERFILE];
extend_and_run(&cmd, &["-t", &image]);
extend_and_run(&cmd, &["-t", &mirror_image]);
}
ContainerCommands::List { json } => {
let cmd = vec!["docker", "image", "list", "--format", "json"];
let images = extend_and_run_and_capture_silent(&cmd, &[&repo])
+ &extend_and_run_and_capture_silent(&cmd, &[&mirror_repo]);
if *json {
println!("{}", images);
} else {
docker_images_to_table(images).printstd();
}
}
ContainerCommands::Push => {
nob::run_cmd_and_fail!("docker", "push", &image);
nob::run_cmd_and_fail!("docker", "push", &mirror_image);
}
}
}
None => {}
}
}
fn docker_images_to_table(lines: String) -> Table {
let mut rows: Vec<Vec<String>> = Vec::new();
let mut headers: Vec<String> = Vec::new();
for line in lines.lines() {
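// each line of `docker image list --format json` is one JSON object;
// the table headers are taken from the keys of the first object parsed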
if let Value::Object(map) = serde_json::from_str(&line).unwrap_or_else(|_| Value::Null) {
if headers.is_empty() {
headers = map.keys().cloned().collect();
}
let row: Vec<String> = headers
.iter()
.map(|key| map.get(key).map_or("".to_string(), |v| v.to_string()))
.collect();
rows.push(row);
}
}
let mut table = Table::new();
table.set_format(*format::consts::FORMAT_NO_BORDER_LINE_SEPARATOR);
table.set_titles(Row::new(headers.iter().map(|h| Cell::new(h)).collect()));
for row in rows {
table.add_row(Row::new(row.iter().map(|v| Cell::new(v)).collect()));
}
table
}
// NOTE: this could be migrated to [`nob.rs`](https://gitlab.isae-supaero.fr/a.stevan/nob.rs)
fn extend_and_run(cmd: &[&str], args: &[&str]) {
let mut cmd = cmd.to_vec();
cmd.extend_from_slice(&args);
nob::run_cmd_as_vec_and_fail!(cmd);
}
// NOTE: this could be migrated to [`nob.rs`](https://gitlab.isae-supaero.fr/a.stevan/nob.rs)
fn extend_and_run_and_capture_silent(cmd: &[&str], args: &[&str]) -> String {
let mut cmd = cmd.to_vec();
cmd.extend_from_slice(&args);
String::from_utf8(nob::run_cmd_as_vec_and_fail!(@+cmd).stdout).expect("Invalid UTF-8 string")
}
[toolchain]
profile = "minimal"
channel = "1.78"
components = ["rustfmt", "clippy", "rust-analyzer"]
const GH_API_OPTIONS = [
-H "Accept: application/vnd.github+json"
-H "X-GitHub-Api-Version: 2022-11-28"
]
def "str color" [color: string]: [ string -> string ] {
$"(ansi $color)($in)(ansi reset)"
}
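# usage sketch: `"up to date" | str color "green"` wraps the input string in ANSI color codes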
def __log [level: string, color: string, msg: string] {
print $"[(ansi $color)($level)(ansi reset)] ($msg)"
}
def "log error" [msg: string] { __log "ERR" "red" $msg }
def "log info" [msg: string] { __log "INF" "cyan" $msg }
def "log ok" [msg: string] { __log " OK" "green" $msg }
^$nu.current-exe ./scripts/check-nushell-version.nu
def main [base: string, mirror: string, branch: string] {
let base_remote = random uuid
let mirror_remote = random uuid
log info "adding remotes"
git remote add $base_remote $base
git remote add $mirror_remote $mirror
log info "fetching"
git fetch --quiet $base_remote
git fetch --quiet $mirror_remote
let base = git rev-parse $"($base_remote)/($branch)" | str trim
let mirror = git rev-parse $"($mirror_remote)/($branch)" | str trim
log info "cleaning"
git remote remove $base_remote
git remote remove $mirror_remote
if $base != $mirror {
let hist = git rev-list $"($mirror)..($base)" | lines
log error "mirror is out of date"
{
b: ($base | str substring 0..<7),
m: ($mirror | str substring 0..<7),
h: ($hist | length),
}
| print $" ($in.b | str color green) | ($in.m | str color red) \(($in.h) commits behind\)"
} else {
log ok "mirror is up to date"
}
log info "pulling mirror runs"
let res = gh api ...$GH_API_OPTIONS /repos/dragoon-rs/komodo/actions/runs | from json
let runs = $res.workflow_runs
| where head_branch == $branch
| select id head_sha status conclusion run_started_at
| into datetime run_started_at
| sort-by run_started_at
$env.config.table = {
mode: compact,
index_mode: always,
show_empty: true,
padding: { left: 0, right: 0 },
header_on_separator: true,
trim: {
methodology: wrapping,
wrapping_try_keep_words: true,
},
abbreviated_row_count: null,
footer_inheritance: false,
}
print $runs
}
let config = open .nu.cfg
| lines
| parse "{key}: {value}"
| transpose --header-row
| into record
if (version).commit_hash != $config.REVISION or (version).version != $config.VERSION {
print --stderr $"(ansi yellow_bold)Warning(ansi reset): unexpected version"
print --stderr $" expected (ansi green)($config.VERSION)@($config.REVISION)(ansi reset)"
print --stderr $" found (ansi red)((version).version)@((version).commit_hash)(ansi reset)"
}
const GH_API_OPTIONS = [
-H "Accept: application/vnd.github+json"
-H "X-GitHub-Api-Version: 2022-11-28"
]
const GITHUB_MIRROR = "dragoon-rs/komodo"
^$nu.current-exe ./scripts/check-nushell-version.nu
def main [branch: string]: [ nothing -> string ] {
let res = gh api ...$GH_API_OPTIONS $"/repos/($GITHUB_MIRROR)/actions/runs" | from json
let runs = $res.workflow_runs
| where head_branch == $branch
| select id head_sha status conclusion run_started_at
| into datetime run_started_at
| sort-by run_started_at
$runs
| update id { $"[`($in)`]\(https://github.com/($GITHUB_MIRROR)/actions/runs/($in)\)" }
| update run_started_at { format date "%Y-%m-%dT%H:%M:%S" }
| to md --pretty
}
//! Some linear algebra fun over elements in $\mathbb{F}_p$.
//!
//! This module mainly contains an implementation of matrices over a finite
//! field $\mathbb{F}_p$.
use ark_ff::Field;
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
use ark_std::rand::{Rng, RngCore};
use crate::error::KomodoError;
/// A matrix defined over a finite field $\mathbb{F}_p$.
///
/// Internally, a matrix is just a vector of field elements whose length is
/// exactly the width times the height and where elements are organized row by
/// row.
#[derive(Clone, PartialEq, Default, Debug, CanonicalSerialize, CanonicalDeserialize)]
pub struct Matrix<T: Field> {
/// $h \times w$ elements in $\mathbb{F}_p$.
pub elements: Vec<T>,
/// the number of rows $h$.
pub height: usize,
/// the number of columns $w$.
pub width: usize,
}
impl<T: Field> Matrix<T> {
/// Builds a matrix from a diagonal of elements in $\mathbb{F}_p$.
///
/// # Example
/// Building a diagonal matrix from the diagonal $(1, 2, 3, 4)$ would give
/// $ \begin{pmatrix}
/// 1 & . & . & . \\\\
/// . & 2 & . & . \\\\
/// . & . & 3 & . \\\\
/// . & . & . & 4 \\\\
/// \end{pmatrix} $.
fn from_diagonal(diagonal: Vec<T>) -> Self {
let size = diagonal.len();
......@@ -49,34 +50,42 @@ impl<T: Field> Matrix<T> {
}
}
/// Builds the identity matrix $I_n$ of a given size $n$.
///
/// # Example
/// The identity of size $3$ is
/// $ I_3 = \begin{pmatrix}
/// 1 & . & . \\\\
/// . & 1 & . \\\\
/// . & . & 1 \\\\
/// \end{pmatrix} $.
fn identity(size: usize) -> Self {
Self::from_diagonal(vec![T::one(); size])
}
/// Builds a _Vandermonde_ matrix for some _seed points_.
///
/// Actually, this is the transpose of the Vandermonde matrix defined in the
/// [Wikipedia article][article], i.e. there are as many columns as there
/// are seed points, the $(\alpha_i)_{1 \leq i \leq m}$, and there are as
/// many rows, $n$, as there are powers of the seed points.
///
/// $ M = V_n(\alpha_1, ..., \alpha_m)^T = \begin{pmatrix}
/// 1 & 1 & ... & 1 \\\\
/// \alpha_1 & \alpha_2 & ... & \alpha_m \\\\
/// \alpha_1^2 & \alpha_2^2 & ... & \alpha_m^2 \\\\
/// \vdots & \vdots & \ddots & \vdots \\\\
/// \alpha_1^{n - 1} & \alpha_2^{n - 1} & ... & \alpha_m^{n - 1} \\\\
/// \end{pmatrix} $
///
/// > **Note**
/// >
/// > If you are sure the points are distinct and don't want to perform any
/// > runtime check to ensure that condition, have a look at
/// > [`Self::vandermonde_unchecked`].
///
/// # Example
/// Let's compute $V_4(0, 1, 2, 3, 4)^T$:
/// ```rust
/// # use ark_ff::Field;
/// # use komodo::algebra::linalg::Matrix;
......@@ -108,11 +117,11 @@ impl<T: Field> Matrix<T> {
for i in 0..points.len() {
for j in (i + 1)..points.len() {
if points[i] == points[j] {
return Err(KomodoError::InvalidVandermonde {
first_index: i,
second_index: j,
value_repr: format!("{}", points[i]),
});
}
}
}
......@@ -120,7 +129,7 @@ impl<T: Field> Matrix<T> {
Ok(Self::vandermonde_unchecked(points, height))
}
/// The unchecked version of [`Self::vandermonde`].
pub fn vandermonde_unchecked(points: &[T], height: usize) -> Self {
let width = points.len();
......@@ -142,7 +151,7 @@ impl<T: Field> Matrix<T> {
}
}
/// Builds a completely random matrix of shape $n \times m$.
pub fn random<R: RngCore>(n: usize, m: usize, rng: &mut R) -> Self {
Self {
elements: (0..(n * m)).map(|_| T::from(rng.gen::<u128>())).collect(),
......@@ -151,10 +160,11 @@ impl<T: Field> Matrix<T> {
}
}
/// Builds a matrix from a "_matrix_" of elements.
///
/// > **Note**
/// >
/// > If you are sure each row should have the same length and don't want to
/// > perform any runtime check to ensure that condition, have a look at
/// > [`Self::from_vec_vec_unchecked`].
///
......@@ -206,19 +216,18 @@ impl<T: Field> Matrix<T> {
let width = matrix[0].len();
for (i, row) in matrix.iter().enumerate() {
if row.len() != width {
return Err(KomodoError::InvalidMatrixElements {
expected: width,
found: row.len(),
row: i,
});
}
}
Ok(Self::from_vec_vec_unchecked(matrix))
}
/// The unchecked version of [`Self::from_vec_vec`].
pub fn from_vec_vec_unchecked(matrix: Vec<Vec<T>>) -> Self {
let height = matrix.len();
let width = matrix[0].len();
......@@ -246,10 +255,11 @@ impl<T: Field> Matrix<T> {
self.elements[i * self.width + j] = value;
}
/// Extracts a single column from the matrix.
///
/// > **Note**
/// >
/// > Returns `None` if the provided index is out of bounds.
pub(crate) fn get_col(&self, j: usize) -> Option<Vec<T>> {
if j >= self.width {
return None;
......@@ -258,14 +268,14 @@ impl<T: Field> Matrix<T> {
Some((0..self.height).map(|i| self.get(i, j)).collect())
}
/// Computes $\text{row} = \frac{\text{row}}{\text{value}}$.
fn divide_row_by(&mut self, row: usize, value: T) {
for j in 0..self.width {
self.set(row, j, self.get(row, j) / value);
}
}
/// Computes $\text{destination} = \text{destination} + \text{source} \times \text{value}$.
fn multiply_row_by_and_add_to_row(&mut self, source: usize, value: T, destination: usize) {
for j in 0..self.width {
self.set(
......@@ -276,12 +286,11 @@ impl<T: Field> Matrix<T> {
}
}
/// Computes the inverse of the matrix.
///
/// If $M \in \mathcal{M}_{n \times n}(\mathbb{F}_p)$ is an invertible matrix,
/// then [`Self::invert`] computes $M^{-1}$ such that
/// $$ MM^{-1} = M^{-1}M = I_n$$
pub fn invert(&self) -> Result<Self, KomodoError> {
if self.height != self.width {
return Err(KomodoError::NonSquareMatrix(self.height, self.width));
......@@ -311,27 +320,30 @@ impl<T: Field> Matrix<T> {
Ok(inverse)
}
/// Swaps rows $i$ and $j$, inplace.
///
/// > **Note**
/// >
/// > This function assumes both $i$ and $j$ are in bounds; unexpected
/// > results may occur if $i$ or $j$ are out of bounds.
fn swap_rows(&mut self, i: usize, j: usize) {
for k in 0..self.width {
self.elements.swap(i * self.width + k, j * self.width + k);
}
}
/// Computes the rank of the matrix.
///
/// Let $M \in \mathcal{M}_{n \times m}(\mathbb{F}_p)$ and $r(M)$ its rank:
/// - the rank is never larger than the minimum of the height and the
/// width of the matrix, $r(M) \leq \min(n, m)$
/// - a square and invertible matrix has _full rank_, i.e. its rank is equal
/// to its size: if $M$ is invertible, then $r(M) = n$
///
/// > **Note**
/// >
/// > See the [_Wikipedia article_](https://en.wikipedia.org/wiki/Rank_(linear_algebra))
/// > for more information
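///
/// # Example
/// As an illustration, the identity $I_3$ has full rank, $r(I_3) = 3$, while a
/// $3 \times 3$ matrix with two identical rows has rank at most $2$.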
pub fn rank(&self) -> usize {
let mut mat = self.clone();
let mut i = 0;
......@@ -371,22 +383,22 @@ impl<T: Field> Matrix<T> {
nb_non_zero_rows
}
/// Computes the matrix multiplication with another matrix.
///
/// Let $A \in \mathcal{M}_{a \times b}(\mathbb{F}_p) \sim \texttt{lhs}$ and
/// $B \in \mathcal{M}\_{c \times d}(\mathbb{F}_p) \sim \texttt{rhs}$ then
/// `lhs.mul(rhs)` will compute $A \times B$.
///
/// > **Note**
/// >
/// > Both matrices should have compatible shapes, i.e. if `self` has shape
/// > `(a, b)` and `rhs` has shape `(c, d)`, then `b == c`.
pub fn mul(&self, rhs: &Self) -> Result<Self, KomodoError> {
if self.width != rhs.height {
return Err(KomodoError::IncompatibleMatrixShapes {
left: (self.height, self.width),
right: (rhs.height, rhs.width),
});
}
let height = self.height;
......@@ -409,9 +421,10 @@ impl<T: Field> Matrix<T> {
})
}
/// Computes the transpose of the matrix.
///
/// > **Note**
/// >
/// > see the [_Wikipedia article_](https://en.wikipedia.org/wiki/Transpose)
pub fn transpose(&self) -> Self {
let height = self.width;
......@@ -433,11 +446,11 @@ impl<T: Field> Matrix<T> {
}
}
/// Truncates the matrix by removing the given numbers of rows and columns,
/// from the bottom and the right.
///
/// # Example
/// If a matrix has shape $(10, 11)$ and is truncated by $(5, 4)$, the $5$
/// bottom rows and $4$ right columns will be removed, leaving a $(5, 7)$ matrix.
pub(crate) fn truncate(&self, rows: Option<usize>, cols: Option<usize>) -> Self {
let width = if let Some(w) = cols {
self.width - w
......@@ -606,10 +619,14 @@ mod tests {
let matrix = Matrix::<Fr>::from_vec_vec(mat_to_elements(vec![vec![0], vec![0, 0]]));
assert!(matrix.is_err());
assert_eq!(
matrix.err().unwrap(),
KomodoError::InvalidMatrixElements {
expected: 1,
found: 2,
row: 1,
}
);
}
#[test]
......@@ -651,10 +668,13 @@ mod tests {
]))
.unwrap();
assert_eq!(
a.mul(&Matrix::<Fr>::from_vec_vec(mat_to_elements(vec![vec![1, 2]])).unwrap()),
Err(KomodoError::IncompatibleMatrixShapes {
left: (3, 3),
right: (1, 2)
})
);
let product = a.mul(&b).unwrap();
let expected = Matrix::<Fr>::from_vec_vec(mat_to_elements(vec![
......@@ -705,17 +725,11 @@ mod tests {
.unwrap()
.invert();
assert!(inverse.is_err());
assert_eq!(inverse.err().unwrap(), KomodoError::NonSquareMatrix(2, 3));
let inverse = Matrix::<Fr>::from_diagonal(vec_to_elements(vec![0, 3, 4])).invert();
assert!(inverse.is_err());
assert_eq!(inverse.err().unwrap(), KomodoError::NonInvertibleMatrix(0));
let inverse = Matrix::<Fr>::from_vec_vec(mat_to_elements(vec![
vec![1, 1, 0],
......@@ -725,19 +739,22 @@ mod tests {
.unwrap()
.invert();
assert!(inverse.is_err());
assert_eq!(inverse.err().unwrap(), KomodoError::NonInvertibleMatrix(1));
}
#[test]
fn vandermonde() {
assert_eq!(
Matrix::<Fr>::vandermonde(&vec_to_elements(vec![0, 4, 2, 3, 4]), 4),
Err(KomodoError::InvalidVandermonde {
first_index: 1,
second_index: 4,
value_repr: "4".to_string()
}),
);
assert!(Matrix::<Fr>::vandermonde(&vec_to_elements(vec![0, 1, 2, 3, 4]), 4).is_ok());
let actual = Matrix::<Fr>::vandermonde_unchecked(&vec_to_elements(vec![0, 1, 2, 3, 4]), 4);
#[rustfmt::skip]
let expected = Matrix::from_vec_vec(mat_to_elements(vec![
vec![1, 1, 1, 1, 1],
......
//! Manipulate elements from finite field $\mathbb{F}_p$.
#[cfg(any(feature = "kzg", feature = "aplonk"))]
use ark_ec::pairing::Pairing;
#[cfg(feature = "aplonk")]
......@@ -13,17 +13,18 @@ use std::ops::{Div, Mul};
pub mod linalg;
/// Splits a sequence of raw bytes into valid field elements in $\mathbb{F}_p$.
///
/// The size of the output vector is a multiple of the provided `modulus` argument.
///
/// If necessary, the output vector is padded with $1$ in $\mathbb{F}_p$.
///
/// # Example
/// In the following example $\mathbb{F}_p$ is a small finite field with prime order $2^{16} + 1$ and which
/// requires only two bytes to represent elements.
///
/// 1. splitting `0x02000300`, which contains $4$ bytes, will result in two elements of $\mathbb{F}_p$, i.e. $2$
/// and $3$
/// ```
/// # #[derive(ark_ff::MontConfig)]
/// # #[modulus = "65537"]
......@@ -40,9 +41,9 @@ pub mod linalg;
/// );
/// # }
/// ```
/// 2. splitting `0x0200030004000500`, which contains $8$ bytes, and asking for a multiple of $3$
/// elements, will result in $6$ elements of $\mathbb{F}_p$, i.e. $2$, $3$, $4$ and $5$ which come from the data and
/// two padding elements, set to $1$.
/// ```
/// # #[derive(ark_ff::MontConfig)]
/// # #[modulus = "65537"]
......@@ -67,7 +68,7 @@ pub mod linalg;
/// # }
/// ```
pub fn split_data_into_field_elements<F: PrimeField>(bytes: &[u8], modulus: usize) -> Vec<F> {
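// NOTE: one bit fewer than the modulus bit size, rounded down to whole bytes, so
// that every chunk of bytes encodes a value strictly smaller than the modulus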
let bytes_per_element = (F::MODULUS_BIT_SIZE as usize - 1) / 8;
let mut elements = Vec::new();
for chunk in bytes.chunks(bytes_per_element) {
......@@ -81,9 +82,11 @@ pub fn split_data_into_field_elements<F: PrimeField>(bytes: &[u8], modulus: usiz
elements
}
/// Merges elements of $\mathbb{F}_p$ back into a sequence of bytes.
///
/// > **Note**
/// >
/// > This is the inverse operation of [`split_data_into_field_elements`].
pub(crate) fn merge_elements_into_bytes<F: PrimeField>(elements: &[F]) -> Vec<u8> {
let mut bytes = vec![];
for e in elements {
......@@ -96,11 +99,16 @@ pub(crate) fn merge_elements_into_bytes<F: PrimeField>(elements: &[F]) -> Vec<u8
}
#[cfg(any(feature = "kzg", feature = "aplonk"))]
/// Computes the linear combination of polynomials.
///
/// [`scalar_product_polynomial`] computes the linear combination $P$ of $n$
/// polynomials $(P_i) \in \mathbb{F}_p\[X\]^n \sim \texttt{rhs}$ with
/// coefficients $(c_i) \in \mathbb{F}_p^n \sim \texttt{lhs}$ as
///
/// $$P(X) = \sum\limits_{i = 0}^{n - 1} c_i P_i(X)$$
///
/// ## Preconditions
/// - `lhs` and `rhs` should contain the same number of elements.
pub(crate) fn scalar_product_polynomial<E, P>(lhs: &[E::ScalarField], rhs: &[P]) -> P
where
E: Pairing,
......@@ -121,12 +129,19 @@ where
}
#[cfg(feature = "aplonk")]
/// Computes the "_scalar product_" between vectors of elements in $\mathbb{G}_1$ and in $\mathbb{G}_2$ respectively.
///
/// [`scalar_product_pairing`] computes the "_pairing combination_" $c$ of $(a_i) \in \mathbb{G}_1^n \sim \texttt{lhs}$ and
/// $(b_i) \in \mathbb{G}_2^n \sim \texttt{rhs}$ as
///
/// $$ c = \sum\limits_{i = 0}^{n - 1} E(a_i, b_i) $$
///
/// where $E$ is a [bilinear mapping] from $\mathbb{G}_1 \times \mathbb{G}_2 \rightarrow \mathbb{G}_T$.
///
/// ## Preconditions
/// - `lhs` and `rhs` should contain the same number of elements.
///
/// [bilinear mapping]: <https://en.wikipedia.org/wiki/Bilinear_map>
pub(super) fn scalar_product_pairing<E: Pairing>(lhs: &[E::G1], rhs: &[E::G2]) -> PairingOutput<E> {
lhs.iter()
.zip(rhs.iter())
......@@ -135,11 +150,19 @@ pub(super) fn scalar_product_pairing<E: Pairing>(lhs: &[E::G1], rhs: &[E::G2]) -
}
#[cfg(feature = "aplonk")]
/// Computes the [scalar product] between vectors of elements of a finite field $\mathbb{F}_p$
/// associated with a "_pairing-friendly_" [elliptic curve] $(\mathbb{G}_1, \mathbb{G}_2, \mathbb{G}_T)$.
///
/// [`scalar_product`] computes the [scalar product] $c$ of $(a_i) \in \mathbb{F}_p^n \sim \texttt{lhs}$ and
/// $(b_i) \in \mathbb{F}_p^n \sim \texttt{rhs}$ as
///
/// $$ c = a \cdot b = \sum\limits_{i = 0}^{n - 1} a_i b_i $$
///
/// ## Preconditions
/// - `lhs` and `rhs` should contain the same number of elements.
///
/// [scalar product]: <https://en.wikipedia.org/wiki/Dot_product>
/// [elliptic curve]: <https://en.wikipedia.org/wiki/Elliptic_curve>
pub(super) fn scalar_product<E: Pairing>(
lhs: &[E::ScalarField],
rhs: &[E::ScalarField],
......@@ -148,13 +171,40 @@ pub(super) fn scalar_product<E: Pairing>(
}
#[cfg(feature = "aplonk")]
/// Computes a linear combination of elements of a finite field $\mathbb{F}_p$ associated with a
/// "_pairing-friendly_" [elliptic curve] $(\mathbb{G}_1, \mathbb{G}_2, \mathbb{G}_T)$.
///
/// [`scalar_product_g1`] computes the linear combination $c$ of the $(a_i) \in \mathbb{G}_1^n \sim \texttt{lhs}$
/// with coefficients $(c_i) \in \mathbb{F}_p^n \sim \texttt{rhs}$ as
///
/// $$ c = \sum\limits_{i = 0}^{n - 1} c_i a_i $$
///
/// > **Note**
/// >
/// > [`scalar_product_g1`] is the same as [`scalar_product`], but with elements from $\mathbb{G}_1$.
///
/// ## Preconditions
/// - `lhs` and `rhs` should contain the same number of elements.
///
/// [elliptic curve]: <https://en.wikipedia.org/wiki/Elliptic_curve>
pub(super) fn scalar_product_g1<E: Pairing>(lhs: &[E::G1], rhs: &[E::ScalarField]) -> E::G1 {
lhs.iter().zip(rhs.iter()).map(|(l, r)| l.mul(r)).sum()
}
#[cfg(feature = "aplonk")]
/// Computes a linear combination of elements of a finite field $\mathbb{F}_p$ associated with a
/// "_pairing-friendly_" [elliptic curve] $(\mathbb{G}_1, \mathbb{G}_2, \mathbb{G}_T)$.
///
/// [`scalar_product_g2`] computes the linear combination $c$ of the $(a_i) \in \mathbb{G}_2^n \sim \texttt{lhs}$
/// with coefficients $(c_i) \in \mathbb{F}_p^n \sim \texttt{rhs}$ as
///
/// $$ c = \sum\limits_{i = 0}^{n - 1} c_i a_i $$
///
/// > **Note**
/// >
/// > [`scalar_product_g2`] is the same as [`scalar_product`], but with elements from $\mathbb{G}_2$.
///
/// [elliptic curve]: <https://en.wikipedia.org/wiki/Elliptic_curve>
pub(super) fn scalar_product_g2<E: Pairing>(lhs: &[E::G2], rhs: &[E::ScalarField]) -> E::G2 {
lhs.iter().zip(rhs.iter()).map(|(l, r)| l.mul(r)).sum()
}
......@@ -163,7 +213,7 @@ pub(super) fn scalar_product_g2<E: Pairing>(lhs: &[E::G2], rhs: &[E::ScalarField
pub(super) mod vector {
use ark_ff::Zero;
/// Returns $(0, ..., 0) \in \mathbb{F}_p^n$.
pub fn zero<Z: Zero + Clone>(capacity: usize) -> Vec<Z> {
let mut vector = Vec::with_capacity(capacity);
vector.resize(capacity, Z::zero());
......@@ -172,12 +222,14 @@ pub(super) mod vector {
}
}
/// Computes the successive powers of a scalar $r$ in a field $\mathbb{F}_p$ associated with a
/// "_pairing-friendly_" [elliptic curve] $(\mathbb{G}_1, \mathbb{G}_2, \mathbb{G}_T)$.
///
/// [`powers_of`] will compute a vector $R$ from a scalar $r \in \mathbb{F}_p$ as
///
/// $$ R = (1, r, r^2, ..., r^{n-1}) $$
///
/// where $n$ is the desired number of powers.
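///
/// For instance (an illustrative example), $r = 2$ and $n = 4$ give $R = (1, 2, 4, 8)$.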
#[cfg(any(feature = "kzg", feature = "aplonk"))]
pub(crate) fn powers_of<E: Pairing>(step: E::ScalarField, nb_powers: usize) -> Vec<E::ScalarField> {
let mut powers = Vec::with_capacity(nb_powers);
......@@ -246,13 +298,11 @@ mod tests {
split_data_template::<Fr>(&[], 1, None);
split_data_template::<Fr>(&[], 8, None);
const MODULUS_BYTE_SIZE: usize = Fr::MODULUS_BIT_SIZE as usize / 8;
for n in (10 * MODULUS_BYTE_SIZE + 1)..(11 * MODULUS_BYTE_SIZE) {
split_data_template::<Fr>(&bytes()[..n], 1, Some(11));
split_data_template::<Fr>(&bytes()[..n], 8, Some(16));
}
}
fn split_and_merge_template<F: PrimeField>(bytes: &[u8], modulus: usize) {
......@@ -264,10 +314,9 @@ mod tests {
#[test]
fn split_and_merge() {
for i in 0..12 {
split_and_merge_template::<Fr>(&bytes(), 1 << i);
}
}
#[cfg(any(feature = "kzg", feature = "aplonk"))]
......@@ -277,10 +326,9 @@ mod tests {
const POWER: usize = 10;
let r = E::ScalarField::rand(rng);
let res = super::powers_of::<E>(r, POWER + 1);
assert_eq!(res.len(), POWER + 1);
assert_eq!(res.last().unwrap(), &r.pow([POWER as u64]));
}
#[cfg(any(feature = "kzg", feature = "aplonk"))]
......
......@@ -11,30 +11,30 @@ use crate::aplonk::polynomial;
use crate::aplonk::transcript;
use crate::error::KomodoError;
/// Holds the setup parameters of the IPA stage of [aPlonk from [Ambrona et al.]][aPlonK].
///
/// This can be found in [aPlonk from [Ambrona et al.]][aPlonK] in
/// - page **13**. in Setup.1
/// - page **13**. in Setup.3
///
/// [aPlonk]: https://eprint.iacr.org/2022/1352.pdf
#[derive(Debug, Clone, PartialEq, Default, CanonicalSerialize, CanonicalDeserialize)]
pub struct Params<E: Pairing> {
/// $\[\tau\]_1$ in the paper
pub tau_1: E::G1,
/// $\text{ck}_\tau$ in the paper
pub ck_tau: Vec<E::G2>,
}
/// Holds all the necessary pieces to prove the IPA stage of [aPlonk from [Ambrona et al.]][aPlonK].
///
/// This can be found in [aPlonk from [Ambrona et al.]][aPlonK] as
/// $\pi = ({L_G^j, R_G^j, L_r^j, R_r^j}_{j \in [\kappa]}, \mu^0, G^0)$ in
/// - page **15**. in IPA.Prove.10
///
/// > **Note**
/// > the notations are the same as in the paper, only with all letters in lower
/// > case and the powers at the bottom, e.g. `l_g_j` instead of $L_G^j$, and
/// > with $G$ renamed as `ck_tau`.
///
/// [aPlonk]: https://eprint.iacr.org/2022/1352.pdf
#[derive(Debug, Clone, Default, PartialEq, CanonicalSerialize, CanonicalDeserialize)]
......
sequenceDiagram
actor prover
actor verifier
Note over prover,verifier: generate trusted setup<br/>$$\ \text{TS} = (([\sigma^j]_1), [\tau]_1, \text{ck}_\tau)$$
Note over prover,verifier: $$\tau \leftarrow \mathbb{F}_p$$
Note over prover,verifier: $$\text{ck}_\tau = ([\tau^i]_2)$$
Note left of prover: $$(P_i) = \text{split}(\Delta)$$
Note left of prover: $$c = \text{com}(P_i, \text{TS})$$
Note left of prover: $$\pi_\alpha = \text{prove}(c, P_i, s_\alpha, \text{TS})$$
prover->>verifier: $$\pi_{\alpha} \text{, } (c_i)$$
Note right of verifier: $$\text{verify}(s_\alpha, \pi_\alpha, \text{TS})$$
......@@ -2,6 +2,8 @@
//!
//! > references:
//! > - [Ambrona et al., 2022](https://link.springer.com/chapter/10.1007/978-3-031-41326-1_11)
//!
#![doc = simple_mermaid::mermaid!("mod.mmd")]
use ark_ec::{
pairing::{Pairing, PairingOutput},
AffineRepr,
......@@ -16,7 +18,6 @@ use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Compress};
use ark_std::{test_rng, One, UniformRand};
use rs_merkle::algorithms::Sha256;
use rs_merkle::Hasher;
use std::marker::PhantomData;
use std::ops::{Div, Mul};
use crate::{
......@@ -31,28 +32,27 @@ mod polynomial;
mod transcript;
#[derive(Debug, Clone, Default, PartialEq, CanonicalSerialize, CanonicalDeserialize)]
/// Representation of an _aPlonK_ block.
pub struct Block<E: Pairing> {
pub shard: Shard<E::ScalarField>,
/// $\text{com}_f \in \mathbb{G}_T$
com_f: PairingOutput<E>,
/// $\hat{v} \in \mathbb{F}_p$
v_hat: E::ScalarField,
/// $\hat{\mu} \in \mathbb{G}_1$
mu_hat: E::G1,
/// $\pi_\text{KZG} \in \mathbb{G}_1$
kzg_proof: kzg10::Proof<E>,
/// $\pi_\text{IPA}$
ipa_proof: ipa::Proof<E>,
/// $\pi_{\text{aPlonK}} \in \mathbb{G}_2$
aplonk_proof: E::G2,
}
/// /!\ [`Commitment`] is not [`CanonicalDeserialize`] because `P` is not [`Send`].
#[derive(Debug, Clone, Default, PartialEq, CanonicalSerialize)]
pub struct Commitment<E, P>
where
E: Pairing,
P: DenseUVPolynomial<E::ScalarField, Point = E::ScalarField>,
for<'a, 'b> &'a P: Div<&'b P, Output = P>,
{
_engine: PhantomData<E>,
_poly: PhantomData<P>,
}
/// Representation of _aPlonK_'s parameters.
///
/// This is a wrapper around $\text{KZG}^+$ and IPA parameters.
///
/// /!\ [`SetupParams`] is not [`Default`] because [`kzg10::UniversalParams`] is not [`Default`].
#[derive(Debug, Clone, PartialEq, CanonicalSerialize, CanonicalDeserialize)]
pub struct SetupParams<E: Pairing> {
......@@ -60,18 +60,11 @@ pub struct SetupParams<E: Pairing> {
pub ipa: ipa::Params<E>,
}
#[derive(Debug, Clone, Default, PartialEq, CanonicalSerialize, CanonicalDeserialize)]
pub struct VerifierKey<E: Pairing> {
pub vk_psi: kzg10::VerifierKey<E>,
pub tau_1: E::G1,
pub g1: E::G1,
pub g2: E::G2,
}
/// Creates a combination of a trusted KZG and an IPA setup for [[aPlonk]].
///
/// > **Note**
/// >
/// > This is an almost perfect translation of the *Setup* algorithm in page
/// > **13** of [aPlonk from [Ambrona et al.]][aPlonK]
///
/// [aPlonk]: https://eprint.iacr.org/2022/1352.pdf
......@@ -105,6 +98,9 @@ where
})
}
/// Commits the polynomials.
///
/// [`commit`] actually computes $\mu$ and $\text{com}_f$.
pub fn commit<E, P>(
polynomials: Vec<P>,
setup: SetupParams<E>,
......@@ -151,6 +147,20 @@ where
Ok((mu, com_f))
}
/// Proves the whole data $\Delta$.
///
/// For each shard $s_\alpha$:
/// - $r = \text{hash}(\text{com}_f, \alpha)$
/// - $f = \sum r^i P_i$
/// - $\hat{\mu} = \sum r^i \mu_i$
/// - $\hat{v} = \sum r^i P_i(\alpha)$
/// - $\pi_{\text{KZG}} = \text{KZG.prove}(\text{TS}, f, \alpha)$
/// - $(\pi_{\text{IPA}}, u) = \text{IPA.prove}(\text{TS}, \text{com}_f, r, \hat{\mu}, \mu)$
/// - $\kappa = \log_2(m)$
/// - $G(X) = G(\kappa, u, u^{-1})$
/// - $\rho = \text{hash}(\pi_{\text{IPA}})$
/// - $H = \text{witness}(G, \rho)$
/// - $\pi_{\text{aPlonK}} = \sum [\tau^i\]_2 H_i$
pub fn prove<E, P>(
commit: (Vec<E::G1>, PairingOutput<E>),
polynomials: Vec<P>,
......@@ -281,6 +291,19 @@ where
Ok(proofs)
}
/// Verifies that a block is valid.
///
/// For a given shard $s_\alpha$:
/// - $r = \text{hash}(\text{com}_f, \alpha)$
/// - $\text{ok}_{\text{KZG}} = E(\hat{\mu} - \[\hat{v}\]_1, \[1\]_2) = E(\pi\_{\text{KZG}}, \[\sigma\]_2 - \[\alpha\]_2)$
/// - $\text{ok}_{\text{IPA}} = \text{IPA.verify'}(\text{com}_f, r, \hat{\mu}, \pi\_{\text{IPA}})$
/// - $\rho = \text{hash}(\pi_{\text{IPA}})$
/// - $\kappa = \log_2(m)$
/// - $u = \text{replay}(\text{com}_f, r, \hat{\mu})$
/// - $G(X) = G(\kappa, u, u^{-1})$
/// - $v_{\rho} = G(\rho)$
/// - $\text{ok}_{\text{aPlonK}} = E(\[\tau\]_1 - \[\rho\]_1, \pi\_{\text{aPlonK}}) = E(\[1\]_1, \pi\_{\text{IPA}}.\text{ck}\_{\tau,0})$
/// - assert $\text{ok}_{\text{KZG}}$, $\text{ok}\_{\text{IPA}}$ and $\text{ok}\_{\text{aPlonK}}$ are true
pub fn verify<E, P>(
block: &Block<E>,
pt: E::ScalarField,
......
......@@ -12,48 +12,48 @@ fn to_binary(number: usize, width: usize) -> Vec<u8> {
.collect()
}
/// compute the polynomial $g(X)$ in [aPlonk from [Ambrona et al.]][aPlonk]
///
/// $g(X)$ can be found, at
/// - page **13**. in *open.7*
/// - page **13**. in *check.5*
/// - page **15**. in *IPA.verify.4*
///
/// its theoretical formula is the following (modified version):
/// $g(X) = \Pi_{j=1}^{\kappa = log_2(k)}(u_j^{-1} + u_j X^{2^j})$
///
/// however this formula is not very convenient, so let's expand this and
/// compute all the coefficients!
/// when we do that on small examples:
/// - $\kappa = 1$: \begin{align} \begin{split}
/// g(X) &= (u_0^{-1} + u_0 X) \\\\
/// &=\quad u_0^{-1} + \\\\
/// &\quad+ u_0 X
/// \end{split} \end{align}
/// - $\kappa = 2$: \begin{align} \begin{split}
/// g(X) &= (u_0^{-1} + u_0 X)(u_1^{-1} + u_1 X^2) \\\\
/// &=\quad u_1^{-1} u_0^{-1} \\\\
/// &\quad+ u_1^{-1} u_0 X \\\\
/// &\quad+ u_1 u_0^{-1} X^2 \\\\
/// &\quad+ u_1 u_0 X^3
/// \end{split} \end{align}
/// - $\kappa = 3$: \begin{align} \begin{split}
/// g(X) &= (u_0^{-1} + u_0 X)(u_1^{-1} + u_1 X^2)(u_2^{-1} + u_2 X^2) \\\\
/// &=\quad u_2^{-1} u_1^{-1} u_0^{-1} \\\\
/// &\quad+ u_2^{-1} u_1^{-1} u_0 X \\\\
/// &\quad+ u_2^{-1} u_1 u_0^{-1} X^2 \\\\
/// &\quad+ u_2^{-1} u_1 u_0 X^3 \\\\
/// &\quad+ u_2 u_1^{-1} u_0^{-1} X^4 \\\\
/// &\quad+ u_2 u_1^{-1} u_0 X^5 \\\\
/// &\quad+ u_2 u_1 u_0^{-1} X^6 \\\\
/// &\quad+ u_2 u_1 u_0 X^7
/// \end{split} \end{align}
///
/// we can see that the $j$-th coefficient of $g(X)$ for a given $\kappa$ is
/// a product of a combination of $(u_i)$ and their inverse elements directly
/// related to the binary representation of the $j$ polynomial power, e.g.
/// - with $\kappa = 3$ and $j = 6$, the binary is $110$ and the coefficient is
/// $u_2 \times u_1 \times u_0^{-1}$, matching the $X^6$ term of the expansion above
///
/// [aPlonk]: https://eprint.iacr.org/2022/1352.pdf
pub(super) fn compute_g<E, P>(
......
......@@ -5,7 +5,7 @@ use rs_merkle::{algorithms::Sha256, Hasher};
/// initialize the transcript of IPA
///
/// this can be found in [aPlonk from [Ambrona et al.]][aPlonK] as $ts := (C_G, r, P)$ in
/// - page **15**. in IPA.Prove.1.
///
/// [aPlonk]: https://eprint.iacr.org/2022/1352.pdf
......@@ -24,7 +24,7 @@ pub(super) fn initialize<E: Pairing>(
/// reset the transcript of IPA
///
/// this can be found in [aPlonk from [Ambrona et al.]][aPlonK] as $ts := u_j$ in
/// - page **15**. in IPA.Prove.5.
/// - page **15**. in IPA.Verify.3.
///
......@@ -39,7 +39,7 @@ pub(super) fn reset<E: Pairing>(u: E::ScalarField) -> Result<Vec<u8>, Serializat
/// hash curve elements into the transcript of IPA
///
/// this can be found in [aPlonk from [Ambrona et al.]][aPlonK] as
/// $ts := \text{hash}(L_G^j, R_G^j, L_r^j, R_r^j, ts)$ in
/// - page **15**. in IPA.Prove.5.
/// - page **15**. in IPA.Verify.3.
///
......
//! Komodo-specific errors
//!
//! There are a few linear algebra errors and some related to [crate::zk].
use thiserror::Error;
/// An error that Komodo could end up producing.
......@@ -11,40 +11,52 @@ use thiserror::Error;
/// - related to proving the shards
#[derive(Clone, Debug, Error, PartialEq)]
pub enum KomodoError {
#[error("expected rows to be of same length {expected}, found {found} at row {row}")]
InvalidMatrixElements {
expected: usize,
found: usize,
row: usize,
},
/// `{0}` and `{1}` are the shape of the rectangular matrix.
#[error("Matrix is not a square, ({0} x {1})")]
NonSquareMatrix(usize, usize),
/// `{0}` is the ID of the row where the matrix inversion failed.
#[error("Matrix is not invertible at row {0}")]
NonInvertibleMatrix(usize),
#[error("Matrices don't have compatible shapes: {left:?}, {right:?}")]
IncompatibleMatrixShapes {
left: (usize, usize),
right: (usize, usize),
},
#[error(
"Seed points of a Vandermonde matrix should be distinct: {0} and {1} are the same ({2})"
"Seed points of a Vandermonde matrix should be distinct: {first_index} and {second_index} are the same ({value_repr})"
)]
InvalidVandermonde {
first_index: usize,
second_index: usize,
value_repr: String,
},
/// `{0}` is the actual number of shards and `{1}` is the expected amount.
#[error("Expected at least {1} shards, got {0}")]
TooFewShards(usize, usize),
#[error("Shards are incompatible ({key} is not the same at {index}: {left} vs {right})")]
IncompatibleShards {
key: String,
index: usize,
left: String,
right: String,
},
#[error("Blocks are incompatible ({key} is not the same at {index}: {left} vs {right})")]
IncompatibleBlocks {
key: String,
index: usize,
left: String,
right: String,
},
#[error("Degree is zero")]
DegreeIsZero,
#[error("too many coefficients: max is {powers}, found {coefficients}")]
TooFewPowersInTrustedSetup { powers: usize, coefficients: usize },
/// `{0}` is a custom error message.
#[error("Another error: {0}")]
Other(String),
......
flowchart LR
file@{ shape: rounded, label: "original file $$\\ F$$" }
source@{ shape: processes, label: "$$k\\ $$ source shards" }
encoded@{ shape: processes, label: "$$n\\ $$ encoded shards" }
gathered@{ shape: processes, label: "at least $$\\ k\\ $$ shards" }
decoded@{ shape: processes, label: "$$k\\ $$ decoded shards" }
reconstructed@{ shape: rounded, label: "reconstructed file $$\\ F^*$$" }
life_1@{ shape: framed-circle, label: "life" }
life_2@{ shape: framed-circle, label: "life" }
file --split--> source --"$$(k, n)\\ $$ encoding"--> encoded --disseminate--> life_1
life_2 --gather--> gathered --"$$(k, n)\\ $$ decoding"--> decoded --concat--> reconstructed
//! A module to encode, recode and decode shards of data with [FEC] methods.
//!
//! In all the following, $(k, n)$ codes will be described, where $k$ is the number of source
//! shards and $n$ is the number of encoded shards.
//!
//! The _code ratio_ is defined as $\rho = \frac{k}{n}$.
//!
//! ## Example
//! In the following example, a file is encoded and decoded back.
//!
//! The dotted circle in between "_dissemination_" and "_gathering_" represents the "_life_" of the
//! shards, e.g. them being shared between peers on a network, recoded or lost.
#![doc = simple_mermaid::mermaid!("fec.mmd")]
//! In the end, [FEC] methods guarantee that $F^* = F$, as long as at least $k$ linearly
//! independent shards are gathered before decoding.
//!
//! [FEC]: https://en.wikipedia.org/wiki/Error_correction_code
use ark_ff::PrimeField;
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
......@@ -8,16 +24,18 @@ use rs_merkle::{algorithms::Sha256, Hasher};
use crate::{algebra, algebra::linalg::Matrix, error::KomodoError};
/// representation of a FEC shard of data.
/// Representation of a [FEC] shard of data.
///
/// [FEC]: https://en.wikipedia.org/wiki/Error_correction_code
#[derive(Debug, Default, Clone, PartialEq, CanonicalSerialize, CanonicalDeserialize)]
pub struct Shard<F: PrimeField> {
/// the code parameter, required to decode
pub k: u32,
/// tells the decoder how the shard was constructed with respect to the original source shards.
///
/// This effectively allows support for _recoding_.
///
/// If we denote the $k$ source shards by $(s\_i)\_\{0 \le i \lt k\}$, the linear combination by $k$
/// coefficients $(\alpha_i)_{0 \le i \lt k}$ and $s$ the shard itself, then
///
/// $$ s = \sum\limits_{i = 0}^{k - 1} \alpha_i s_i$$
......@@ -31,9 +49,9 @@ pub struct Shard<F: PrimeField> {
}
impl<F: PrimeField> Shard<F> {
/// Computes the linear combination between two [`Shard`]s.
///
/// If we denote the [`Shard`] itself and the other [`Shard`] by $s$ and $o$ respectively, the
/// output is
/// $$ \alpha s + \beta o $$
pub fn recode_with(&self, alpha: F, other: &Self, beta: F) -> Self {
......@@ -57,23 +75,23 @@ impl<F: PrimeField> Shard<F> {
.iter()
.zip(other.data.iter())
.map(|(es, eo)| es.mul(alpha) + eo.mul(beta))
.collect(),
size: self.size,
}
}
}
/// Computes the linear combination between an arbitrary number of [`Shard`]s.
///
/// > **Note**
/// >
/// > This is basically a multi-[`Shard`] wrapper around [`Shard::recode_with`].
/// >
/// > [`recode_with_coeffs`] will return [`None`] if the number of shards
/// > is not the same as the number of coefficients or if no shards are provided.
///
/// If the shards are the $(s \_i)\_\{1 \le i \le n\}$ and the coefficients the
/// $(\alpha\_i)\_\{1 \le i \le n\}$, then the output will be
///
/// $$ \sum\limits_{i = 1}^{n} \alpha_i s_i$$
pub fn recode_with_coeffs<F: PrimeField>(shards: &[Shard<F>], coeffs: &[F]) -> Option<Shard<F>> {
......@@ -94,38 +112,44 @@ pub fn recode_with_coeffs<F: PrimeField>(shards: &[Shard<F>], coeffs: &[F]) -> O
Some(s)
}
/// Computes a recoded shard from an arbitrary set of compatible shards.
///
/// Coefficients will be drawn at random, one for each shard.
///
/// If the shards appear to come from different data, e.g. if $k$ is not the
/// same or the hash of the data is different, an error will be returned.
///
/// > **Note**
/// >
/// > This is a wrapper around [`recode_with_coeffs`].
pub fn recode_random<F: PrimeField>(
shards: &[Shard<F>],
rng: &mut impl RngCore,
) -> Result<Option<Shard<F>>, KomodoError> {
for (i, (s1, s2)) in shards.iter().zip(shards.iter().skip(1)).enumerate() {
if s1.k != s2.k {
return Err(KomodoError::IncompatibleShards {
key: "k".to_string(),
index: i,
left: s1.k.to_string(),
right: s2.k.to_string(),
});
}
if s1.hash != s2.hash {
return Err(KomodoError::IncompatibleShards {
key: "hash".to_string(),
index: i,
left: format!("{:?}", s1.hash),
right: format!("{:?}", s2.hash),
});
}
if s1.size != s2.size {
return Err(KomodoError::IncompatibleShards {
key: "size".to_string(),
index: i,
left: s1.size.to_string(),
right: s2.size.to_string(),
});
}
}
......@@ -133,15 +157,21 @@ pub fn recode_random<F: PrimeField>(
Ok(recode_with_coeffs(shards, &coeffs))
}
/// Applies a given encoding matrix to some data to generate encoded shards.
///
/// We arrange the source shards to be encoded in an $m \times k$ matrix $S$, i.e. $k$ shards of
/// length $m$. The encoding matrix $M$ then is a $k \times n$ matrix and the encoded shards are
/// the $n$ columns of
///
/// $$E = S M$$
///
/// > **Note**
/// >
/// > The input data and the encoding matrix should have compatible shapes,
/// > otherwise, an error might be returned to the caller.
///
/// Padding might be applied depending on the size of the data compared to the size of the encoding
/// matrix, see [`algebra::split_data_into_field_elements`].
///
/// This is the inverse of [`decode`].
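///
/// A minimal sketch, assuming a $k \times n$ random matrix is used as the
/// encoding matrix (the `Matrix::random` constructor shown here is
/// hypothetical):
/// ```ignore
/// let encoding_mat = Matrix::random(k, n, &mut rng);
/// // the resulting `shards` are the n columns of E = S M
/// let shards = encode::<F>(&data, &encoding_mat)?;
/// assert_eq!(shards.len(), n);
/// ```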
pub fn encode<F: PrimeField>(
@@ -175,11 +205,18 @@ pub fn encode<F: PrimeField>(
.collect())
}
/// Reconstructs the original data from a set of encoded, possibly recoded, shards.
///
/// Let's assume at least $k$ linearly independent shards have been retrieved and put in a matrix
/// $\hat{E}$. We use the [linear combination][`Shard::linear_combination`] of each shard to
/// reconstruct the columns of the square submatrix $\hat{M}$ that has been used to encode these
/// shards. Then the reconstructed source shards $\hat{S}$ are given by
///
/// $$\hat{S} = \hat{M}^{-1} \hat{E}$$
///
/// > **Note**
/// >
/// > This function might fail in a variety of cases
/// > - if there are too few shards
/// > - if there are linear dependencies between shards
///
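/// A minimal sketch, assuming `shards` holds at least $k$ linearly
/// independent shards of the same data (names are hypothetical):
/// ```ignore
/// let bytes = decode::<F>(shards)?;
/// assert_eq!(bytes, original_data);
/// ```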
@@ -248,7 +285,7 @@ mod tests {
/// `contains_one_of(x, set)` is true iff `x` fully contains one of the lists from `set`
///
/// > **Note**
/// >
/// > See [`containment`] for some examples.
fn contains_one_of(x: &[usize], set: &[Vec<usize>]) -> bool {
use ark_ff::PrimeField;
use ark_poly::DenseUVPolynomial;
use ark_std::ops::Div;
use rs_merkle::algorithms::Sha256;
use rs_merkle::Hasher;
use std::rc::Rc;
use tracing::{debug, info};
use crate::{algebra, error::KomodoError, fec};
use dragoonfri::{
frida::{FridaBuilder, FridaCommitment},
interpolation::interpolate_polynomials,
rng::FriChallenger,
utils::{to_evaluations, HasherExt, MerkleProof},
};
/// Representation of a block of proven data.
///
/// This is a wrapper around a [`fec::Shard`] with some additional cryptographic
/// information that allows proving the integrity of said shard.
#[derive(Clone, PartialEq)]
pub struct Block<F: PrimeField, H: Hasher> {
pub shard: fec::Shard<F>,
pub proof: MerkleProof<H>,
pub commit: Rc<FridaCommitment<F, H>>,
position: usize,
}
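/// Splits `bytes` into rows of `k` field elements and extends each row, seen
/// as a polynomial, to `n` evaluations.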
pub fn evaluate<F: PrimeField>(bytes: &[u8], k: usize, n: usize) -> Vec<Vec<F>> {
debug!("splitting bytes into rows");
let elements: Vec<F> = algebra::split_data_into_field_elements(bytes, k);
let rows = elements.chunks(k).map(|c| c.to_vec()).collect::<Vec<_>>();
info!(
"data is composed of {} rows and {} elements",
rows.len(),
elements.len()
);
rows.into_iter()
.map(|r| to_evaluations(r, n))
.collect::<Vec<_>>()
}
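/// Transposes a matrix stored as a list of rows into a list of columns.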
#[inline]
fn transpose<F: Copy>(v: Vec<Vec<F>>) -> Vec<Vec<F>> {
let mut cols: Vec<Vec<F>> = Vec::with_capacity(v[0].len());
for i in 0..v[0].len() {
cols.push((0..v.len()).map(|j| v[j][i]).collect());
}
cols
}
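/// Builds one [`fec::Shard`] per evaluation point, i.e. per column of the
/// transposed evaluations, tagging each shard with the hash and size of the
/// original `bytes`.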
pub fn encode<F: PrimeField>(
bytes: &[u8],
evaluations: Vec<Vec<F>>,
k: usize,
) -> Vec<fec::Shard<F>> {
let hash = Sha256::hash(bytes).to_vec();
let n = evaluations[0].len();
let t = transpose(evaluations);
(0..n)
.map(|i| fec::Shard {
k: k as u32,
linear_combination: vec![],
hash: hash.clone(),
data: t[i].clone(),
size: bytes.len(),
})
.collect::<Vec<_>>()
}
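/// Commits the evaluations with FRI and wraps each shard into a [`Block`]
/// carrying a Merkle proof of its position and a shared [`FridaCommitment`].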
pub fn prove<const N: usize, F: PrimeField, H: Hasher, P>(
evaluations: Vec<Vec<F>>,
shards: Vec<fec::Shard<F>>,
blowup_factor: usize,
remainder_plus_one: usize,
nb_queries: usize,
) -> Result<Vec<Block<F, H>>, KomodoError>
where
P: DenseUVPolynomial<F>,
for<'a, 'b> &'a P: Div<&'b P, Output = P>,
<H as rs_merkle::Hasher>::Hash: AsRef<[u8]>,
{
let builder = FridaBuilder::<F, H>::new::<N, _>(
&evaluations,
FriChallenger::<H>::default(),
blowup_factor,
remainder_plus_one,
nb_queries,
);
let commit = Rc::new(FridaCommitment::from(builder.clone()));
Ok(shards
.iter()
.enumerate()
.map(|(i, s)| Block {
shard: s.clone(),
proof: builder.prove_shards(&[i]),
commit: commit.clone(),
position: i,
})
.collect())
}
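/// Verifies a single [`Block`]: first the FRI commitment itself, then the
/// Merkle proof binding the shard data to the committed tree at the block's
/// position.
///
/// > **Note**
/// >
/// > The current implementation panics on verification failure instead of
/// > returning a [`KomodoError`].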
pub fn verify<const N: usize, F: PrimeField, H: Hasher, P>(
block: Block<F, H>,
domain_size: usize,
nb_queries: usize,
) -> Result<(), KomodoError>
where
P: DenseUVPolynomial<F>,
for<'a, 'b> &'a P: Div<&'b P, Output = P>,
<H as rs_merkle::Hasher>::Hash: AsRef<[u8]>,
{
block
.commit
.verify::<N, _>(
FriChallenger::<H>::default(),
nb_queries,
block.shard.k as usize,
domain_size,
)
.unwrap();
assert!(block.proof.verify(
block.commit.tree_root(),
&[block.position],
&[H::hash_item(&block.shard.data)],
domain_size,
));
Ok(())
}
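/// Recovers the original bytes from a set of [`Block`]s by interpolating the
/// underlying polynomials at the evaluation points given by the block
/// positions.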
pub fn decode<F: PrimeField, H: Hasher>(blocks: Vec<Block<F, H>>, n: usize) -> Vec<u8> {
let w = F::get_root_of_unity(n as u64).unwrap();
let t_shards = transpose(blocks.iter().map(|b| b.shard.data.clone()).collect());
let positions = blocks
.iter()
.map(|b| w.pow([b.position as u64]))
.collect::<Vec<_>>();
let source_shards = interpolate_polynomials(&t_shards, &positions)
.into_iter()
.flatten()
.collect::<Vec<_>>();
let mut bytes = algebra::merge_elements_into_bytes(&source_shards);
bytes.resize(blocks[0].shard.size, 0);
bytes
}
#[cfg(test)]
mod tests {
use ark_ff::PrimeField;
use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial};
use ark_serialize::CanonicalSerialize;
use ark_std::ops::Div;
use rs_merkle::Hasher;
use ark_bls12_381::Fr as F_BLS12_381;
use dragoonfri::{
algorithms::{Blake3, Sha3_256, Sha3_512},
dynamic_folding_factor,
};
use dragoonfri_test_utils::Fq as F_128;
use crate::error::KomodoError;
use super::{decode, encode, evaluate, prove, verify};
fn bytes() -> Vec<u8> {
include_bytes!("../assets/dragoon_133x133.png").to_vec()
}
fn run<const N: usize, F: PrimeField, H: Hasher, P>(
bytes: &[u8],
k: usize,
n: usize,
bf: usize,
rpo: usize,
q: usize,
) -> Result<(), KomodoError>
where
P: DenseUVPolynomial<F>,
for<'a, 'b> &'a P: Div<&'b P, Output = P>,
<H as rs_merkle::Hasher>::Hash: AsRef<[u8]> + CanonicalSerialize,
{
let evaluations = evaluate::<F>(bytes, k, n);
let evals = evaluations.clone();
let shards = encode::<F>(bytes, evals, k);
let blocks = prove::<N, F, H, P>(evaluations, shards, bf, rpo, q).unwrap();
for b in blocks.clone() {
verify::<N, F, H, P>(b, n, q).unwrap();
}
assert_eq!(decode::<F, H>(blocks[0..k].to_vec(), n), bytes);
Ok(())
}
macro_rules! run {
($n:tt, $f:ident, $h:ident) => {
dynamic_folding_factor!(
let N = $n => run::<N, $f, $h, DensePolynomial<$f>>
)
}
}
#[test]
fn end_to_end() {
for (ff, k, n, bf, rpo, q) in [(2, 4, 8, 2, 1, 50), (2, 4, 8, 2, 2, 50)] {
let _ = run!(ff, F_128, Blake3)(&bytes(), k, n, bf, rpo, q);
let _ = run!(ff, F_128, Sha3_256)(&bytes(), k, n, bf, rpo, q);
let _ = run!(ff, F_128, Sha3_512)(&bytes(), k, n, bf, rpo, q);
let _ = run!(ff, F_BLS12_381, Blake3)(&bytes(), k, n, bf, rpo, q);
let _ = run!(ff, F_BLS12_381, Sha3_256)(&bytes(), k, n, bf, rpo, q);
let _ = run!(ff, F_BLS12_381, Sha3_512)(&bytes(), k, n, bf, rpo, q);
}
}
}
//! Interact with the filesystem, read from and write to it.
use std::{
fs::File,
io::prelude::*,
@@ -15,18 +15,18 @@ use tracing::info
use crate::semi_avid::Block;
/// Dumps any serializable object to the disk.
///
/// - `dumpable` can be anything that is _serializable_
/// - if `filename` is provided, then it will be used as the filename as is
/// - otherwise, the hash of the _dumpable_ will be computed and used as the
/// filename
///
/// This function will return the name of the file the _dumpable_ has been
/// dumped to.
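///
/// A minimal sketch, assuming `block` is any serializable object (the path
/// here is hypothetical):
/// ```ignore
/// let filename = dump(&block, Path::new("blocks/"), None, Compress::Yes)?;
/// // `filename` is the hash of `block`, used as the file name
/// ```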
pub fn dump(
dumpable: &impl CanonicalSerialize,
directory: &Path,
filename: Option<&str>,
compress: Compress,
) -> Result<String> {
@@ -43,7 +43,7 @@ pub fn dump(
.join(""),
};
let dump_path = directory.join(&filename);
info!("dumping dumpable into `{:?}`", dump_path);
let mut file = File::create(&dump_path)?;
@@ -52,29 +52,29 @@ pub fn dump(
Ok(filename)
}
/// Dumps a bunch of blocks to the disk and returns a JSON / NUON compatible list
/// of all the hashes that have been dumped.
///
/// > **Note**
/// >
/// > This is a wrapper around [`dump`].
///
/// # Example
/// Let's say we give three blocks to [`dump_blocks`] and their hashes are `aaaa`, `bbbb` and
/// `cccc` respectively, then this function will return
/// ```json
/// '["aaaa", "bbbb", "cccc"]'
/// ["aaaa", "bbbb", "cccc"]
/// ```
pub fn dump_blocks<F: PrimeField, G: CurveGroup<ScalarField = F>>(
blocks: &[Block<F, G>],
directory: &PathBuf,
compress: Compress,
) -> Result<String> {
info!("dumping blocks to `{:?}`", block_dir);
info!("dumping blocks to `{:?}`", directory);
let mut hashes = vec![];
std::fs::create_dir_all(directory)?;
for block in blocks.iter() {
let hash = dump(block, directory, None, compress)?;
hashes.push(hash);
}
@@ -87,30 +87,30 @@ pub fn dump_blocks<F: PrimeField, G: CurveGroup<ScalarField = F>>(
Ok(formatted_output)
}
/// Reads blocks from a list of block hashes.
///
/// > **Note**
/// >
/// > This is basically the inverse of [`dump_blocks`].
///
/// # Example
/// Let's say we have three blocks `A`, `B` and `C` whose hashes are `aaaa`, `bbbb` and `cccc`
/// respectively.
/// If one calls [`read_blocks`] with `aaaa` and `cccc` as the queried block hashes, the output of
/// this function will be
/// ```ignore
/// Ok(vec![("aaaa", A), ("cccc", C)])
/// ```
pub fn read_blocks<F: PrimeField, G: CurveGroup<ScalarField = F>>(
hashes: &[String],
directory: &Path,
compress: Compress,
validate: Validate,
) -> Result<Vec<(String, Block<F, G>)>> {
hashes
.iter()
.map(|f| {
let filename = directory.join(f);
let s = std::fs::read(filename)?;
Ok((
f.clone(),
sequenceDiagram
actor prover
actor verifier
Note over prover,verifier: generate trusted setup<br/>$$\ \text{TS} = ([\tau^j]_1)$$
Note left of prover: $$(P_i) = \text{split}(\Delta)$$
Note left of prover: $$c_i = \text{com}(P_i, \text{TS})$$
Note left of prover: $$r = H(P_0(\alpha)|...|P_{m-1}(\alpha))$$
Note left of prover: $$Q(X) = \sum\limits_i r^i P_i(X)$$
Note left of prover: $$\pi_\alpha = \text{KZG.prove}(Q, \text{TS})$$
prover->>verifier: $$\pi_{\alpha} \text{, } (c_i)$$
Note right of verifier: $$\ r = H(s_0|...|s_{m - 1})$$
Note right of verifier: $$\ y = \sum\limits_i r^i s_{\alpha, i}$$
Note right of verifier: $$\ c = \sum\limits_i r^i c_i$$
Note right of verifier: assert $$\ E(c - [y]_1, [1]_2) = E(\pi_{\alpha}, [\tau]_2 - [\alpha]_2)$$
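
The final assertion is the standard KZG verification equation: an honest prover sends $\pi_\alpha = [q(\tau)]_1$ where $q(X) = \frac{Q(X) - y}{X - \alpha}$, so that, with $c = [Q(\tau)]_1$ and $y = Q(\alpha)$, both pairings evaluate to the same target element:

$$E(c - [y]_1, [1]_2) = E([q(\tau)(\tau - \alpha)]_1, [1]_2) = E(\pi_\alpha, [\tau]_2 - [\alpha]_2)$$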