From 15caa7ed2b1bb2410953b23e2a2d3e1d71ebef99 Mon Sep 17 00:00:00 2001 From: Haofei Liang Date: Sun, 1 Sep 2024 12:00:18 +0800 Subject: [PATCH] Remake ci (#145) * remake actions * minor update * fix * fix * fix doc test * modify dependencies * typos * clippy * clippy auxiliary * clippy --- .github/workflows/rust.yml | 219 +++-- Cargo.toml | 7 +- algebra/Cargo.toml | 6 +- algebra/src/field/mod.rs | 2 +- algebra/src/field/ntt_fields.rs | 2 +- .../src/modulus/barrett/internal_macros.rs | 20 +- algebra/src/modulus/barrett/mod.rs | 2 +- .../multivariate/data_structures.rs | 4 +- .../multivariate/multilinear/dense.rs | 4 +- .../univariate/native_polynomial.rs | 2 +- .../polynomial/univariate/ntt_polynomial.rs | 12 +- algebra/src/reduce/lazy_ops.rs | 4 +- algebra/src/reduce/ops.rs | 8 +- algebra/src/utils/mod.rs | 2 +- algebra/tests/derive_test.rs | 790 +++++++++--------- algebra/tests/extension_test.rs | 2 + fhe_core/Cargo.toml | 2 +- fhe_core/src/key_switch/lwe.rs | 2 +- fhe_core/src/key_switch/rlwe.rs | 2 +- fhe_core/src/parameter.rs | 2 +- fhe_core/src/secret_key.rs | 2 +- lattice/Cargo.toml | 2 +- pcs/src/multilinear/brakedown/mod.rs | 2 +- pcs/src/utils/arithmetic.rs | 4 +- pcs/src/utils/code/mod.rs | 4 +- pcs/src/utils/merkle_tree.rs | 2 +- zkfhe/benches/zk_bfhe.rs | 4 +- zkfhe/benches/zk_bnfhe.rs | 4 +- zkfhe/examples/count_ntt.rs | 4 +- zkfhe/examples/zk_bfhe.rs | 10 +- zkfhe/examples/zk_bnfhe.rs | 10 +- zkfhe/src/bfhe/mod.rs | 2 +- zkfhe/src/bfhe/parameters.rs | 4 +- zkfhe/src/ntru_bfhe/mod.rs | 2 +- zkfhe/src/ntru_bfhe/parameters.rs | 2 +- zkp/Cargo.toml | 10 +- zkp/src/piop/accumulator.rs | 9 +- zkp/src/piop/ntt/mod.rs | 18 +- zkp/src/piop/ntt/ntt_bare.rs | 4 +- zkp/src/piop/rlwe_mul_rgsw.rs | 8 +- zkp/src/piop/round.rs | 2 +- zkp/src/sumcheck/prover.rs | 2 +- zkp/tests/test_accumulator.rs | 2 +- zkp/tests/test_addition_in_zq.rs | 4 +- zkp/tests/test_ntt.rs | 6 +- zkp/tests/test_rlwe_mult_rgsw.rs | 2 +- 46 files changed, 647 insertions(+), 572 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index a3e2b553..c8311f38 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -1,103 +1,166 @@ -name: Rust +env: + # We aim to always test with the latest stable Rust toolchain, however we pin to a specific + # version like 1.70. Note that we only specify MAJOR.MINOR and not PATCH so that bugfixes still + # come automatically. If the version specified here is no longer the latest stable version, + # then please feel free to submit a PR that adjusts it along with the potential clippy fixes. + RUST_STABLE_VER: "1.80" # In quotes because otherwise (e.g.) 1.70 would be interpreted as 1.7 + # The purpose of checking with the minimum supported Rust toolchain is to detect its staleness. + # If the compilation fails, then the version specified here needs to be bumped up to reality. + # Be sure to also update the rust-version property in the workspace Cargo.toml file, + # plus all the README.md files of the affected packages. + RUST_MIN_VER: "1.79" + # List of packages that will be checked with the minimum supported Rust version. + # This should be limited to packages that are intended for publishing. + RUST_MIN_VER_PKGS: "-p algebra -p lattice -p fhe_core -p zkfhe" + CARGO_TERM_COLOR: always + +name: CI on: workflow_dispatch: - push: - branches: ["main"] pull_request: - branches: ["main"] - -env: - CARGO_TERM_COLOR: always + merge_group: + # We run on push, even though the commit is the same as when we ran in merge_group. 
+ # This allows the cache to be primed. + # See https://github.com/orgs/community/discussions/66430 + push: + branches: + - main jobs: - compatibility_check: - name: Check with compatibility requirement + fmt: + name: formatting runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@master + + - name: install stable toolchain + uses: dtolnay/rust-toolchain@master with: - toolchain: stable 6 months ago - components: rustfmt, clippy - # - uses: Swatinem/rust-cache@v2 - - run: cargo fmt --all --check - - run: cargo check --workspace --all-targets --no-default-features --features="count_ntt" - - run: cargo check --workspace --all-targets --no-default-features --features="concrete-ntt" - - run: cargo clippy --no-deps --all-targets --no-default-features --features="count_ntt" -- -D warnings - - run: cargo clippy --no-deps --all-targets --no-default-features --features="concrete-ntt" -- -D warnings - - # check: - # name: Basic check - # runs-on: ubuntu-latest - # steps: - # - uses: actions/checkout@v4 - # - uses: dtolnay/rust-toolchain@stable - # with: - # components: rustfmt, clippy - # # - uses: Swatinem/rust-cache@v2 - # - run: cargo fmt --all --check - # - run: cargo check --workspace --all-targets --no-default-features --features="count_ntt" - # - run: cargo clippy --no-deps --all-targets --no-default-features --features="count_ntt" -- -D warnings + toolchain: ${{ env.RUST_STABLE_VER }} + components: rustfmt - doc: - name: Document - runs-on: ubuntu-latest + - name: cargo fmt + run: cargo fmt --all --check + + clippy-stable: + name: cargo clippy + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [windows-latest, macos-latest, ubuntu-latest] steps: - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@stable - # - uses: Swatinem/rust-cache@v2 - - run: cargo test --workspace --doc --features="count_ntt concrete-ntt" - - run: cargo doc --no-deps --workspace --lib --document-private-items --no-default-features --features="count_ntt concrete-ntt" - test: + - name: restore cache + uses: Swatinem/rust-cache@v2 + with: + save-if: ${{ github.event_name != 'merge_group' }} + + - name: install stable toolchain + uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{ env.RUST_STABLE_VER }} + components: clippy + + - name: install cargo-hack + uses: taiki-e/install-action@v2 + with: + tool: cargo-hack + + - name: cargo clippy + run: cargo hack clippy --workspace --each-feature --skip nightly --optional-deps -- -D warnings + + - name: cargo clippy (auxiliary) + run: cargo hack clippy --workspace --each-feature --skip nightly --optional-deps --tests --benches --examples -- -D warnings + + test-stable: + name: cargo test + runs-on: ${{ matrix.os }} strategy: + fail-fast: false matrix: - os: [ubuntu-latest, windows-latest, macos-latest] - # os: [ubuntu-latest] - name: Test - runs-on: ${{ matrix.os }} + os: [windows-latest, macos-latest, ubuntu-latest] steps: - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@master + # We intentionally do not use lfs: true here, instead using the caching method to save LFS bandwidth. 
+ + - name: restore cache + uses: Swatinem/rust-cache@v2 with: - toolchain: stable 6 months ago - # - uses: Swatinem/rust-cache@v2 - - run: cargo test --workspace --lib --bins --tests --examples --no-default-features --features="count_ntt" - - run: cargo test --workspace --lib --bins --tests --examples --no-default-features --features="concrete-ntt" + save-if: ${{ github.event_name != 'merge_group' }} - build: + - name: install stable toolchain + uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{ env.RUST_STABLE_VER }} + + - name: Install cargo-nextest + uses: taiki-e/install-action@v2 + with: + tool: cargo-nextest + + - name: cargo test + run: cargo nextest run --workspace --no-default-features + + - name: cargo test + run: cargo nextest run --workspace + + check-msrv: + name: cargo check (msrv) + runs-on: ${{ matrix.os }} strategy: matrix: - platform: - - os: ubuntu-latest - target: x86_64-unknown-linux-gnu - command: build - - - os: windows-latest - target: x86_64-pc-windows-msvc - command: build - - # - os: macos-latest - # target: x86_64-apple-darwin - # command: build - - # - os: ubuntu-latest - # target: aarch64-unknown-linux-gnu - # command: build - - - os: macos-latest - target: aarch64-apple-darwin - command: build - name: Build - runs-on: ${{ matrix.platform.os }} + os: [windows-latest, macos-latest, ubuntu-latest] + steps: + - uses: actions/checkout@v4 + + - name: restore cache + uses: Swatinem/rust-cache@v2 + with: + save-if: ${{ github.event_name != 'merge_group' }} + + - name: install msrv toolchain + uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{ env.RUST_MIN_VER }} + + - name: install cargo-hack + uses: taiki-e/install-action@v2 + with: + tool: cargo-hack + + - name: cargo check + run: cargo hack check ${{ env.RUST_MIN_VER_PKGS }} --each-feature --skip nightly --optional-deps + + doc: + name: cargo doc + # NOTE: We don't have any platform specific docs in this workspace, so we only run on Ubuntu. + # If we get per-platform docs (win/macos/linux/wasm32/..) then doc jobs should match that. + runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - name: Build binary - uses: houseabsolute/actions-rust-cross@v0 + + - name: restore cache + uses: Swatinem/rust-cache@v2 with: - command: ${{ matrix.platform.command }} - target: ${{ matrix.platform.target }} - # args: "--locked --release" - # strip: true - - uses: Swatinem/rust-cache@v2 + save-if: ${{ github.event_name != 'merge_group' }} + + - name: install nightly toolchain + uses: dtolnay/rust-toolchain@nightly + + - name: cargo test doc + run: cargo test --doc --workspace --all-features + + # We test documentation using nightly to match docs.rs. 
This prevents potential breakages + - name: cargo doc + run: cargo doc --workspace --no-default-features --features="count_ntt concrete-ntt" --no-deps --document-private-items -Zunstable-options -Zrustdoc-scrape-examples + + # If this fails, consider changing your text or adding something to .typos.toml + typos: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: check typos + uses: crate-ci/typos@v1.23.2 diff --git a/Cargo.toml b/Cargo.toml index 460cc940..22b8f037 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,13 +20,14 @@ once_cell = "1.19" rand = "0.8" rand_distr = "0.4" rand_core = "0.6.4" +rand_chacha = "0.3.1" rayon = "1" -bytemuck = { version = "1.16", features = ["derive"] } +bytemuck = { version = "1.17", features = ["derive"] } merlin = { version = "3.0.0", default-features = false } serde = { version = "1.0", features = ["derive"] } bincode = "1.3" -itertools = "0.13.0" -sha2 = { version = "0.10.8" } +itertools = "0.13" +sha2 = { version = "0.10" } criterion = "0.5" diff --git a/algebra/Cargo.toml b/algebra/Cargo.toml index d78e8805..92999afd 100644 --- a/algebra/Cargo.toml +++ b/algebra/Cargo.toml @@ -12,14 +12,14 @@ thiserror = { workspace = true } num-traits = { workspace = true } once_cell = { workspace = true } rand = { workspace = true } +rand_core = { workspace = true } rand_distr = { workspace = true } bytemuck = { workspace = true } -rand_core = { workspace = true } merlin = { workspace = true } serde = { workspace = true } bincode = { workspace = true } concrete-ntt = { git = "https://github.com/pado-labs/concrete-ntt", branch = "main", optional = true } -itertools = "0.13.0" +itertools = { workspace = true } [features] default = ["concrete-ntt"] @@ -29,7 +29,7 @@ count_ntt = [] [dev-dependencies] criterion = { workspace = true } -rand_chacha = "0.3.1" +rand_chacha = { workspace = true } [[bench]] name = "ntt_bench" diff --git a/algebra/src/field/mod.rs b/algebra/src/field/mod.rs index 94124bd0..3c8de0d4 100644 --- a/algebra/src/field/mod.rs +++ b/algebra/src/field/mod.rs @@ -147,7 +147,7 @@ pub trait DecomposableField: Field { /// A trait defined for specific fields used and optimized for FHE. pub trait FheField: DecomposableField { /// Creates a new instance. - /// Can be overloaded with optimized implemetation. + /// Can be overloaded with optimized implementation. #[inline] fn lazy_new(value: Self::Value) -> Self { Self::new(value) diff --git a/algebra/src/field/ntt_fields.rs b/algebra/src/field/ntt_fields.rs index 554ce377..237f08c2 100644 --- a/algebra/src/field/ntt_fields.rs +++ b/algebra/src/field/ntt_fields.rs @@ -46,7 +46,7 @@ pub trait NTTField: PrimeField + FheField + From { /// Try to get the minimal primitive `degree`-th root of unity reduce `p`. /// - /// For ease of introduction we use `n` for `degreee` and `p` for prime number. + /// For ease of introduction we use `n` for `degree` and `p` for prime number. /// /// Let `n` be a power of 2 and `p` a prime with `p ≡ 1 (mod 2n)`. /// diff --git a/algebra/src/modulus/barrett/internal_macros.rs b/algebra/src/modulus/barrett/internal_macros.rs index 2863d64e..06624f52 100644 --- a/algebra/src/modulus/barrett/internal_macros.rs +++ b/algebra/src/modulus/barrett/internal_macros.rs @@ -79,7 +79,7 @@ macro_rules! impl_barrett_modulus { impl $crate::reduce::LazyReduce> for $SelfT { type Output = Self; - /// Caculates `self (mod 2*modulus)`. + /// Calculates `self (mod 2*modulus)`. /// /// ## Procedure /// @@ -133,7 +133,7 @@ macro_rules! 
impl_barrett_modulus { impl $crate::reduce::Reduce> for $SelfT { type Output = Self; - /// Caculates `self (mod 2*modulus)`. + /// Calculates `self (mod 2*modulus)`. /// /// ## Procedure /// @@ -173,7 +173,7 @@ macro_rules! impl_barrett_modulus { impl $crate::reduce::LazyReduce> for [$SelfT; 2] { type Output = $SelfT; - /// Caculates `self (mod 2*modulus)`. + /// Calculates `self (mod 2*modulus)`. /// /// ## Procedure /// @@ -237,7 +237,7 @@ macro_rules! impl_barrett_modulus { impl $crate::reduce::Reduce> for [$SelfT; 2] { type Output = $SelfT; - /// Caculates `self (mod modulus)`. + /// Calculates `self (mod modulus)`. /// /// ## Procedure /// @@ -277,7 +277,7 @@ macro_rules! impl_barrett_modulus { impl $crate::reduce::LazyReduce> for ($SelfT, $SelfT) { type Output = $SelfT; - /// Caculates `self (mod 2*modulus)`. + /// Calculates `self (mod 2*modulus)`. /// /// ## Procedure /// @@ -341,7 +341,7 @@ macro_rules! impl_barrett_modulus { impl $crate::reduce::Reduce> for ($SelfT, $SelfT) { type Output = $SelfT; - /// Caculates `self (mod modulus)`. + /// Calculates `self (mod modulus)`. /// /// ## Procedure /// @@ -381,7 +381,7 @@ macro_rules! impl_barrett_modulus { impl $crate::reduce::LazyReduce> for &[$SelfT] { type Output = $SelfT; - /// Caculates `self (mod 2*modulus)` when value's length > 0. + /// Calculates `self (mod 2*modulus)` when value's length > 0. fn lazy_reduce(self, modulus: BarrettModulus<$SelfT>) -> Self::Output { match self { &[] => unreachable!(), @@ -402,7 +402,7 @@ macro_rules! impl_barrett_modulus { impl $crate::reduce::Reduce> for &[$SelfT] { type Output = $SelfT; - /// Caculates `self (mod modulus)` when value's length > 0. + /// Calculates `self (mod modulus)` when value's length > 0. fn reduce(self, modulus: BarrettModulus<$SelfT>) -> Self::Output { match self { &[] => unreachable!(), @@ -421,7 +421,7 @@ macro_rules! impl_barrett_modulus { } impl $crate::reduce::LazyReduceAssign> for $SelfT { - /// Caculates `self (mod 2*modulus)`. + /// Calculates `self (mod 2*modulus)`. /// /// ## Procedure /// @@ -453,7 +453,7 @@ macro_rules! impl_barrett_modulus { } impl $crate::reduce::ReduceAssign> for $SelfT { - /// Caculates `self (mod modulus)`. + /// Calculates `self (mod modulus)`. /// /// ## Procedure /// diff --git a/algebra/src/modulus/barrett/mod.rs b/algebra/src/modulus/barrett/mod.rs index e455d080..da2085af 100644 --- a/algebra/src/modulus/barrett/mod.rs +++ b/algebra/src/modulus/barrett/mod.rs @@ -4,7 +4,7 @@ //! Barrett reduction computes `r ≡ x mod m` given `x` and `m` //! and return `r` where `r < m`. //! -//! Fisrt, we need decide the radix `b`, which is chosen to be close to +//! First, we need decide the radix `b`, which is chosen to be close to //! the word-size of the processor. Here, `b` = 2^64. //! //! 
The algorithm then precomputes a quantity ratio `µ = ⌊b^(2k)/m⌋`, diff --git a/algebra/src/polynomial/multivariate/data_structures.rs b/algebra/src/polynomial/multivariate/data_structures.rs index 76f373ed..2c1579e2 100644 --- a/algebra/src/polynomial/multivariate/data_structures.rs +++ b/algebra/src/polynomial/multivariate/data_structures.rs @@ -28,7 +28,7 @@ pub struct ListOfProductsOfPolynomials { pub num_variables: usize, /// list of reference to products (as usize) of multilinear extension pub products: Vec<(F, Vec)>, - /// Stores the linear operations, each of which is successively (in the same order) perfomed over the each MLE of each product stored in the above `products` + /// Stores the linear operations, each of which is successively (in the same order) performed over the each MLE of each product stored in the above `products` /// so each (a: F, b: F) can used to wrap a linear operation over the original MLE f, i.e. a \cdot f + b pub linear_ops: Vec>, /// Stores multilinear extensions in which product multiplicand can refer to. @@ -90,7 +90,7 @@ impl ListOfProductsOfPolynomials { let mut indexed_product: Vec = Vec::with_capacity(op_coefficient.len()); let mut linear_ops = Vec::with_capacity(op_coefficient.len()); - // (a, b) is the linear operation perfomed on the original MLE pointed by m + // (a, b) is the linear operation performed on the original MLE pointed by m for (m, (a, b)) in product.iter().zip(op_coefficient) { assert_eq!( m.num_vars, self.num_variables, diff --git a/algebra/src/polynomial/multivariate/multilinear/dense.rs b/algebra/src/polynomial/multivariate/multilinear/dense.rs index 079cad8c..865492a5 100644 --- a/algebra/src/polynomial/multivariate/multilinear/dense.rs +++ b/algebra/src/polynomial/multivariate/multilinear/dense.rs @@ -112,10 +112,10 @@ impl DenseMultilinearExtension { } impl DenseMultilinearExtension { - /// Decompose bits of each evaluation of the origianl MLE. + /// Decompose bits of each evaluation of the original MLE. /// The bit deomposition is only applied for power-of-two base. /// * base_len: the length of base, i.e. log_2(base) - /// * bits_len: the lenth of decomposed bits + /// * bits_len: the length of decomposed bits /// /// The resulting decomposition bits are respectively wrapped into `Rc` struct, which can be more easilier added into the ListsOfProducts. #[inline] diff --git a/algebra/src/polynomial/univariate/native_polynomial.rs b/algebra/src/polynomial/univariate/native_polynomial.rs index 9f629d0a..aae73074 100644 --- a/algebra/src/polynomial/univariate/native_polynomial.rs +++ b/algebra/src/polynomial/univariate/native_polynomial.rs @@ -694,7 +694,7 @@ impl Inv for &Polynomial { } } -/// Performs enrty-wise fast mul operation. +/// Performs entry-wise fast mul operation. /// /// The result coefficients may be in [0, 2*modulus) for some case, /// and fall back to [0, modulus) for normal case. diff --git a/algebra/src/polynomial/univariate/ntt_polynomial.rs b/algebra/src/polynomial/univariate/ntt_polynomial.rs index d25c7555..7e8d1738 100644 --- a/algebra/src/polynomial/univariate/ntt_polynomial.rs +++ b/algebra/src/polynomial/univariate/ntt_polynomial.rs @@ -575,9 +575,9 @@ impl Inv for &NTTPolynomial { } } -/// Performs enrty-wise add_mul operation. +/// Performs entry-wise add_mul operation. /// -/// Multiply enrty-wise over last two [NTTPolynomial], and add back to the first +/// Multiply entry-wise over last two [NTTPolynomial], and add back to the first /// [NTTPolynomial]. 
#[inline] pub fn ntt_add_mul_assign( @@ -591,9 +591,9 @@ pub fn ntt_add_mul_assign( .for_each(|((a, &b), &c)| a.add_mul_assign(b, c)); } -/// Performs enrty-wise add_mul operation. +/// Performs entry-wise add_mul operation. /// -/// Multiply enrty-wise over middle two [NTTPolynomial], and add the first +/// Multiply entry-wise over middle two [NTTPolynomial], and add the first /// [NTTPolynomial], store the result to last [NTTPolynomial]. #[inline] pub fn ntt_add_mul_inplace( @@ -609,9 +609,9 @@ pub fn ntt_add_mul_inplace( .for_each(|(((d, &a), &b), &c)| *d = a.add_mul(b, c)); } -/// Performs enrty-wise add_mul fast operation. +/// Performs entry-wise add_mul fast operation. /// -/// Multiply enrty-wise over last two [NTTPolynomial], and add back to the first +/// Multiply entry-wise over last two [NTTPolynomial], and add back to the first /// [NTTPolynomial]. /// /// The result coefficients may be in [0, 2*modulus) for some case, diff --git a/algebra/src/reduce/lazy_ops.rs b/algebra/src/reduce/lazy_ops.rs index 58ddcc1c..3f5b14de 100644 --- a/algebra/src/reduce/lazy_ops.rs +++ b/algebra/src/reduce/lazy_ops.rs @@ -3,7 +3,7 @@ pub trait LazyReduce: Sized { /// Output type. type Output; - /// Caculates `self (mod 2*modulus)`. + /// Calculates `self (mod 2*modulus)`. /// /// If `Modulus` doesn't support this special case, /// just fall back to `Reduce` trait. @@ -12,7 +12,7 @@ pub trait LazyReduce: Sized { /// The lazy modulo assignment operation. pub trait LazyReduceAssign: Sized { - /// Caculates `self (mod 2*modulus)`. + /// Calculates `self (mod 2*modulus)`. /// /// If `Modulus` doesn't support this special case, /// just fall back to `ReduceAssign` trait. diff --git a/algebra/src/reduce/ops.rs b/algebra/src/reduce/ops.rs index 9b9eefce..49964654 100644 --- a/algebra/src/reduce/ops.rs +++ b/algebra/src/reduce/ops.rs @@ -5,13 +5,13 @@ pub trait Reduce: Sized { /// Output type. type Output; - /// Caculates `self (mod modulus)`. + /// Calculates `self (mod modulus)`. fn reduce(self, modulus: Modulus) -> Self::Output; } /// The modulo assignment operation. pub trait ReduceAssign: Sized { - /// Caculates `self (mod modulus)`. + /// Calculates `self (mod modulus)`. fn reduce_assign(&mut self, modulus: Modulus); } @@ -113,7 +113,7 @@ pub trait MulReduceAssign { /// The modular exponentiation. pub trait PowReduce { - /// Calcualtes `self^exp (mod modulus)`. + /// Calculates `self^exp (mod modulus)`. fn pow_reduce(self, exp: Exponent, modulus: Modulus) -> Self; } @@ -165,7 +165,7 @@ pub trait DotProductReduce: Sized { /// Output type. type Output; - /// Calcualte `a • b mod modulus` + /// Calculate `a • b mod modulus` /// /// For two same length slice `a = (a0, a1, ..., an)` and `b = (b0, b1, ..., bn)`. /// diff --git a/algebra/src/utils/mod.rs b/algebra/src/utils/mod.rs index fde01476..5f0497e6 100644 --- a/algebra/src/utils/mod.rs +++ b/algebra/src/utils/mod.rs @@ -1,4 +1,4 @@ -//! Implemention of some number theory operation. +//! Implementation of some number theory operation. 
mod aes; mod block; diff --git a/algebra/tests/derive_test.rs b/algebra/tests/derive_test.rs index df0b6969..5e55a32c 100644 --- a/algebra/tests/derive_test.rs +++ b/algebra/tests/derive_test.rs @@ -1,402 +1,400 @@ -use algebra::derive::{DecomposableField, FheField, Field, Prime, NTT}; +use algebra::{ + derive::{DecomposableField, FheField, Field, Prime, NTT}, + modulus::BarrettModulus, + reduce::*, + Basis, DecomposableField, Field, FieldUniformSampler, ModulusConfig, PrimeField, +}; +use num_traits::{Inv, One, Zero}; +use rand::{distributions::Uniform, thread_rng, Rng}; +use rand_distr::Distribution; #[derive(Field, DecomposableField, FheField, Prime, NTT)] #[modulus = 132120577] pub struct Fp32(u32); -#[cfg(test)] -mod tests { - use super::*; - - use algebra::{ - modulus::{ - from_monty, to_canonical_u64, to_monty, BabyBearModulus, BarrettModulus, - GoldilocksModulus, - }, - reduce::*, - BabyBear, Basis, DecomposableField, Field, FieldUniformSampler, Goldilocks, ModulusConfig, - PrimeField, - }; - use num_traits::{Inv, One, Zero}; - use rand::{distributions::Uniform, thread_rng, Rng}; - use rand_distr::Distribution; - - type FF = Fp32; - type T = u32; - type W = u64; - - #[test] - fn test_fp() { - let p = FF::MODULUS.value(); - - let distr = Uniform::new(0, p); - let mut rng = thread_rng(); - - assert!(FF::is_prime_field()); - - // add - let a = rng.sample(distr); - let b = rng.sample(distr); - let c = (a + b) % p; - assert_eq!(FF::new(a) + FF::new(b), FF::new(c)); - - // add_assign - let mut a = FF::new(a); - a += FF::new(b); - assert_eq!(a, FF::new(c)); - - // sub - let a = rng.sample(distr); - let b = rng.gen_range(0..=a); - let c = (a - b) % p; - assert_eq!(FF::new(a) - FF::new(b), FF::new(c)); - - // sub_assign - let mut a = FF::new(a); - a -= FF::new(b); - assert_eq!(a, FF::new(c)); - - // mul - let a = rng.sample(distr); - let b = rng.sample(distr); - let c = ((a as W * b as W) % p as W) as T; - assert_eq!(FF::new(a) * FF::new(b), FF::new(c)); - - // mul_assign - let mut a = FF::new(a); - a *= FF::new(b); - assert_eq!(a, FF::new(c)); - - // div - let a = rng.sample(distr); - let b = rng.sample(distr); - let b_inv = b.pow_reduce(p - 2, BarrettModulus::::new(p)); - let c = ((a as W * b_inv as W) % p as W) as T; - assert_eq!(FF::new(a) / FF::new(b), FF::new(c)); - - // div_assign - let mut a = FF::new(a); - a /= FF::new(b); - assert_eq!(a, FF::new(c)); - - // neg - let a = rng.sample(distr); - let a_neg = -FF::new(a); - assert_eq!(FF::new(a) + a_neg, FF::zero()); - - let a = FF::zero(); - assert_eq!(a, -a); - - // inv - let a = rng.sample(distr); - let a_inv = a.pow_reduce(p - 2, BarrettModulus::::new(p)); - assert_eq!(FF::new(a).inv(), FF::new(a_inv)); - assert_eq!(FF::new(a) * FF::new(a_inv), FF::one()); - - // associative - let a = rng.sample(distr); - let b = rng.sample(distr); - let c = rng.sample(distr); - assert_eq!( - (FF::new(a) + FF::new(b)) + FF::new(c), - FF::new(a) + (FF::new(b) + FF::new(c)) - ); - assert_eq!( - (FF::new(a) * FF::new(b)) * FF::new(c), - FF::new(a) * (FF::new(b) * FF::new(c)) - ); - - // commutative - let a = rng.sample(distr); - let b = rng.sample(distr); - assert_eq!(FF::new(a) + FF::new(b), FF::new(b) + FF::new(a)); - assert_eq!(FF::new(a) * FF::new(b), FF::new(b) * FF::new(a)); - - // identity - let a = rng.sample(distr); - assert_eq!(FF::new(a) + FF::new(0), FF::new(a)); - assert_eq!(FF::new(a) * FF::new(1), FF::new(a)); - - // distribute - let a = rng.sample(distr); - let b = rng.sample(distr); - let c = rng.sample(distr); - assert_eq!( - 
(FF::new(a) + FF::new(b)) * FF::new(c), - (FF::new(a) * FF::new(c)) + (FF::new(b) * FF::new(c)) - ); - } - - #[test] - fn test_decompose() { - const BITS: u32 = 2; - const B: u32 = 1 << BITS; - let basis = >::new(BITS); - let rng = &mut thread_rng(); - - let uniform = >::new(); - let a: FF = uniform.sample(rng); - let decompose = a.decompose(basis); - let compose = decompose - .into_iter() - .enumerate() - .fold(FF::new(0), |acc, (i, d)| { - acc + d * FF::new(B.pow(i as T) as T) - }); - - assert_eq!(compose, a); - } - - #[test] - fn baby_bear_test() { - let p = BabyBear::MODULUS_VALUE; - - let distr = Uniform::new(0, p); - let mut rng = thread_rng(); - - assert!(BabyBear::is_prime_field()); - - // add - let a = rng.sample(distr); - let b = rng.sample(distr); - let c = (a + b) % p; - assert_eq!(BabyBear::new(a) + BabyBear::new(b), BabyBear::new(c)); - - // add_assign - let mut a = BabyBear::new(a); - a += BabyBear::new(b); - assert_eq!(a, BabyBear::new(c)); - - // sub - let a = rng.sample(distr); - let b = rng.gen_range(0..=a); - let c = (a - b) % p; - assert_eq!(BabyBear::new(a) - BabyBear::new(b), BabyBear::new(c)); - - // sub_assign - let mut a = BabyBear::new(a); - a -= BabyBear::new(b); - assert_eq!(a, BabyBear::new(c)); - - // mul - let a = rng.sample(distr); - let b = rng.sample(distr); - let c = ((a as W * b as W) % p as W) as T; - assert_eq!(BabyBear::new(a) * BabyBear::new(b), BabyBear::new(c)); - - // mul_assign - let mut a = BabyBear::new(a); - a *= BabyBear::new(b); - assert_eq!(a, BabyBear::new(c)); - - // div - let a = rng.sample(distr); - let b = rng.sample(distr); - let b_inv = from_monty((to_monty(b)).pow_reduce(p - 2, BabyBearModulus)); - let c = ((a as W * b_inv as W) % (p as W)) as T; - assert_eq!(BabyBear::new(a) / BabyBear::new(b), BabyBear::new(c)); - - // div_assign - let mut a = BabyBear::new(a); - a /= BabyBear::new(b); - assert_eq!(a, BabyBear::new(c)); - - // neg - let a = rng.sample(distr); - let a_neg = -BabyBear::new(a); - assert_eq!(BabyBear::new(a) + a_neg, BabyBear::zero()); - - let a = BabyBear::zero(); - assert_eq!(a, -a); - - // inv - let a = rng.sample(distr); - let a_inv = from_monty((to_monty(a)).pow_reduce(p - 2, BabyBearModulus)); - assert_eq!(BabyBear::new(a).inv(), BabyBear::new(a_inv)); - assert_eq!(BabyBear::new(a) * BabyBear::new(a_inv), BabyBear::one()); - - // associative - let a = rng.sample(distr); - let b = rng.sample(distr); - let c = rng.sample(distr); - assert_eq!( - (BabyBear::new(a) + BabyBear::new(b)) + BabyBear::new(c), - BabyBear::new(a) + (BabyBear::new(b) + BabyBear::new(c)) - ); - assert_eq!( - (BabyBear::new(a) * BabyBear::new(b)) * BabyBear::new(c), - BabyBear::new(a) * (BabyBear::new(b) * BabyBear::new(c)) - ); - - // commutative - let a = rng.sample(distr); - let b = rng.sample(distr); - assert_eq!( - BabyBear::new(a) + BabyBear::new(b), - BabyBear::new(b) + BabyBear::new(a) - ); - assert_eq!( - BabyBear::new(a) * BabyBear::new(b), - BabyBear::new(b) * BabyBear::new(a) - ); - - // identity - let a = rng.sample(distr); - assert_eq!(BabyBear::new(a) + BabyBear::new(0), BabyBear::new(a)); - assert_eq!(BabyBear::new(a) * BabyBear::new(1), BabyBear::new(a)); - - // distribute - let a = rng.sample(distr); - let b = rng.sample(distr); - let c = rng.sample(distr); - assert_eq!( - (BabyBear::new(a) + BabyBear::new(b)) * BabyBear::new(c), - (BabyBear::new(a) * BabyBear::new(c)) + (BabyBear::new(b) * BabyBear::new(c)) - ); - - const BITS: u32 = 2; - const B: u32 = 1 << BITS; - let basis = >::new(BITS); - let rng = &mut thread_rng(); - 
- let uniform = >::new(); - let a: BabyBear = uniform.sample(rng); - let decompose = a.decompose(basis); - let compose = decompose - .into_iter() - .enumerate() - .fold(BabyBear::new(0), |acc, (i, d)| { - acc + d * BabyBear::new(B.pow(i as T) as T) - }); - - assert_eq!(compose, a); - } - - #[test] - fn goldilocks_test() { - let p = Goldilocks::MODULUS_VALUE; - - let distr = Uniform::new(0, p); - let mut rng = thread_rng(); - - assert!(Goldilocks::is_prime_field()); - - // add - let a = rng.sample(distr); - let b = rng.sample(distr); - let c = ((a as u128 + b as u128) % (p as u128)) as u64; - assert_eq!(Goldilocks::new(a) + Goldilocks::new(b), Goldilocks::new(c)); - - // add_assign - let mut a = Goldilocks::new(a); - a += Goldilocks::new(b); - assert_eq!(a, Goldilocks::new(c)); - - // sub - let a = rng.sample(distr); - let b = rng.gen_range(0..=a); - let c = (a - b) % p; - assert_eq!(Goldilocks::new(a) - Goldilocks::new(b), Goldilocks::new(c)); - - // sub_assign - let mut a = Goldilocks::new(a); - a -= Goldilocks::new(b); - assert_eq!(a, Goldilocks::new(c)); - - // mul - let a = rng.sample(distr); - let b = rng.sample(distr); - let c = ((a as u128 * b as u128) % p as u128) as u64; - assert_eq!(Goldilocks::new(a) * Goldilocks::new(b), Goldilocks::new(c)); - - // mul_assign - let mut a = Goldilocks::new(a); - a *= Goldilocks::new(b); - assert_eq!(a, Goldilocks::new(c)); - - // div - let a = rng.sample(distr); - let b = rng.sample(distr); - let b_inv = to_canonical_u64(b.pow_reduce(p - 2, GoldilocksModulus)); - let c = ((a as u128 * b_inv as u128) % (p as u128)) as u64; - assert_eq!(Goldilocks::new(a) / Goldilocks::new(b), Goldilocks::new(c)); - - // div_assign - let mut a = Goldilocks::new(a); - a /= Goldilocks::new(b); - assert_eq!(a, Goldilocks::new(c)); - - // neg - let a = rng.sample(distr); - let a_neg = -Goldilocks::new(a); - assert_eq!(Goldilocks::new(a) + a_neg, Goldilocks::zero()); - - let a = Goldilocks::zero(); - assert_eq!(a, -a); - - // inv - let a = rng.sample(distr); - let a_inv = to_canonical_u64(a.pow_reduce(p - 2, GoldilocksModulus)); - assert_eq!(Goldilocks::new(a).inv(), Goldilocks::new(a_inv)); - assert_eq!( - Goldilocks::new(a) * Goldilocks::new(a_inv), - Goldilocks::one() - ); - - // associative - let a = rng.sample(distr); - let b = rng.sample(distr); - let c = rng.sample(distr); - assert_eq!( - (Goldilocks::new(a) + Goldilocks::new(b)) + Goldilocks::new(c), - Goldilocks::new(a) + (Goldilocks::new(b) + Goldilocks::new(c)) - ); - assert_eq!( - (Goldilocks::new(a) * Goldilocks::new(b)) * Goldilocks::new(c), - Goldilocks::new(a) * (Goldilocks::new(b) * Goldilocks::new(c)) - ); - - // commutative - let a = rng.sample(distr); - let b = rng.sample(distr); - assert_eq!( - Goldilocks::new(a) + Goldilocks::new(b), - Goldilocks::new(b) + Goldilocks::new(a) - ); - assert_eq!( - Goldilocks::new(a) * Goldilocks::new(b), - Goldilocks::new(b) * Goldilocks::new(a) - ); - - // identity - let a = rng.sample(distr); - assert_eq!(Goldilocks::new(a) + Goldilocks::new(0), Goldilocks::new(a)); - assert_eq!(Goldilocks::new(a) * Goldilocks::new(1), Goldilocks::new(a)); - - // distribute - let a = rng.sample(distr); - let b = rng.sample(distr); - let c = rng.sample(distr); - assert_eq!( - (Goldilocks::new(a) + Goldilocks::new(b)) * Goldilocks::new(c), - (Goldilocks::new(a) * Goldilocks::new(c)) + (Goldilocks::new(b) * Goldilocks::new(c)) - ); - - const BITS: u32 = 2; - const B: u32 = 1 << BITS; - let basis = >::new(BITS); - let rng = &mut thread_rng(); - - let uniform = >::new(); - let a: 
Goldilocks = uniform.sample(rng); - let decompose = a.decompose(basis); - let compose = decompose - .into_iter() - .enumerate() - .fold(Goldilocks::new(0), |acc, (i, d)| { - acc + d * Goldilocks::new((B as u64).pow(i as u32)) - }); - - assert_eq!(compose, a); - } +type FF = Fp32; +type T = u32; +type W = u64; + +#[test] +fn test_fp() { + let p = FF::MODULUS.value(); + + let distr = Uniform::new(0, p); + let mut rng = thread_rng(); + + assert!(FF::is_prime_field()); + + // add + let a = rng.sample(distr); + let b = rng.sample(distr); + let c = (a + b) % p; + assert_eq!(FF::new(a) + FF::new(b), FF::new(c)); + + // add_assign + let mut a = FF::new(a); + a += FF::new(b); + assert_eq!(a, FF::new(c)); + + // sub + let a = rng.sample(distr); + let b = rng.gen_range(0..=a); + let c = (a - b) % p; + assert_eq!(FF::new(a) - FF::new(b), FF::new(c)); + + // sub_assign + let mut a = FF::new(a); + a -= FF::new(b); + assert_eq!(a, FF::new(c)); + + // mul + let a = rng.sample(distr); + let b = rng.sample(distr); + let c = ((a as W * b as W) % p as W) as T; + assert_eq!(FF::new(a) * FF::new(b), FF::new(c)); + + // mul_assign + let mut a = FF::new(a); + a *= FF::new(b); + assert_eq!(a, FF::new(c)); + + // div + let a = rng.sample(distr); + let b = rng.sample(distr); + let b_inv = b.pow_reduce(p - 2, BarrettModulus::::new(p)); + let c = ((a as W * b_inv as W) % p as W) as T; + assert_eq!(FF::new(a) / FF::new(b), FF::new(c)); + + // div_assign + let mut a = FF::new(a); + a /= FF::new(b); + assert_eq!(a, FF::new(c)); + + // neg + let a = rng.sample(distr); + let a_neg = -FF::new(a); + assert_eq!(FF::new(a) + a_neg, FF::zero()); + + let a = FF::zero(); + assert_eq!(a, -a); + + // inv + let a = rng.sample(distr); + let a_inv = a.pow_reduce(p - 2, BarrettModulus::::new(p)); + assert_eq!(FF::new(a).inv(), FF::new(a_inv)); + assert_eq!(FF::new(a) * FF::new(a_inv), FF::one()); + + // associative + let a = rng.sample(distr); + let b = rng.sample(distr); + let c = rng.sample(distr); + assert_eq!( + (FF::new(a) + FF::new(b)) + FF::new(c), + FF::new(a) + (FF::new(b) + FF::new(c)) + ); + assert_eq!( + (FF::new(a) * FF::new(b)) * FF::new(c), + FF::new(a) * (FF::new(b) * FF::new(c)) + ); + + // commutative + let a = rng.sample(distr); + let b = rng.sample(distr); + assert_eq!(FF::new(a) + FF::new(b), FF::new(b) + FF::new(a)); + assert_eq!(FF::new(a) * FF::new(b), FF::new(b) * FF::new(a)); + + // identity + let a = rng.sample(distr); + assert_eq!(FF::new(a) + FF::new(0), FF::new(a)); + assert_eq!(FF::new(a) * FF::new(1), FF::new(a)); + + // distribute + let a = rng.sample(distr); + let b = rng.sample(distr); + let c = rng.sample(distr); + assert_eq!( + (FF::new(a) + FF::new(b)) * FF::new(c), + (FF::new(a) * FF::new(c)) + (FF::new(b) * FF::new(c)) + ); +} + +#[test] +fn test_decompose() { + const BITS: u32 = 2; + const B: u32 = 1 << BITS; + let basis = >::new(BITS); + let rng = &mut thread_rng(); + + let uniform = >::new(); + let a: FF = uniform.sample(rng); + let decompose = a.decompose(basis); + let compose = decompose + .into_iter() + .enumerate() + .fold(FF::new(0), |acc, (i, d)| { + acc + d * FF::new(B.pow(i as T) as T) + }); + + assert_eq!(compose, a); +} + +#[cfg(feature = "concrete-ntt")] +use algebra::{ + modulus::{from_monty, to_canonical_u64, to_monty, BabyBearModulus, GoldilocksModulus}, + BabyBear, Goldilocks, +}; + +#[test] +#[cfg(feature = "concrete-ntt")] +fn baby_bear_test() { + let p = BabyBear::MODULUS_VALUE; + + let distr = Uniform::new(0, p); + let mut rng = thread_rng(); + + 
assert!(BabyBear::is_prime_field()); + + // add + let a = rng.sample(distr); + let b = rng.sample(distr); + let c = (a + b) % p; + assert_eq!(BabyBear::new(a) + BabyBear::new(b), BabyBear::new(c)); + + // add_assign + let mut a = BabyBear::new(a); + a += BabyBear::new(b); + assert_eq!(a, BabyBear::new(c)); + + // sub + let a = rng.sample(distr); + let b = rng.gen_range(0..=a); + let c = (a - b) % p; + assert_eq!(BabyBear::new(a) - BabyBear::new(b), BabyBear::new(c)); + + // sub_assign + let mut a = BabyBear::new(a); + a -= BabyBear::new(b); + assert_eq!(a, BabyBear::new(c)); + + // mul + let a = rng.sample(distr); + let b = rng.sample(distr); + let c = ((a as W * b as W) % p as W) as T; + assert_eq!(BabyBear::new(a) * BabyBear::new(b), BabyBear::new(c)); + + // mul_assign + let mut a = BabyBear::new(a); + a *= BabyBear::new(b); + assert_eq!(a, BabyBear::new(c)); + + // div + let a = rng.sample(distr); + let b = rng.sample(distr); + let b_inv = from_monty((to_monty(b)).pow_reduce(p - 2, BabyBearModulus)); + let c = ((a as W * b_inv as W) % (p as W)) as T; + assert_eq!(BabyBear::new(a) / BabyBear::new(b), BabyBear::new(c)); + + // div_assign + let mut a = BabyBear::new(a); + a /= BabyBear::new(b); + assert_eq!(a, BabyBear::new(c)); + + // neg + let a = rng.sample(distr); + let a_neg = -BabyBear::new(a); + assert_eq!(BabyBear::new(a) + a_neg, BabyBear::zero()); + + let a = BabyBear::zero(); + assert_eq!(a, -a); + + // inv + let a = rng.sample(distr); + let a_inv = from_monty((to_monty(a)).pow_reduce(p - 2, BabyBearModulus)); + assert_eq!(BabyBear::new(a).inv(), BabyBear::new(a_inv)); + assert_eq!(BabyBear::new(a) * BabyBear::new(a_inv), BabyBear::one()); + + // associative + let a = rng.sample(distr); + let b = rng.sample(distr); + let c = rng.sample(distr); + assert_eq!( + (BabyBear::new(a) + BabyBear::new(b)) + BabyBear::new(c), + BabyBear::new(a) + (BabyBear::new(b) + BabyBear::new(c)) + ); + assert_eq!( + (BabyBear::new(a) * BabyBear::new(b)) * BabyBear::new(c), + BabyBear::new(a) * (BabyBear::new(b) * BabyBear::new(c)) + ); + + // commutative + let a = rng.sample(distr); + let b = rng.sample(distr); + assert_eq!( + BabyBear::new(a) + BabyBear::new(b), + BabyBear::new(b) + BabyBear::new(a) + ); + assert_eq!( + BabyBear::new(a) * BabyBear::new(b), + BabyBear::new(b) * BabyBear::new(a) + ); + + // identity + let a = rng.sample(distr); + assert_eq!(BabyBear::new(a) + BabyBear::new(0), BabyBear::new(a)); + assert_eq!(BabyBear::new(a) * BabyBear::new(1), BabyBear::new(a)); + + // distribute + let a = rng.sample(distr); + let b = rng.sample(distr); + let c = rng.sample(distr); + assert_eq!( + (BabyBear::new(a) + BabyBear::new(b)) * BabyBear::new(c), + (BabyBear::new(a) * BabyBear::new(c)) + (BabyBear::new(b) * BabyBear::new(c)) + ); + + const BITS: u32 = 2; + const B: u32 = 1 << BITS; + let basis = >::new(BITS); + let rng = &mut thread_rng(); + + let uniform = >::new(); + let a: BabyBear = uniform.sample(rng); + let decompose = a.decompose(basis); + let compose = decompose + .into_iter() + .enumerate() + .fold(BabyBear::new(0), |acc, (i, d)| { + acc + d * BabyBear::new(B.pow(i as T) as T) + }); + + assert_eq!(compose, a); +} + +#[test] +#[cfg(feature = "concrete-ntt")] +fn goldilocks_test() { + let p = Goldilocks::MODULUS_VALUE; + + let distr = Uniform::new(0, p); + let mut rng = thread_rng(); + + assert!(Goldilocks::is_prime_field()); + + // add + let a = rng.sample(distr); + let b = rng.sample(distr); + let c = ((a as u128 + b as u128) % (p as u128)) as u64; + assert_eq!(Goldilocks::new(a) + 
Goldilocks::new(b), Goldilocks::new(c)); + + // add_assign + let mut a = Goldilocks::new(a); + a += Goldilocks::new(b); + assert_eq!(a, Goldilocks::new(c)); + + // sub + let a = rng.sample(distr); + let b = rng.gen_range(0..=a); + let c = (a - b) % p; + assert_eq!(Goldilocks::new(a) - Goldilocks::new(b), Goldilocks::new(c)); + + // sub_assign + let mut a = Goldilocks::new(a); + a -= Goldilocks::new(b); + assert_eq!(a, Goldilocks::new(c)); + + // mul + let a = rng.sample(distr); + let b = rng.sample(distr); + let c = ((a as u128 * b as u128) % p as u128) as u64; + assert_eq!(Goldilocks::new(a) * Goldilocks::new(b), Goldilocks::new(c)); + + // mul_assign + let mut a = Goldilocks::new(a); + a *= Goldilocks::new(b); + assert_eq!(a, Goldilocks::new(c)); + + // div + let a = rng.sample(distr); + let b = rng.sample(distr); + let b_inv = to_canonical_u64(b.pow_reduce(p - 2, GoldilocksModulus)); + let c = ((a as u128 * b_inv as u128) % (p as u128)) as u64; + assert_eq!(Goldilocks::new(a) / Goldilocks::new(b), Goldilocks::new(c)); + + // div_assign + let mut a = Goldilocks::new(a); + a /= Goldilocks::new(b); + assert_eq!(a, Goldilocks::new(c)); + + // neg + let a = rng.sample(distr); + let a_neg = -Goldilocks::new(a); + assert_eq!(Goldilocks::new(a) + a_neg, Goldilocks::zero()); + + let a = Goldilocks::zero(); + assert_eq!(a, -a); + + // inv + let a = rng.sample(distr); + let a_inv = to_canonical_u64(a.pow_reduce(p - 2, GoldilocksModulus)); + assert_eq!(Goldilocks::new(a).inv(), Goldilocks::new(a_inv)); + assert_eq!( + Goldilocks::new(a) * Goldilocks::new(a_inv), + Goldilocks::one() + ); + + // associative + let a = rng.sample(distr); + let b = rng.sample(distr); + let c = rng.sample(distr); + assert_eq!( + (Goldilocks::new(a) + Goldilocks::new(b)) + Goldilocks::new(c), + Goldilocks::new(a) + (Goldilocks::new(b) + Goldilocks::new(c)) + ); + assert_eq!( + (Goldilocks::new(a) * Goldilocks::new(b)) * Goldilocks::new(c), + Goldilocks::new(a) * (Goldilocks::new(b) * Goldilocks::new(c)) + ); + + // commutative + let a = rng.sample(distr); + let b = rng.sample(distr); + assert_eq!( + Goldilocks::new(a) + Goldilocks::new(b), + Goldilocks::new(b) + Goldilocks::new(a) + ); + assert_eq!( + Goldilocks::new(a) * Goldilocks::new(b), + Goldilocks::new(b) * Goldilocks::new(a) + ); + + // identity + let a = rng.sample(distr); + assert_eq!(Goldilocks::new(a) + Goldilocks::new(0), Goldilocks::new(a)); + assert_eq!(Goldilocks::new(a) * Goldilocks::new(1), Goldilocks::new(a)); + + // distribute + let a = rng.sample(distr); + let b = rng.sample(distr); + let c = rng.sample(distr); + assert_eq!( + (Goldilocks::new(a) + Goldilocks::new(b)) * Goldilocks::new(c), + (Goldilocks::new(a) * Goldilocks::new(c)) + (Goldilocks::new(b) * Goldilocks::new(c)) + ); + + const BITS: u32 = 2; + const B: u32 = 1 << BITS; + let basis = >::new(BITS); + let rng = &mut thread_rng(); + + let uniform = >::new(); + let a: Goldilocks = uniform.sample(rng); + let decompose = a.decompose(basis); + let compose = decompose + .into_iter() + .enumerate() + .fold(Goldilocks::new(0), |acc, (i, d)| { + acc + d * Goldilocks::new((B as u64).pow(i as u32)) + }); + + assert_eq!(compose, a); } diff --git a/algebra/tests/extension_test.rs b/algebra/tests/extension_test.rs index 7b12c0b0..8aa76c1e 100644 --- a/algebra/tests/extension_test.rs +++ b/algebra/tests/extension_test.rs @@ -1,3 +1,5 @@ +#[cfg(test)] +#[cfg(feature = "concrete-ntt")] mod tests { use algebra::{ AbstractExtensionField, BabyBear, BabyBearExetension, FieldUniformSampler, Goldilocks, diff 
--git a/fhe_core/Cargo.toml b/fhe_core/Cargo.toml index 8f3b9af8..3e61912c 100644 --- a/fhe_core/Cargo.toml +++ b/fhe_core/Cargo.toml @@ -10,7 +10,7 @@ lattice = { path = "../lattice", default-features = false } thiserror = { workspace = true } num-traits = { workspace = true } rand = { workspace = true } -rand_chacha = "0.3.1" +rand_chacha = { workspace = true } once_cell = { workspace = true } serde = { workspace = true } diff --git a/fhe_core/src/key_switch/lwe.rs b/fhe_core/src/key_switch/lwe.rs index 84a80106..cf7c6bf8 100644 --- a/fhe_core/src/key_switch/lwe.rs +++ b/fhe_core/src/key_switch/lwe.rs @@ -43,7 +43,7 @@ impl KeySwitchingLWEKey { lattice::Basis::::new(params.key_switching_basis_bits(), lwe_cipher_modulus_value); let neg_one = lwe_cipher_modulus_value - C::ONE; - // convertion + // conversion let convert = |v: &Q| { if v.is_zero() { C::ZERO diff --git a/fhe_core/src/key_switch/rlwe.rs b/fhe_core/src/key_switch/rlwe.rs index fe40cc06..814ecaca 100644 --- a/fhe_core/src/key_switch/rlwe.rs +++ b/fhe_core/src/key_switch/rlwe.rs @@ -42,7 +42,7 @@ impl KeySwitchingRLWEKey { let key_switching_basis = Basis::::new(parameters.key_switching_basis_bits()); - // convertion + // conversion let convert = |v: &C| { if *v == C::ZERO { Q::zero() diff --git a/fhe_core/src/parameter.rs b/fhe_core/src/parameter.rs index 460fb85b..915ab270 100644 --- a/fhe_core/src/parameter.rs +++ b/fhe_core/src/parameter.rs @@ -203,7 +203,7 @@ impl Parameters { match steps { Steps::BrMsKs => { if blind_rotation_type == BlindRotationType::NTRU { - // This method is not supportting `NTRU` now. + // This method is not supporting `NTRU` now. return Err(FHECoreError::StepsParametersNotCompatible); } if !(ring_secret_key_type == RingSecretKeyType::Binary diff --git a/fhe_core/src/secret_key.rs b/fhe_core/src/secret_key.rs index c4022141..1528cde7 100644 --- a/fhe_core/src/secret_key.rs +++ b/fhe_core/src/secret_key.rs @@ -120,7 +120,7 @@ impl SecretKeyPack { || params.ring_secret_key_type() == RingSecretKeyType::Ternary ); assert_eq!(params.lwe_dimension(), params.ring_dimension()); - // convertion + // conversion let convert = |v: &C| { if v.is_zero() { Q::zero() diff --git a/lattice/Cargo.toml b/lattice/Cargo.toml index 2e2f006b..e12cbafd 100644 --- a/lattice/Cargo.toml +++ b/lattice/Cargo.toml @@ -10,8 +10,8 @@ algebra = { path = "../algebra", default-features = false } num-traits = { workspace = true } once_cell = { workspace = true } -rand_distr = { workspace = true } rand = { workspace = true } +rand_distr = { workspace = true } serde = { workspace = true } [dev-dependencies] diff --git a/pcs/src/multilinear/brakedown/mod.rs b/pcs/src/multilinear/brakedown/mod.rs index f2183db3..c099f05f 100644 --- a/pcs/src/multilinear/brakedown/mod.rs +++ b/pcs/src/multilinear/brakedown/mod.rs @@ -48,7 +48,7 @@ where EF: AbstractExtensionField, { /// Prover answers the challenge by computing the product of the challenge vector - /// and the commited matirx. + /// and the committed matrix. /// The computation of the product can be viewed as a linear combination of rows /// of the matrix with challenge vector as the coefficients. fn answer_challenge( diff --git a/pcs/src/utils/arithmetic.rs b/pcs/src/utils/arithmetic.rs index 406e314d..8230272b 100644 --- a/pcs/src/utils/arithmetic.rs +++ b/pcs/src/utils/arithmetic.rs @@ -82,7 +82,7 @@ impl SparseMatrix { /// /// # Arguments /// - /// * `vecotr` - The vector that are multiplied by the sparce matrix. + /// * `vector` - The vector that are multiplied by the sparce matrix. 
/// * `target` - The vector that are added to the multiplication, and stores the result. #[inline] pub fn add_multiplied_vector(&self, vector: &[F], target: &mut [F]) { @@ -103,7 +103,7 @@ impl SparseMatrix { /// /// # Arguments /// - /// * `vecotr` - The vector in the extension field that are multiplied by the sparce matrix. + /// * `vector` - The vector in the extension field that are multiplied by the sparce matrix. /// * `target` - The vector in the extension field that are added to the multiplication, and stores the result. #[inline] pub fn add_multiplied_vector_ext>( diff --git a/pcs/src/utils/code/mod.rs b/pcs/src/utils/code/mod.rs index ec856c8d..9b866f5e 100644 --- a/pcs/src/utils/code/mod.rs +++ b/pcs/src/utils/code/mod.rs @@ -40,8 +40,8 @@ pub trait LinearCodeSpec: Sync + Send + Default { type Code: LinearCode; /// Generate the instance of linear code fn code(&self, message_ln: usize, rng: &mut (impl Rng + CryptoRng)) -> Self::Code; - /// Distance of the linear code when avaible from linear code specification + /// Distance of the linear code when available from linear code specification fn distance(&self) -> Result; - /// Proximity gap of the linear code when avaible from linear code specification + /// Proximity gap of the linear code when available from linear code specification fn proximity_gap(&self) -> Result; } diff --git a/pcs/src/utils/merkle_tree.rs b/pcs/src/utils/merkle_tree.rs index b3191a20..75c7832b 100644 --- a/pcs/src/utils/merkle_tree.rs +++ b/pcs/src/utils/merkle_tree.rs @@ -55,7 +55,7 @@ impl MerkleTree { } /// Instantiate a merkle tree by committing the leaves - /// In this case, we assume all the input leafs as the hashed values. + /// In this case, we assume all the input leaves as the hashed values. /// /// # Arguments. 
/// diff --git a/zkfhe/benches/zk_bfhe.rs b/zkfhe/benches/zk_bfhe.rs index e864a994..8e901991 100644 --- a/zkfhe/benches/zk_bfhe.rs +++ b/zkfhe/benches/zk_bfhe.rs @@ -1,7 +1,7 @@ use criterion::{black_box, criterion_group, criterion_main, Criterion}; use rand::Rng; use zkfhe::{ - bfhe::{Evaluator, DEFAULT_TERNARY_128_BITS_PARAMERTERS}, + bfhe::{Evaluator, DEFAULT_TERNARY_128_BITS_PARAMETERS}, Encryptor, KeyGen, }; @@ -12,7 +12,7 @@ pub fn criterion_benchmark(c: &mut Criterion) { let mut rng = rand::thread_rng(); // set parameter - let default_parameters = *DEFAULT_TERNARY_128_BITS_PARAMERTERS; + let default_parameters = *DEFAULT_TERNARY_128_BITS_PARAMETERS; // generate keys let sk = KeyGen::generate_secret_key(default_parameters); diff --git a/zkfhe/benches/zk_bnfhe.rs b/zkfhe/benches/zk_bnfhe.rs index 5a2d2dcd..aa28a2d7 100644 --- a/zkfhe/benches/zk_bnfhe.rs +++ b/zkfhe/benches/zk_bnfhe.rs @@ -1,7 +1,7 @@ use criterion::{black_box, criterion_group, criterion_main, Criterion}; use rand::Rng; use zkfhe::{ - ntru_bfhe::{Evaluator, DEFAULT_TERNARY_128_BITS_NTRU_PARAMERTERS}, + ntru_bfhe::{Evaluator, DEFAULT_TERNARY_128_BITS_NTRU_PARAMETERS}, Encryptor, KeyGen, }; @@ -12,7 +12,7 @@ pub fn criterion_benchmark(c: &mut Criterion) { let mut rng = rand::thread_rng(); // set parameter - let default_parameters = *DEFAULT_TERNARY_128_BITS_NTRU_PARAMERTERS; + let default_parameters = *DEFAULT_TERNARY_128_BITS_NTRU_PARAMETERS; // generate keys let sk = KeyGen::generate_secret_key(default_parameters); diff --git a/zkfhe/examples/count_ntt.rs b/zkfhe/examples/count_ntt.rs index a2a497e8..f2b5847a 100644 --- a/zkfhe/examples/count_ntt.rs +++ b/zkfhe/examples/count_ntt.rs @@ -5,7 +5,7 @@ use algebra::transformation::count; use fhe_core::utils::nand; use rand::Rng; use zkfhe::{ - bfhe::{Evaluator, DEFAULT_TERNARY_128_BITS_PARAMERTERS}, + bfhe::{Evaluator, DEFAULT_TERNARY_128_BITS_PARAMETERS}, Decryptor, Encryptor, KeyGen, }; @@ -17,7 +17,7 @@ fn main() { let mut rng = rand::thread_rng(); // set parameter - let params = *DEFAULT_TERNARY_128_BITS_PARAMERTERS; + let params = *DEFAULT_TERNARY_128_BITS_PARAMETERS; let noise_max = (params.lwe_cipher_modulus_value() as f64 / 16.0) as C; diff --git a/zkfhe/examples/zk_bfhe.rs b/zkfhe/examples/zk_bfhe.rs index b44d530b..d18b2c82 100644 --- a/zkfhe/examples/zk_bfhe.rs +++ b/zkfhe/examples/zk_bfhe.rs @@ -2,7 +2,7 @@ use algebra::NTTField; use fhe_core::{utils::*, LWECiphertext, LWEModulusType}; use rand::Rng; use zkfhe::{ - bfhe::{Evaluator, DEFAULT_TERNARY_128_BITS_PARAMERTERS}, + bfhe::{Evaluator, DEFAULT_TERNARY_128_BITS_PARAMETERS}, Decryptor, Encryptor, KeyGen, }; @@ -14,7 +14,7 @@ fn main() { let mut rng = rand::thread_rng(); // set parameter - let params = *DEFAULT_TERNARY_128_BITS_PARAMERTERS; + let params = *DEFAULT_TERNARY_128_BITS_PARAMETERS; let noise_max = (params.lwe_cipher_modulus_value() as f64 / 16.0) as C; @@ -53,9 +53,9 @@ fn main() { // perform all other homomorphic bit operations let start = std::time::Instant::now(); let (ct_and, ct_nand, ct_or, ct_nor, ct_xor, ct_xnor, ct_majority, ct_mux) = - join_bit_opearions(&eval, &x, &y, &z); + join_bit_operations(&eval, &x, &y, &z); let duration = start.elapsed(); - println!("Time elapsed in join_bit_opearions() is: {:?}", duration); + println!("Time elapsed in join_bit_operations() is: {:?}", duration); // majority let (ma, noise) = dec.decrypt_with_noise(&ct_majority); @@ -111,7 +111,7 @@ fn main() { } #[allow(clippy::type_complexity)] -fn join_bit_opearions( +fn join_bit_operations( eval: &Evaluator, x: 
&LWECiphertext, y: &LWECiphertext, diff --git a/zkfhe/examples/zk_bnfhe.rs b/zkfhe/examples/zk_bnfhe.rs index 23da6599..9a884c2a 100644 --- a/zkfhe/examples/zk_bnfhe.rs +++ b/zkfhe/examples/zk_bnfhe.rs @@ -1,7 +1,7 @@ use algebra::NTTField; use fhe_core::{utils::*, LWECiphertext, LWEModulusType, SecretKeyPack}; use rand::Rng; -use zkfhe::ntru_bfhe::{Evaluator, DEFAULT_TERNARY_128_BITS_NTRU_PARAMERTERS}; +use zkfhe::ntru_bfhe::{Evaluator, DEFAULT_TERNARY_128_BITS_NTRU_PARAMETERS}; use zkfhe::{Decryptor, Encryptor}; type M = bool; @@ -12,7 +12,7 @@ fn main() { let mut rng = rand::thread_rng(); // set parameter - let params = *DEFAULT_TERNARY_128_BITS_NTRU_PARAMERTERS; + let params = *DEFAULT_TERNARY_128_BITS_NTRU_PARAMETERS; let noise_max = (params.lwe_cipher_modulus_value() as f64 / 16.0) as C; @@ -51,9 +51,9 @@ fn main() { // perform all other homomorphic bit operations let start = std::time::Instant::now(); let (ct_and, ct_nand, ct_or, ct_nor, ct_xor, ct_xnor, ct_majority, ct_mux) = - join_bit_opearions(&evaluator, &x, &y, &z); + join_bit_operations(&evaluator, &x, &y, &z); let duration = start.elapsed(); - println!("Time elapsed in join_bit_opearions() is: {:?}", duration); + println!("Time elapsed in join_bit_operations() is: {:?}", duration); // majority let (ma, noise) = decryptor.decrypt_with_noise::(&ct_majority); @@ -109,7 +109,7 @@ fn main() { } #[allow(clippy::type_complexity)] -fn join_bit_opearions( +fn join_bit_operations( evk: &Evaluator, x: &LWECiphertext, y: &LWECiphertext, diff --git a/zkfhe/src/bfhe/mod.rs b/zkfhe/src/bfhe/mod.rs index 22ff3329..3086f296 100644 --- a/zkfhe/src/bfhe/mod.rs +++ b/zkfhe/src/bfhe/mod.rs @@ -2,4 +2,4 @@ mod evaluate; mod parameters; pub use evaluate::Evaluator; -pub use parameters::{CUSTOM_TERNARY_128_BITS_PARAMERTERS, DEFAULT_TERNARY_128_BITS_PARAMERTERS}; +pub use parameters::{CUSTOM_TERNARY_128_BITS_PARAMETERS, DEFAULT_TERNARY_128_BITS_PARAMETERS}; diff --git a/zkfhe/src/bfhe/parameters.rs b/zkfhe/src/bfhe/parameters.rs index ec9c9f2a..29dd9c1a 100644 --- a/zkfhe/src/bfhe/parameters.rs +++ b/zkfhe/src/bfhe/parameters.rs @@ -6,7 +6,7 @@ use fhe_core::{ use once_cell::sync::Lazy; /// Default 128-bits security Parameters -pub static DEFAULT_TERNARY_128_BITS_PARAMERTERS: Lazy> = +pub static DEFAULT_TERNARY_128_BITS_PARAMETERS: Lazy> = Lazy::new(|| { Parameters::::new(ConstParameters { lwe_dimension: 1024, @@ -29,7 +29,7 @@ pub static DEFAULT_TERNARY_128_BITS_PARAMERTERS: Lazy> = +pub static CUSTOM_TERNARY_128_BITS_PARAMETERS: Lazy> = Lazy::new(|| { Parameters::::new(ConstParameters { lwe_dimension: 512, diff --git a/zkfhe/src/ntru_bfhe/mod.rs b/zkfhe/src/ntru_bfhe/mod.rs index 0eff119b..50d33f80 100644 --- a/zkfhe/src/ntru_bfhe/mod.rs +++ b/zkfhe/src/ntru_bfhe/mod.rs @@ -2,4 +2,4 @@ mod evaluate; mod parameters; pub use evaluate::Evaluator; -pub use parameters::DEFAULT_TERNARY_128_BITS_NTRU_PARAMERTERS; +pub use parameters::DEFAULT_TERNARY_128_BITS_NTRU_PARAMETERS; diff --git a/zkfhe/src/ntru_bfhe/parameters.rs b/zkfhe/src/ntru_bfhe/parameters.rs index 2f674949..a0eec7a9 100644 --- a/zkfhe/src/ntru_bfhe/parameters.rs +++ b/zkfhe/src/ntru_bfhe/parameters.rs @@ -6,7 +6,7 @@ use fhe_core::{ use once_cell::sync::Lazy; /// Default 128-bits security Parameters -pub static DEFAULT_TERNARY_128_BITS_NTRU_PARAMERTERS: Lazy> = +pub static DEFAULT_TERNARY_128_BITS_NTRU_PARAMETERS: Lazy> = Lazy::new(|| { Parameters::::new(ConstParameters { lwe_dimension: 590, diff --git a/zkp/Cargo.toml b/zkp/Cargo.toml index b2e6491f..1d5733a5 100644 --- a/zkp/Cargo.toml 
+++ b/zkp/Cargo.toml @@ -8,11 +8,11 @@ edition = "2021" [dependencies] algebra = { path = "../algebra" } -rand = { workspace = true } thiserror = { workspace = true } +rand = { workspace = true } +rand_distr = { workspace = true } +rand_chacha = { workspace = true } num-traits = { workspace = true } once_cell = { workspace = true } -rand_distr = { workspace = true } -rand_chacha = "0.3.1" -serde = {workspace = true} -itertools = "0.12.1" +serde = { workspace = true } +itertools = { workspace = true } diff --git a/zkp/src/piop/accumulator.rs b/zkp/src/piop/accumulator.rs index 376f63e9..043c6d51 100644 --- a/zkp/src/piop/accumulator.rs +++ b/zkp/src/piop/accumulator.rs @@ -21,12 +21,12 @@ use super::ntt::{NTTProof, NTTSubclaim}; use super::{DecomposedBits, DecomposedBitsInfo, NTTInstance, NTTInstanceInfo, NTTIOP}; use super::{RlweCiphertext, RlweCiphertexts}; -/// SNARKs for Mutliplication between RLWE ciphertext and RGSW ciphertext +/// SNARKs for Multiplication between RLWE ciphertext and RGSW ciphertext pub struct AccumulatorIOP(PhantomData); /// proof generated by prover pub struct AccumulatorProof { - /// proof for bit decompostion + /// proof for bit decomposition pub bit_decomposition_proof: BitDecompositionProof, /// proof for ntt pub ntt_proof: NTTProof, @@ -47,6 +47,7 @@ pub struct AccumulatorSubclaim { /// accumulator witness when performing ACC = ACC + (X^{-a_u} + 1) * ACC * RGSW(Z_u) pub struct AccumulatorWitness { /// * Witness when performing input_rlwe_ntt := (X^{-a_u} + 1) * ACC + /// /// accumulator of ntt form pub accumulator_ntt: RlweCiphertext, /// scalar d = (X^{-a_u} + 1) of coefficient form @@ -56,7 +57,9 @@ pub struct AccumulatorWitness { /// result d * ACC of ntt form pub input_rlwe_ntt: RlweCiphertext, /// * Witness when performing output_rlwe_ntt := input_rlwe * RGSW(Z_u) where input_rlwe = (X^{-a_u} + 1) * ACC + /// /// result d * ACC of coefficient form + /// /// rlwe = (a, b): store the input ciphertext (a, b) where a and b are two polynomials represented by N coefficients. pub input_rlwe: RlweCiphertext, /// bits_rlwe = (a_bits, b_bits): a_bits (b_bits) corresponds to the bit decomposition result of a (b) in the input rlwe ciphertext @@ -71,7 +74,7 @@ pub struct AccumulatorWitness { pub output_rlwe_ntt: RlweCiphertext, } -/// Store the ntt instance, bit decomposition instance, and the sumcheck instance for an Accumulator upating t times +/// Store the ntt instance, bit decomposition instance, and the sumcheck instance for an Accumulator updating `t` times pub struct AccumulatorInstance { /// number of updations in Accumulator denoted by t pub num_updations: usize, diff --git a/zkp/src/piop/ntt/mod.rs b/zkp/src/piop/ntt/mod.rs index 7e98e4e1..33dbe9c4 100644 --- a/zkp/src/piop/ntt/mod.rs +++ b/zkp/src/piop/ntt/mod.rs @@ -128,7 +128,7 @@ impl IntermediateMLEs { } /// Generate MLE for the Fourier function F(u, x) for x \in \{0, 1\}^dim where u is the random point. -/// Dynamic programming implementaion for initializing F(u, x) in NTT (derived from zkCNN: https://eprint.iacr.org/2021/673) +/// Dynamic programming implementation for initializing F(u, x) in NTT (derived from zkCNN: https://eprint.iacr.org/2021/673) /// `N` is the dimension of the vector used to represent the polynomial in NTT. 
 ///
 /// In NTT, the Fourier matrix is different since we choose these points: ω^1, ω^3, ..., ω^{2N-1}
@@ -140,8 +140,11 @@ impl IntermediateMLEs {
 /// Hence, the final equation is F(u, x) = \prod_{i=0}^{\log{N-1}} ((1 - u_i) + u_i * ω^{2^{i + 1} * X}) * ω^{2^i * x_i}
 ///
 /// * In order to comprehend this implementation, it is strongly recommended to read the pure version `naive_init_fourier_table` and `init_fourier_table` in the `ntt_bare.rs`.
+///
 /// `naive_init_fourier_table` shows the original formula of this algorithm.
+///
 /// `init_fourier_table` shows the dynamic programming version of this algorithm.
+///
 /// `init_fourier_table_overall` (this function) stores many intermediate evaluations for the ease of the delegation of F(u, v)
 ///
 /// # Arguments
@@ -228,12 +231,15 @@ pub fn naive_w_power_times_x_table(
 }
 /// Evaluate the mle w^{2^exp * x} for a random point r \in F^{x_dim}
-/// This formula is also derived from the techniques in zkCNN: https://eprint.iacr.org/2021/673.
+/// This formula is also derived from the techniques in [zkCNN](https://eprint.iacr.org/2021/673).
+///
 /// w^{2^exp * r} = \sum_x eq(x, r) * w^{2^exp * x}
 /// = \prod_i (1 - r_i + r_i * w^{2^ {(exp + i) % log_m})
+///
 /// * Note that the above equation only holds for exp <= logM - x_dim;
 /// * otherwise, the exponent 2^exp * x involves a modular addition, disabling the decomposition.
-/// (Although I am not clearly making it out, the experiement result shows the above argument.)
+///
+/// (Although I cannot fully explain why, the experiment results support the above argument.)
 ///
 /// # Arguments:
 ///
@@ -272,7 +278,7 @@ impl NTTInstance {
         }
     }
-    /// Constuct a new instance from vector
+    /// Construct a new instance from vector
     #[inline]
     pub fn from_vec(
         log_n: usize,
@@ -288,7 +294,7 @@ impl NTTInstance {
         }
     }
-    /// Constuct a new instance from slice
+    /// Construct a new instance from slice
     #[inline]
     pub fn from_slice(
         log_n: usize,
@@ -304,7 +310,7 @@ impl NTTInstance {
         }
     }
-    /// Constuct a new instance from given info
+    /// Construct a new instance from given info
     #[inline]
     pub fn from_info(info: &NTTInstanceInfo) -> Self {
         Self {
diff --git a/zkp/src/piop/ntt/ntt_bare.rs b/zkp/src/piop/ntt/ntt_bare.rs
index 80faf6ac..c9637f66 100644
--- a/zkp/src/piop/ntt/ntt_bare.rs
+++ b/zkp/src/piop/ntt/ntt_bare.rs
@@ -69,7 +69,9 @@ pub struct NTTBareSubclaim {
 /// # Arguments
 /// * u: the random point
 /// * ntt_table: It stores the NTT table: ω^0, ω^1, ..., ω^{2N - 1}
+///
 /// In order to delegate the computation F(u, v) to prover, we decompose the ω^X term into the grand product.
+///
 /// Hence, the final equation is = \prod_{i=0}^{\log{N-1}} ((1 - u_i) + u_i * ω^{2^{i + 1} * X}) * ω^{2^i * x_i}
 pub fn naive_init_fourier_table(
     u: &[F],
@@ -94,7 +96,7 @@
 }
 /// Generate MLE for the Fourier function F(u, x) for x \in \{0, 1\}^dim where u is the random point.
-/// Dynamic programming implementaion for initializing F(u, x) in NTT (derived from zkCNN: https://eprint.iacr.org/2021/673)
+/// Dynamic programming implementation for initializing F(u, x) in NTT (derived from zkCNN: https://eprint.iacr.org/2021/673)
 /// `N` is the dimension of the vector used to represent the polynomial in NTT.
 ///
 /// In NTT, the Fourier matrix is different since we choose these points: ω^1, ω^3, ..., ω^{2N-1}
diff --git a/zkp/src/piop/rlwe_mul_rgsw.rs b/zkp/src/piop/rlwe_mul_rgsw.rs
index 1bff224b..6de90768 100644
--- a/zkp/src/piop/rlwe_mul_rgsw.rs
+++ b/zkp/src/piop/rlwe_mul_rgsw.rs
@@ -44,12 +44,12 @@ use rand_distr::Distribution;
 use super::bit_decomposition::{BitDecomposition, BitDecompositionProof, BitDecompositionSubClaim};
 use super::ntt::{NTTProof, NTTSubclaim};
 use super::{DecomposedBits, DecomposedBitsInfo, NTTInstance, NTTInstanceInfo, NTTIOP};
-/// SNARKs for Mutliplication between RLWE ciphertext and RGSW ciphertext
+/// SNARKs for Multiplication between RLWE ciphertext and RGSW ciphertext
 pub struct RlweMultRgswIOP(PhantomData);
 /// proof generated by prover
 pub struct RlweMultRgswProof {
-    /// proof for bit decompostion
+    /// proof for bit decomposition
     pub bit_decomposition_proof: BitDecompositionProof,
     /// proof for ntt
     pub ntt_proof: NTTProof,
@@ -106,7 +106,7 @@ impl RlweCiphertexts {
     }
 }
-/// Stores the multiplicaton instance between RLWE ciphertext and RGSW ciphertext with the corresponding NTT table
+/// Stores the multiplication instance between RLWE ciphertext and RGSW ciphertext with the corresponding NTT table
 /// Given (a, b) \in RLWE where a and b are two polynomials represented by N coefficients,
 /// and (c, f) \in RGSW = RLWE' \times RLWE' = (RLWE, ..., RLWE) \times (RLWE, ..., RLWE) where c = ((c0, c0'), ..., (ck-1, ck-1')) and f = ((f0, f0'), ..., (fk-1, fk-1'))
 pub struct RlweMultRgswInstance {
@@ -207,7 +207,7 @@ impl RlweMultRgswSubclaim {
     /// verify the subclaim
     ///
     /// # Arguments
-    /// * `u`: random point choosen by verifier
+    /// * `u`: random point chosen by verifier
     /// * `randomness_ntt`: randomness used for combining a batch of ntt instances into a single one
     /// * `ntt_coeffs`: coefficient form of the randomized ntt instance
     /// * `ntt_points`: point-value form of the randomized ntt instance
diff --git a/zkp/src/piop/round.rs b/zkp/src/piop/round.rs
index d90fdd9f..cbae741d 100644
--- a/zkp/src/piop/round.rs
+++ b/zkp/src/piop/round.rs
@@ -62,7 +62,7 @@ pub struct RoundInstance {
     pub input: Rc>,
     /// output denoted by b \in F_q
     pub output: Rc>,
-    /// decomposed bits of ouput used for range check
+    /// decomposed bits of output used for range check
     pub output_bits: DecomposedBits,
     /// offset denoted by c = a - b * k \in [1, k] such that c - 1 \in [0, k)
diff --git a/zkp/src/sumcheck/prover.rs b/zkp/src/sumcheck/prover.rs
index 66d91889..b0de941b 100644
--- a/zkp/src/sumcheck/prover.rs
+++ b/zkp/src/sumcheck/prover.rs
@@ -25,7 +25,7 @@ pub struct ProverState {
     /// Stores the list of products that is meant to be added together.
     /// Each multiplicand is represented by the index in flattened_ml_extensions
     pub list_of_products: Vec<(F, Vec)>,
-    /// Stores the linear operations, each of which is successively (in the same order) perfomed over the each MLE of each product stored in the above `products`
+    /// Stores the linear operations, each of which is successively (in the same order) performed over each MLE of each product stored in the above `products`
     /// so each (a: F, b: F) can used to wrap a linear operation over the original MLE f, i.e.
     /// a \cdot f + b
     pub linear_ops: Vec>,
     /// Stores a list of multilinear extensions in which `self.list_of_products` point to
diff --git a/zkp/tests/test_accumulator.rs b/zkp/tests/test_accumulator.rs
index d801f8bc..d1b586f5 100644
--- a/zkp/tests/test_accumulator.rs
+++ b/zkp/tests/test_accumulator.rs
@@ -81,7 +81,7 @@ fn sort_array_with_reversed_bits(input: &[F], log_n: u32) -> Ve
     output
 }
-/// Invoke the existing api to perform ntt transform and convert the bit-reversed order to normal oder
+/// Invoke the existing api to perform ntt transform and convert the bit-reversed order to normal order
 /// ```plain
 /// normal order: 0 1 2 3 4 5 6 7
 ///
diff --git a/zkp/tests/test_addition_in_zq.rs b/zkp/tests/test_addition_in_zq.rs
index 09092744..137a20b0 100644
--- a/zkp/tests/test_addition_in_zq.rs
+++ b/zkp/tests/test_addition_in_zq.rs
@@ -63,7 +63,7 @@ fn test_trivial_addition_in_zq() {
         .iter()
         .map(|x| x.get_decomposed_mles(base_len, bits_len))
         .collect();
-    let abd_bits_ref: Vec<_> = abc_bits.iter().collect();
+    let abc_bits_ref: Vec<_> = abc_bits.iter().collect();
     let abc_instance = AdditionInZqInstance::from_slice(&abc, &k, q, base, base_len, bits_len);
     let addition_info = abc_instance.info();
@@ -72,7 +72,7 @@ fn test_trivial_addition_in_zq() {
     let proof = AdditionInZq::prove(&abc_instance, &u);
     let subclaim = AdditionInZq::verify(&proof, &addition_info.decomposed_bits_info);
-    assert!(subclaim.verify_subclaim(q, &abc, k.as_ref(), &abd_bits_ref, &u, &addition_info));
+    assert!(subclaim.verify_subclaim(q, &abc, k.as_ref(), &abc_bits_ref, &u, &addition_info));
 }
 #[test]
diff --git a/zkp/tests/test_ntt.rs b/zkp/tests/test_ntt.rs
index 7c625cbb..528808d1 100644
--- a/zkp/tests/test_ntt.rs
+++ b/zkp/tests/test_ntt.rs
@@ -72,7 +72,7 @@ fn sort_array_with_reversed_bits(input: &[F], log_n: u32) -> Ve
     output
 }
-/// Invoke the existing api to perform ntt transform and convert the bit-reversed order to normal oder
+/// Invoke the existing api to perform ntt transform and convert the bit-reversed order to normal order
 /// In other words, the orders of input and output are both normal order.
 /// ```plain
 /// normal order: 0 1 2 3 4 5 6 7
@@ -86,7 +86,7 @@ fn ntt_transform_normal_order(log_n: u32, coeff: &[F]) -> V
     sort_array_with_reversed_bits(&ntt_form, log_n)
 }
-/// Invoke the existing api to perform ntt inverse transform and convert the bit-reversed order to normal oder
+/// Invoke the existing api to perform ntt inverse transform and convert the bit-reversed order to normal order
 /// In other words, the orders of input and output are both normal order.
 fn ntt_inverse_transform_normal_order(log_n: u32, points: &[F]) -> Vec {
     assert_eq!(points.len(), (1 << log_n) as usize);
@@ -98,7 +98,7 @@ fn ntt_inverse_transform_normal_order(log_n: u32, points: &
         .data()
 }
-/// Construct the fourier matrix and then compute the matrix-vector product with the coefficents.
+/// Construct the fourier matrix and then compute the matrix-vector product with the coefficients.
 /// The output is in the normal order: f(w), f(w^3), f(w^5), ..., f(w^{2n-1})
 fn naive_ntt_transform_normal_order(log_n: u32, coeff: &[FF]) -> Vec {
     assert_eq!(coeff.len(), (1 << log_n) as usize);
diff --git a/zkp/tests/test_rlwe_mult_rgsw.rs b/zkp/tests/test_rlwe_mult_rgsw.rs
index 0e2c14a3..6e75f3ce 100644
--- a/zkp/tests/test_rlwe_mult_rgsw.rs
+++ b/zkp/tests/test_rlwe_mult_rgsw.rs
@@ -47,7 +47,7 @@ fn sort_array_with_reversed_bits(input: &[F], log_n: u32) -> Ve
     output
 }
-/// Invoke the existing api to perform ntt transform and convert the bit-reversed order to normal oder
+/// Invoke the existing api to perform ntt transform and convert the bit-reversed order to normal order
 /// ```plain
 /// normal order: 0 1 2 3 4 5 6 7
 ///