diff --git a/.github/scripts/build-server.sh b/.github/scripts/build-server.sh
index 120a9065a4..16ebcf0ebc 100755
--- a/.github/scripts/build-server.sh
+++ b/.github/scripts/build-server.sh
@@ -6,8 +6,7 @@ environment=$1
aws s3 sync .git s3://tlsn-deploy/$environment/.git --delete
-cd notary-server
-cargo build --release
-aws s3 cp target/release/notary-server s3://tlsn-deploy/$environment/
+cargo build -p notary-server --release
+aws s3 cp ./target/release/notary-server s3://tlsn-deploy/$environment/
exit 0
diff --git a/.github/scripts/deploy-server.sh b/.github/scripts/deploy-server.sh
index 844bc6be3e..ff86ca1742 100755
--- a/.github/scripts/deploy-server.sh
+++ b/.github/scripts/deploy-server.sh
@@ -4,11 +4,11 @@ set -ex
environment=$1
branch=$2
-INSTANCE_ID=$(aws ec2 describe-instances --filters Name=tag:Name,Values=[tlsnotary-backend] --query "Reservations[*].Instances[*][InstanceId]" --output text)
+INSTANCE_ID=$(aws ec2 describe-instances --filters Name=tag:Name,Values=[tlsnotary-backend-v1] Name=instance-state-name,Values=[running] --query "Reservations[*].Instances[*][InstanceId]" --output text)
aws ec2 create-tags --resources $INSTANCE_ID --tags "Key=$environment,Value=$branch"
COMMIT_HASH=$(git rev-parse HEAD)
-DEPLOY_ID=$(aws deploy create-deployment --application-name tlsn-$environment --deployment-group-name tlsn-$environment-group --github-location repository=$GITHUB_REPOSITORY,commitId=$COMMIT_HASH --ignore-application-stop-failures --file-exists OVERWRITE --output text)
+DEPLOY_ID=$(aws deploy create-deployment --application-name tlsn-$environment-v1 --deployment-group-name tlsn-$environment-v1-group --github-location repository=$GITHUB_REPOSITORY,commitId=$COMMIT_HASH --ignore-application-stop-failures --file-exists OVERWRITE --output text)
while true; do
STATUS=$(aws deploy get-deployment --deployment-id $DEPLOY_ID --query 'deploymentInfo.status' --output text)
diff --git a/.github/scripts/modify-proxy.sh b/.github/scripts/modify-proxy.sh
new file mode 100755
index 0000000000..921b1641f3
--- /dev/null
+++ b/.github/scripts/modify-proxy.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+# This script is triggered by the Deploy server workflow; it uses AWS SSM to execute cd-scripts/modify_proxy.sh on the proxy server
+
+set -e
+
+GH_OWNER="tlsnotary"
+GH_REPO="tlsn"
+BACKEND_INSTANCE_ID=$(aws ec2 describe-instances --filters Name=tag:Name,Values=[tlsnotary-backend-v1] Name=instance-state-name,Values=[running] --query "Reservations[*].Instances[*][InstanceId]" --output text)
+PROXY_INSTANCE_ID=$(aws ec2 describe-instances --filters Name=tag:Name,Values=[tlsnotary-web] Name=instance-state-name,Values=[running] --query "Reservations[*].Instances[*][InstanceId]" --output text)
+TAGS=$(aws ec2 describe-instances --instance-ids $BACKEND_INSTANCE_ID --query 'Reservations[*].Instances[*].Tags')
+
+TAG=$(echo $TAGS | jq -r '.[][][] | select(.Key == "stable").Value')
+PORT=$(echo $TAGS | jq -r '.[][][] | select(.Key == "port").Value')
+
+COMMAND_ID=$(aws ssm send-command --document-name "AWS-RunRemoteScript" --instance-ids $PROXY_INSTANCE_ID --parameters '{"sourceType":["GitHub"],"sourceInfo":["{\"owner\":\"'${GH_OWNER}'\", \"repository\":\"'${GH_REPO}'\", \"getOptions\":\"branch:'${TAG}'\", \"path\": \"cd-scripts\"}"],"commandLine":["modify_proxy.sh '${PORT}' '${TAG}' "]}' --output text --query "Command.CommandId")
+
+while true; do
+ SSM_STATUS=$(aws ssm list-command-invocations --command-id $COMMAND_ID --details --query "CommandInvocations[].Status" --output text)
+
+ if [ $SSM_STATUS != "Success" ] && [ $SSM_STATUS != "InProgress" ]; then
+ echo "Proxy modification failed"
+ aws ssm list-command-invocations --command-id $COMMAND_ID --details --query "CommandInvocations[].CommandPlugins[].{Status:Status,Output:Output}"
+ exit 1
+ elif [ $SSM_STATUS = "Success" ]; then
+ aws ssm list-command-invocations --command-id $COMMAND_ID --details --query "CommandInvocations[].CommandPlugins[].{Status:Status,Output:Output}"
+ echo "Success"
+ break
+ fi
+
+ sleep 2
+done
+
+exit 0
diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml
new file mode 100644
index 0000000000..61a1eac9c7
--- /dev/null
+++ b/.github/workflows/bench.yml
@@ -0,0 +1,27 @@
+name: Run Benchmarks
+on:
+ # manual trigger
+ workflow_dispatch:
+
+jobs:
+ run-benchmarks:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Build Docker Image
+ run: |
+ docker build -t tlsn-bench . -f ./crates/benches/benches.Dockerfile
+
+ - name: Run Benchmarks
+ run: |
+ docker run --privileged -v ${{ github.workspace }}/crates/benches/:/benches tlsn-bench
+
+ - name: Upload runtime_vs_latency.html
+ uses: actions/upload-artifact@v4
+ with:
+ name: benchmark_graphs
+ path: |
+ ./crates/benches/runtime_vs_latency.html
+ ./crates/benches/runtime_vs_bandwidth.html
diff --git a/.github/workflows/cd-server.yml b/.github/workflows/cd-server.yml
index 562f6b9058..123dc8cc94 100644
--- a/.github/workflows/cd-server.yml
+++ b/.github/workflows/cd-server.yml
@@ -44,13 +44,13 @@ jobs:
exit 1
fi
- - name: Wait for test workflow to succeed
+ - name: Wait for integration test workflow to succeed
if: github.event_name == 'push'
uses: lewagon/wait-on-check-action@v1.3.1
with:
ref: ${{ github.ref }}
- # Have to be specify '(notary-server)', as we are using matrix for build_and_test job in ci.yml, else it will fail, more details [here](https://github.com/lewagon/wait-on-check-action#check-name)
- check-name: 'Build and test (notary-server)'
+ # More details [here](https://github.com/lewagon/wait-on-check-action#check-name)
+ check-name: 'Run tests release build'
repo-token: ${{ secrets.GITHUB_TOKEN }}
# How frequent (in seconds) this job will call GitHub API to check the status of the job specified at 'check-name'
wait-interval: 60
@@ -71,12 +71,6 @@ jobs:
uses: dtolnay/rust-toolchain@stable
with:
toolchain: stable
- components: clippy
-
- - name: Use caching
- uses: Swatinem/rust-cache@v2.5.0
- with:
- workspaces: ${{ matrix.package }} -> target
- name: Cargo build
run: |
@@ -85,3 +79,8 @@ jobs:
- name: Trigger Deployment
run: |
.github/scripts/deploy-server.sh ${{ steps.manipulate.outputs.env }} $GITHUB_REF_NAME
+
+ - name: Modify Proxy
+ if: ${{ steps.manipulate.outputs.env == 'stable' }}
+ run: |
+ .github/scripts/modify-proxy.sh
diff --git a/.github/workflows/cd.yml b/.github/workflows/cd.yml
index 9779e52426..60767652ad 100644
--- a/.github/workflows/cd.yml
+++ b/.github/workflows/cd.yml
@@ -16,18 +16,18 @@ jobs:
contents: read
packages: write
steps:
- - name: Wait for test workflow to succeed
+ - name: Wait for integration test workflow to succeed
uses: lewagon/wait-on-check-action@v1.3.1
with:
ref: ${{ github.ref }}
- # Have to be specify '(notary-server)', as we are using matrix for build_and_test job in ci.yml, else it will fail, more details [here](https://github.com/lewagon/wait-on-check-action#check-name)
- check-name: 'Build and test (notary-server)'
+ # More details [here](https://github.com/lewagon/wait-on-check-action#check-name)
+ check-name: 'Run tests release build'
repo-token: ${{ secrets.GITHUB_TOKEN }}
# How frequent (in seconds) this job will call GitHub API to check the status of the job specified at 'check-name'
wait-interval: 60
- name: Checkout repository
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: Log in to the Container registry
uses: docker/login-action@v2
@@ -49,4 +49,4 @@ jobs:
push: true
tags: ${{ steps.meta-notary-server.outputs.tags }}
labels: ${{ steps.meta-notary-server.outputs.labels }}
- file: ./notary-server/notary-server.Dockerfile
+ file: ./crates/notary/server/notary-server.Dockerfile
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 27197dfdfb..3ab2a2f6de 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -13,49 +13,39 @@ on:
env:
CARGO_TERM_COLOR: always
CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse
+ # We need a higher number of parallel rayon tasks than the default (which is 4)
+ # in order to prevent a deadlock, c.f.
+ # - https://github.com/tlsnotary/tlsn/issues/548
+ # - https://github.com/privacy-scaling-explorations/mpz/issues/178
+ # 32 seems to be big enough for the foreseeable future
+ RAYON_NUM_THREADS: 32
jobs:
- build_and_test:
- name: Build and test
- if: ( ! github.event.pull_request.draft )
+ fmt:
+ name: Check formatting
runs-on: ubuntu-latest
- strategy:
- fail-fast: false
- matrix:
- package:
- - components/integration-tests
- - components/uid-mux
- - components/cipher
- - components/universal-hash
- - components/aead
- - components/key-exchange
- - components/point-addition
- - components/prf
- - components/tls
- - tlsn
- - notary-server
- include:
- - package: components/integration-tests
- release: true
- - package: notary-server
- release: true
- - package: tlsn
- all-features: true
- defaults:
- run:
- working-directory: ${{ matrix.package }}
steps:
- name: Checkout repository
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
+      # We use nightly to support the `imports_granularity` rustfmt feature
- name: Install nightly rust toolchain with rustfmt
uses: dtolnay/rust-toolchain@stable
with:
toolchain: nightly
components: rustfmt
- - name: "Check formatting"
+ - name: Use caching
+ uses: Swatinem/rust-cache@v2.7.3
+
+ - name: Check formatting
run: cargo +nightly fmt --check --all
+ build-and-test:
+ name: Build and test
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
- name: Install stable rust toolchain
uses: dtolnay/rust-toolchain@stable
@@ -63,32 +53,89 @@ jobs:
toolchain: stable
components: clippy
- - name: "Clippy"
- run: cargo clippy --all-features --examples -- -D warnings
-
- name: Use caching
- uses: Swatinem/rust-cache@v2.5.0
+ uses: Swatinem/rust-cache@v2.7.3
+
+ - name: Clippy
+ run: cargo clippy --all-features --all-targets -- -D warnings
+
+ - name: Build
+ run: cargo build --all-targets
+
+ - name: Test
+ run: cargo test
+ build-wasm:
+ name: Build and test wasm
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Install stable rust toolchain
+ uses: dtolnay/rust-toolchain@stable
+ with:
+ targets: wasm32-unknown-unknown
+ toolchain: stable
+
+ - name: Install nightly rust toolchain
+ uses: dtolnay/rust-toolchain@stable
with:
- workspaces: ${{ matrix.package }} -> target
+ targets: wasm32-unknown-unknown,x86_64-unknown-linux-gnu
+ toolchain: nightly
+ components: rust-src
+
+ - name: Install chromedriver
+ run: |
+ sudo apt-get update
+ sudo apt-get install -y chromium-chromedriver
- - name: "Build"
- run: cargo build ${{ matrix.release && '--release' }}
+ - name: Install wasm-pack
+ run: curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh
+
+ - name: Use caching
+ uses: Swatinem/rust-cache@v2.7.3
- - name: "Test"
- if: ${{ matrix.release != true }}
- run: cargo test --lib --bins --tests --examples --workspace
+ - name: Run tests
+ run: |
+ cd crates/wasm-test-runner
+ ./run.sh
+ tests-integration:
+ name: Run tests release build
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
- - name: "Test all features"
- if: ${{ matrix.release != true && matrix.all-features == true }}
- run: cargo test --lib --bins --tests --examples --workspace --all-features
+ - name: Install stable rust toolchain
+ uses: dtolnay/rust-toolchain@stable
+ with:
+ toolchain: stable
- - name: "Integration Test"
- if: ${{ matrix.release == true }}
- run: cargo test --release --tests
+ - name: Use caching
+ uses: Swatinem/rust-cache@v2.7.3
- - name: "Integration Test all features"
- if: ${{ matrix.release == true && matrix.all-features == true }}
- run: cargo test --release --tests --all-features
+ - name: Add custom DNS entry to /etc/hosts for notary TLS test
+ run: echo "127.0.0.1 tlsnotaryserver.io" | sudo tee -a /etc/hosts
- - name: "Check that benches compile"
- run: cargo bench --no-run
+ - name: Run integration tests
+ run: cargo test --profile tests-integration --workspace --exclude tlsn-tls-client --exclude tlsn-tls-core -- --include-ignored
+ coverage:
+ runs-on: ubuntu-latest
+ env:
+ CARGO_TERM_COLOR: always
+ steps:
+ - uses: actions/checkout@v4
+ - name: Install stable rust toolchain
+ uses: dtolnay/rust-toolchain@stable
+ with:
+ toolchain: stable
+ - name: Install cargo-llvm-cov
+ uses: taiki-e/install-action@cargo-llvm-cov
+ - name: Generate code coverage
+ run: cargo llvm-cov --all-features --workspace --lcov --output-path lcov.info
+ - name: Upload coverage to Codecov
+ uses: codecov/codecov-action@v4
+ with:
+ token: ${{ secrets.CODECOV_TOKEN }}
+ files: lcov.info
+ fail_ci_if_error: true
\ No newline at end of file
diff --git a/.github/workflows/rebase.yml b/.github/workflows/rebase.yml
new file mode 100644
index 0000000000..d28b07aac8
--- /dev/null
+++ b/.github/workflows/rebase.yml
@@ -0,0 +1,24 @@
+name: Automatic Rebase
+on:
+ issue_comment:
+ types: [created]
+jobs:
+ rebase:
+ name: Rebase
+ runs-on: ubuntu-latest
+ if: >-
+ github.event.issue.pull_request != '' &&
+ contains(github.event.comment.body, '/rebase') &&
+ github.event.comment.author_association == 'MEMBER'
+ steps:
+ - name: Checkout the latest code
+ uses: actions/checkout@v4
+ with:
+ token: ${{ secrets.GITHUB_TOKEN }}
+ fetch-depth: 0 # otherwise, you will fail to push refs to dest repo
+ - name: Automatic Rebase
+ uses: cirrus-actions/rebase@1.8
+ with:
+ autosquash: false
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/rustdoc.yml b/.github/workflows/rustdoc.yml
index 11fd84c022..5c86ee6cd8 100644
--- a/.github/workflows/rustdoc.yml
+++ b/.github/workflows/rustdoc.yml
@@ -12,10 +12,9 @@ env:
jobs:
rustdoc:
- if: ( ! github.event.pull_request.draft )
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
- name: Install Rust Toolchain (Stable)
uses: dtolnay/rust-toolchain@stable
@@ -23,18 +22,18 @@ jobs:
toolchain: stable
- name: "rustdoc"
- run: cd tlsn; cargo doc -p tlsn-core -p tlsn-prover -p tlsn-verifier --no-deps --all-features
+ run: cargo doc -p tlsn-core -p tlsn-prover -p tlsn-verifier --no-deps --all-features
# --target-dir ${GITHUB_WORKSPACE}/docs
# https://dev.to/deciduously/prepare-your-rust-api-docs-for-github-pages-2n5i
- name: "Add index file -> tlsn_prover"
run: |
- echo "" > tlsn/target/doc/index.html
+ echo "" > target/doc/index.html
- name: Deploy
uses: peaceiris/actions-gh-pages@v3
if: ${{ github.ref == 'refs/heads/dev' }}
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
- publish_dir: tlsn/target/doc/
+ publish_dir: target/doc/
# cname: rustdocs.tlsnotary.org
diff --git a/.github/workflows/wasm.yml b/.github/workflows/wasm.yml
deleted file mode 100644
index 6b87afb181..0000000000
--- a/.github/workflows/wasm.yml
+++ /dev/null
@@ -1,47 +0,0 @@
-name: wasm-build
-
-on:
- push:
- branches:
- - dev
- tags:
- - "[v]?[0-9]+.[0-9]+.[0-9]+*"
- pull_request:
- branches:
- - dev
-
-env:
- CARGO_TERM_COLOR: always
- CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse
-
-jobs:
- build_and_test:
- name: Build for target wasm32-unknown-unknown
- runs-on: ubuntu-latest
- strategy:
- fail-fast: false
- matrix:
- package:
- - tlsn/tlsn-core
- - tlsn/tlsn-prover
- - components/tls/tls-client
- defaults:
- run:
- working-directory: ${{ matrix.package }}
- steps:
- - name: Checkout repository
- uses: actions/checkout@v3
-
- - name: Install stable rust toolchain
- uses: dtolnay/rust-toolchain@stable
- with:
- targets: wasm32-unknown-unknown
- toolchain: stable
-
- - name: Use caching
- uses: Swatinem/rust-cache@v2.5.0
- with:
- workspaces: ${{ matrix.package }} -> ../target
-
- - name: "Build"
- run: cargo build --target wasm32-unknown-unknown
diff --git a/.gitignore b/.gitignore
index c169048c8c..f79cb086da 100644
--- a/.gitignore
+++ b/.gitignore
@@ -30,3 +30,6 @@ Cargo.lock
# logs
*.log
+
+# metrics
+*.csv
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
deleted file mode 100644
index ef88a6b9c9..0000000000
--- a/CHANGELOG.md
+++ /dev/null
@@ -1,7 +0,0 @@
-# Changelog
-All notable changes to this project will be documented in this file.
-
-The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
-and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-
-## [Unreleased]
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 4147bd9727..ea212bbdbc 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -16,27 +16,20 @@ keywords.
Try to do one pull request per change.
-### Updating the changelog
+## Linting
-Update the changes you have made in
-[CHANGELOG](CHANGELOG.md)
-file under the **Unreleased** section.
+Before a Pull Request (PR) can be merged, the Continuous Integration (CI) pipeline automatically lints all code using [Clippy](https://doc.rust-lang.org/stable/clippy/usage.html). To ensure your code is free of linting issues before creating a PR, run the following command:
-Add the changes of your pull request to one of the following subsections,
-depending on the types of changes defined by
-[Keep a changelog](https://keepachangelog.com/en/1.0.0/):
-
-- `Added` for new features.
-- `Changed` for changes in existing functionality.
-- `Deprecated` for soon-to-be removed features.
-- `Removed` for now removed features.
-- `Fixed` for any bug fixes.
-- `Security` in case of vulnerabilities.
+```sh
+cargo clippy --all-features --all-targets -- -D warnings
+```
-If the required subsection does not exist yet under **Unreleased**, create it!
+This command will lint your code with all features and targets enabled, and treat any warnings as errors, ensuring that your code meets the required standards.
## Style
+This repository includes a `rustfmt.toml` file with custom formatting settings that are automatically validated by CI before any Pull Requests (PRs) can be merged. To ensure your code adheres to these standards, format your code using this configuration before submitting a PR. We strongly recommend enabling *auto format on save* to streamline this process. In Visual Studio Code (VSCode), you can enable this feature by turning on [`editor.formatOnSave`](https://code.visualstudio.com/docs/editor/codebasics#_formatting) in the settings.
+
### Capitalization and punctuation
Both line comments and doc comments must be capitalized. Each sentence must end with a period.
@@ -61,6 +54,7 @@ Comments for function arguments must adhere to this pattern:
/// Performs a certain computation. Any other description of the function.
///
/// # Arguments
+///
/// * `arg1` - The first argument.
/// * `arg2` - The second argument.
pub fn compute(...
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000000..e36d547f73
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,148 @@
+[workspace]
+members = [
+ "crates/benches",
+ "crates/common",
+ "crates/components/aead",
+ "crates/components/authdecode/authdecode",
+ "crates/components/authdecode/authdecode-core",
+ "crates/components/authdecode/transcript",
+ "crates/components/block-cipher",
+ "crates/components/hmac-sha256",
+ "crates/components/hmac-sha256-circuits",
+ "crates/components/key-exchange",
+ "crates/components/poseidon-halo2",
+ "crates/components/stream-cipher",
+ "crates/components/universal-hash",
+ "crates/core",
+ "crates/data-fixtures",
+ "crates/examples",
+ "crates/formats",
+ "crates/notary/client",
+ "crates/notary/server",
+ "crates/notary/tests-integration",
+ "crates/prover",
+ "crates/server-fixture/certs",
+ "crates/server-fixture/server",
+ "crates/tests-integration",
+ "crates/tls/backend",
+ "crates/tls/client",
+ "crates/tls/client-async",
+ "crates/tls/core",
+ "crates/tls/mpc",
+ "crates/tls/server-fixture",
+ "crates/verifier",
+ "crates/wasm",
+ "crates/wasm-test-runner",
+]
+resolver = "2"
+
+[profile.tests-integration]
+inherits = "release"
+opt-level = 1
+
+[workspace.dependencies]
+notary-client = { path = "crates/notary/client" }
+notary-server = { path = "crates/notary/server" }
+poseidon-halo2 = { path = "crates/components/poseidon-halo2" }
+tlsn-authdecode = { path = "crates/components/authdecode/authdecode" }
+tlsn-authdecode-core = { path = "crates/components/authdecode/authdecode-core" }
+tlsn-authdecode-transcript = { path = "crates/components/authdecode/transcript" }
+tls-server-fixture = { path = "crates/tls/server-fixture" }
+tlsn-aead = { path = "crates/components/aead" }
+tlsn-block-cipher = { path = "crates/components/block-cipher" }
+tlsn-common = { path = "crates/common" }
+tlsn-core = { path = "crates/core" }
+tlsn-data-fixtures = { path = "crates/data-fixtures" }
+tlsn-formats = { path = "crates/formats" }
+tlsn-hmac-sha256 = { path = "crates/components/hmac-sha256" }
+tlsn-hmac-sha256-circuits = { path = "crates/components/hmac-sha256-circuits" }
+tlsn-key-exchange = { path = "crates/components/key-exchange" }
+tlsn-prover = { path = "crates/prover" }
+tlsn-server-fixture = { path = "crates/server-fixture/server" }
+tlsn-server-fixture-certs = { path = "crates/server-fixture/certs" }
+tlsn-stream-cipher = { path = "crates/components/stream-cipher" }
+tlsn-tls-backend = { path = "crates/tls/backend" }
+tlsn-tls-client = { path = "crates/tls/client" }
+tlsn-tls-client-async = { path = "crates/tls/client-async" }
+tlsn-tls-core = { path = "crates/tls/core" }
+tlsn-tls-mpc = { path = "crates/tls/mpc" }
+tlsn-universal-hash = { path = "crates/components/universal-hash" }
+tlsn-utils = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "e7b2db6" }
+tlsn-utils-aio = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "e7b2db6" }
+tlsn-verifier = { path = "crates/verifier" }
+
+mpz-circuits = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
+mpz-common = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
+mpz-core = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
+mpz-garble = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
+mpz-garble-core = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
+mpz-ole = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
+mpz-ot = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
+mpz-share-conversion = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
+
+serio = { version = "0.1" }
+spansy = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "e7b2db6" }
+uid-mux = { version = "0.1", features = ["serio"] }
+
+aes = { version = "0.8" }
+aes-gcm = { version = "0.9" }
+anyhow = { version = "1.0" }
+async-trait = { version = "0.1" }
+async-tungstenite = { version = "0.25" }
+axum = { version = "0.7" }
+bcs = { version = "0.1" }
+bincode = { version = "1.3" }
+blake3 = { version = "1.5" }
+bytes = { version = "1.4" }
+chrono = { version = "0.4" }
+cipher = { version = "0.4" }
+criterion = { version = "0.5" }
+ctr = { version = "0.9" }
+derive_builder = { version = "0.12" }
+digest = { version = "0.10" }
+elliptic-curve = { version = "0.13" }
+enum-try-as-inner = { version = "0.1" }
+env_logger = { version = "0.10" }
+futures = { version = "0.3" }
+futures-rustls = { version = "0.26" }
+futures-util = { version = "0.3" }
+generic-array = { version = "0.14" }
+hex = { version = "0.4" }
+hmac = { version = "0.12" }
+http = { version = "1.1" }
+http-body-util = { version = "0.1" }
+hyper = { version = "1.1" }
+hyper-util = { version = "0.1" }
+k256 = { version = "0.13" }
+log = { version = "0.4" }
+once_cell = { version = "1.19" }
+opaque-debug = { version = "0.3" }
+p256 = { version = "0.13" }
+pkcs8 = { version = "0.10" }
+pin-project-lite = { version = "0.2" }
+rand = { version = "0.8" }
+rand_chacha = { version = "0.3" }
+rand_core = { version = "0.6" }
+regex = { version = "1.10" }
+ring = { version = "0.17" }
+rs_merkle = { git = "https://github.com/tlsnotary/rs-merkle.git", rev = "85f3e82" }
+rstest = { version = "0.17" }
+rustls = { version = "0.21" }
+rustls-pemfile = { version = "1.0" }
+sct = { version = "0.7" }
+serde = { version = "1.0" }
+serde_json = { version = "1.0" }
+sha2 = { version = "0.10" }
+signature = { version = "2.2" }
+thiserror = { version = "1.0" }
+tokio = { version = "1.38" }
+tokio-rustls = { version = "0.24" }
+tokio-util = { version = "0.7" }
+tracing = { version = "0.1" }
+tracing-subscriber = { version = "0.3" }
+uuid = { version = "1.4" }
+web-time = { version = "0.2" }
+webpki = { version = "0.22" }
+webpki-roots = { version = "0.26" }
+ws_stream_tungstenite = { version = "0.13" }
+zeroize = { version = "1.8" }
diff --git a/README.md b/README.md
index e127563920..1d5bf00596 100644
--- a/README.md
+++ b/README.md
@@ -8,8 +8,8 @@
[mit-badge]: https://img.shields.io/badge/license-MIT-blue.svg
[apache-badge]: https://img.shields.io/github/license/saltstack/salt
-[actions-badge]: https://github.com/tlsnotary/tlsn/actions/workflows/ci.yml/badge.svg
-[actions-url]: https://github.com/tlsnotary/tlsn/actions?query=workflow%3Arust+branch%3Adev
+[actions-badge]: https://github.com/tlsnotary/tlsn/actions/workflows/ci.yml/badge.svg?branch=dev
+[actions-url]: https://github.com/tlsnotary/tlsn/actions?query=workflow%3Aci+branch%3Adev
[Website](https://tlsnotary.org) |
[Documentation](https://docs.tlsnotary.org) |
@@ -18,6 +18,8 @@
# TLSNotary
+**Data provenance and privacy with secure multi-party computation**
+
## ⚠️ Notice
This project is currently under active development and should not be used in production. Expect bugs and regular major breaking changes.
@@ -30,25 +32,44 @@ All crates in this repository are licensed under either of
at your option.
-## Overview
+## Branches
+
+- [`main`](https://github.com/tlsnotary/tlsn/tree/main)
+ - Default branch — points to the latest release.
+ - This is stable and suitable for most users.
+- [`dev`](https://github.com/tlsnotary/tlsn/tree/dev)
+ - Development branch — contains the latest PRs.
+ - Developers should submit their PRs against this branch.
+
+## Directory
+
+- [examples](./crates/examples/): Examples on how to use the TLSNotary protocol.
+- [tlsn-prover](./crates/prover/): The library for the prover component.
+- [tlsn-verifier](./crates/verifier/): The library for the verifier component.
+- [notary](./crates/notary/): Implements the [notary server](https://docs.tlsnotary.org/intro.html#tls-verification-with-a-general-purpose-notary) and its client.
+- [components](./crates/components/): Houses low-level libraries.
+
+This repository contains the source code for the Rust implementation of the TLSNotary protocol. For additional tools and implementations related to TLSNotary, visit https://github.com/tlsnotary. This includes repositories such as [`tlsn-js`](https://github.com/tlsnotary/tlsn-js), [`tlsn-extension`](https://github.com/tlsnotary/tlsn-extension), [`explorer`](https://github.com/tlsnotary/explorer), among others.
-- **tls**: Home of the TLS logic of our protocol like handshake en-/decryption, ghash, **currently outdated**
-- **utils**: Utility functions which are frequently used everywhere
-- **actors**: Provides actors, which implement protocol-specific functionality using
- the actor pattern. They usually wrap an aio module
-- **universal-hash**: Implements ghash, which is used AES-GCM. Poly-1305 coming soon.
-- **point-addition**: Used in key-exchange and allows to compute a two party sharing of
- an EC curve point
-### General remarks
+## Development
-- the TLSNotary codebase makes heavy use of async Rust. Usually an aio
- crate/module implements the network IO and wraps a core crate/module which
- provides the protocol implementation. This is a frequent pattern you will
- encounter in the codebase.
-- some protocols are implemented using the actor pattern to facilitate
- asynchronous message processing with shared state.
+> [!IMPORTANT]
+> **Note on Rust-to-WASM Compilation**: This project requires compiling Rust into WASM, which needs [`clang`](https://clang.llvm.org/) version 16.0.0 or newer. MacOS users, be aware that Xcode's default `clang` might be older. If you encounter the error `No available targets are compatible with triple "wasm32-unknown-unknown"`, it's likely due to an outdated `clang`. Updating `clang` to a newer version should resolve this issue.
+>
+> For MacOS aarch64 users, if Apple's default `clang` isn't working, try installing `llvm` via Homebrew (`brew install llvm`). You can then prioritize the Homebrew `clang` over the default macOS version by modifying your `PATH`. Add the following line to your shell configuration file (e.g., `.bashrc`, `.zshrc`):
+> ```sh
+> export PATH="/opt/homebrew/opt/llvm/bin:$PATH"
+> ```
+If you run into this error:
+```
+Could not find directory of OpenSSL installation, and this `-sys` crate cannot
+ proceed without this knowledge. If OpenSSL is installed and this crate had
+ trouble finding it, you can set the `OPENSSL_DIR` environment variable for the
+ compilation process.
+```
+Make sure you have the development packages of OpenSSL installed (`libssl-dev` on Ubuntu or `openssl-devel` on Fedora).
## Contribution
@@ -56,4 +77,4 @@ Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
dual licensed as above, without any additional terms or conditions.
-See [CONTRIBUTING.md](CONTRIBUTING.md).
\ No newline at end of file
+See [CONTRIBUTING.md](CONTRIBUTING.md).
diff --git a/appspec-scripts/after_install.sh b/appspec-scripts/after_install.sh
deleted file mode 100755
index 062ad35599..0000000000
--- a/appspec-scripts/after_install.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-set -e
-export PATH=$PATH:/home/ubuntu/.cargo/bin
-
-APP_NAME=$(echo $APPLICATION_NAME | awk -F- '{ print $2 }')
-
-# Prepare directory
-sudo rm -rf ~/$APP_NAME/tlsn
-sudo mv ~/tlsn/ ~/$APP_NAME
-sudo mkdir -p ~/$APP_NAME/tlsn/notary-server/target/release
-sudo chown -R ubuntu.ubuntu ~/$APP_NAME
-
-# Download .git directory
-aws s3 cp s3://tlsn-deploy/$APP_NAME/.git ~/$APP_NAME/tlsn/.git --recursive
-
-# Download binary
-aws s3 cp s3://tlsn-deploy/$APP_NAME/notary-server ~/$APP_NAME/tlsn/notary-server/target/release
-chmod +x ~/$APP_NAME/tlsn/notary-server/target/release/notary-server
-
-exit 0
diff --git a/appspec-scripts/before_install.sh b/appspec-scripts/before_install.sh
deleted file mode 100755
index 07b5380508..0000000000
--- a/appspec-scripts/before_install.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-#set -e
-
-APP_NAME=$(echo $APPLICATION_NAME | awk -F- '{ print $2 }')
-
-if [ ! -d $APP_NAME ]; then
- mkdir ~/$APP_NAME
-fi
-
-exit 0
diff --git a/appspec-scripts/start_app.sh b/appspec-scripts/start_app.sh
deleted file mode 100755
index 08cd49ab22..0000000000
--- a/appspec-scripts/start_app.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-set -e
-export PATH=$PATH:/home/ubuntu/.cargo/bin
-
-APP_NAME=$(echo $APPLICATION_NAME | awk -F- '{ print $2 }')
-
-cd ~/$APP_NAME/tlsn/notary-server
-target/release/notary-server --config-file ~/.notary/$APP_NAME/config.yaml &> ~/$APP_NAME/tlsn/notary.log &
-
-exit 0
diff --git a/appspec-scripts/stop_app.sh b/appspec-scripts/stop_app.sh
deleted file mode 100755
index 3a30a291b1..0000000000
--- a/appspec-scripts/stop_app.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-set -e
-
-APP_NAME=$(echo $APPLICATION_NAME | awk -F- '{ print $2 }')
-
-PID=$(pgrep -f notary.*$APP_NAME)
-kill -15 $PID
-
-exit 0
diff --git a/appspec-scripts/validate_app.sh b/appspec-scripts/validate_app.sh
deleted file mode 100755
index 8efbc9727e..0000000000
--- a/appspec-scripts/validate_app.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-set -e
-
-# Verify proccess is running
-APP_NAME=$(echo $APPLICATION_NAME | awk -F- '{ print $2 }')
-
-pgrep -f notary.*$APP_NAME
-[ $? -eq 0 ] || exit 1
-
-# Verify that listening sockets exist
-if [ "$APPLICATION_NAME" == "tlsn-nightly" ]; then
- port=7048
-else
- port=7047
-fi
-
-exposed_ports=$(netstat -lnt4 | egrep -cw $port)
-[ $exposed_ports -eq 1 ] || exit 1
-
-exit 0
diff --git a/appspec.yml b/appspec.yml
index c6cf353d44..c1f6125f4c 100644
--- a/appspec.yml
+++ b/appspec.yml
@@ -10,22 +10,22 @@ permissions:
group: ubuntu
hooks:
BeforeInstall:
- - location: appspec-scripts/before_install.sh
+ - location: cd-scripts/appspec-scripts/before_install.sh
timeout: 300
runas: ubuntu
AfterInstall:
- - location: appspec-scripts/after_install.sh
+ - location: cd-scripts/appspec-scripts/after_install.sh
timeout: 300
runas: ubuntu
ApplicationStart:
- - location: appspec-scripts/start_app.sh
+ - location: cd-scripts/appspec-scripts/start_app.sh
timeout: 300
runas: ubuntu
ApplicationStop:
- - location: appspec-scripts/stop_app.sh
+ - location: cd-scripts/appspec-scripts/stop_app.sh
timeout: 300
runas: ubuntu
ValidateService:
- - location: appspec-scripts/validate_app.sh
+ - location: cd-scripts/appspec-scripts/validate_app.sh
timeout: 300
runas: ubuntu
diff --git a/build_all.sh b/build_all.sh
deleted file mode 100755
index e6945b7af4..0000000000
--- a/build_all.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-
-for package in components/uid-mux components/actors/actor-ot components/cipher components/universal-hash components/aead components/key-exchange components/point-addition components/prf components/tls tlsn; do
- pushd $package
- # cargo update
- cargo clean
- cargo build
- cargo test
- cargo clippy --all-features -- -D warnings || exit
- popd
-done
diff --git a/cd-scripts/appspec-scripts/after_install.sh b/cd-scripts/appspec-scripts/after_install.sh
new file mode 100755
index 0000000000..a6041c08db
--- /dev/null
+++ b/cd-scripts/appspec-scripts/after_install.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+set -e
+
+TAG=$(curl http://169.254.169.254/latest/meta-data/tags/instance/stable)
+APP_NAME=$(echo $APPLICATION_NAME | awk -F- '{ print $2 }')
+
+if [ $APP_NAME = "stable" ]; then
+ # Prepare directories for stable versions
+ sudo mkdir ~/${APP_NAME}_${TAG}
+ sudo mv ~/tlsn ~/${APP_NAME}_${TAG}
+ sudo mkdir -p ~/${APP_NAME}_${TAG}/tlsn/notary/target/release
+ sudo chown -R ubuntu.ubuntu ~/${APP_NAME}_${TAG}
+
+ # Download .git directory
+ aws s3 cp s3://tlsn-deploy/$APP_NAME/.git ~/${APP_NAME}_${TAG}/tlsn/.git --recursive
+
+ # Download binary
+ aws s3 cp s3://tlsn-deploy/$APP_NAME/notary-server ~/${APP_NAME}_${TAG}/tlsn/notary/target/release
+ chmod +x ~/${APP_NAME}_${TAG}/tlsn/notary/target/release/notary-server
+else
+ # Prepare directory for dev
+ sudo rm -rf ~/$APP_NAME/tlsn
+ sudo mv ~/tlsn/ ~/$APP_NAME
+ sudo mkdir -p ~/$APP_NAME/tlsn/notary/target/release
+ sudo chown -R ubuntu.ubuntu ~/$APP_NAME
+
+ # Download .git directory
+ aws s3 cp s3://tlsn-deploy/$APP_NAME/.git ~/$APP_NAME/tlsn/.git --recursive
+
+ # Download binary
+ aws s3 cp s3://tlsn-deploy/$APP_NAME/notary-server ~/$APP_NAME/tlsn/notary/target/release
+ chmod +x ~/$APP_NAME/tlsn/notary/target/release/notary-server
+fi
+
+exit 0
diff --git a/cd-scripts/appspec-scripts/before_install.sh b/cd-scripts/appspec-scripts/before_install.sh
new file mode 100755
index 0000000000..76a47c6115
--- /dev/null
+++ b/cd-scripts/appspec-scripts/before_install.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+set -e
+
+APP_NAME=$(echo $APPLICATION_NAME | awk -F- '{ print $2 }')
+
+if [ $APP_NAME = "stable" ]; then
+ VERSIONS_DEPLOYED=$(find ~/ -maxdepth 1 -type d -name 'stable_*')
+ VERSIONS_DEPLOYED_COUNT=$(echo $VERSIONS_DEPLOYED | wc -w)
+
+ if [ $VERSIONS_DEPLOYED_COUNT -gt 3 ]; then
+ echo "More than 3 stable versions found"
+ exit 1
+ fi
+else
+ if [ ! -d ~/$APP_NAME ]; then
+ mkdir ~/$APP_NAME
+ fi
+fi
+
+exit 0
diff --git a/cd-scripts/appspec-scripts/start_app.sh b/cd-scripts/appspec-scripts/start_app.sh
new file mode 100755
index 0000000000..0d449f6222
--- /dev/null
+++ b/cd-scripts/appspec-scripts/start_app.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+# Port tagging will also be used to manipulate proxy server via modify_proxy.sh script
+set -ex
+
+TAG=$(curl http://169.254.169.254/latest/meta-data/tags/instance/stable)
+APP_NAME=$(echo $APPLICATION_NAME | awk -F- '{ print $2 }')
+
+if [ $APP_NAME = "stable" ]; then
+ # Check if all stable ports are in use. If true, terminate the deployment
+ [[ $(netstat -lnt4 | egrep -c ':(7047|7057|7067)\s') -eq 3 ]] && { echo "All stable ports are in use"; exit 1; }
+ STABLE_PORTS="7047 7057 7067"
+ for PORT in $STABLE_PORTS; do
+ PORT_LISTENING=$(netstat -lnt4 | egrep -cw $PORT || true)
+ if [ $PORT_LISTENING -eq 0 ]; then
+ ~/${APP_NAME}_${TAG}/tlsn/notary/target/release/notary-server --config-file ~/.notary/${APP_NAME}_${PORT}/config.yaml &> ~/${APP_NAME}_${TAG}/tlsn/notary.log &
+ # Create a tag that will be used for service validation
+ INSTANCE_ID=$(curl http://169.254.169.254/latest/meta-data/instance-id)
+ aws ec2 create-tags --resources $INSTANCE_ID --tags "Key=port,Value=$PORT"
+ break
+ fi
+ done
+else
+ ~/$APP_NAME/tlsn/notary/target/release/notary-server --config-file ~/.notary/$APP_NAME/config.yaml &> ~/$APP_NAME/tlsn/notary.log &
+fi
+
+exit 0
diff --git a/cd-scripts/appspec-scripts/stop_app.sh b/cd-scripts/appspec-scripts/stop_app.sh
new file mode 100755
index 0000000000..ae92a8c06c
--- /dev/null
+++ b/cd-scripts/appspec-scripts/stop_app.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+# AWS CodeDeploy hook sequence: https://docs.aws.amazon.com/codedeploy/latest/userguide/reference-appspec-file-structure-hooks.html#appspec-hooks-server
+set -ex
+
+APP_NAME=$(echo $APPLICATION_NAME | awk -F- '{ print $2 }')
+
+if [ $APP_NAME = "stable" ]; then
+ VERSIONS_DEPLOYED=$(find ~/ -maxdepth 1 -type d -name 'stable_*')
+ VERSIONS_DEPLOYED_COUNT=$(echo $VERSIONS_DEPLOYED | wc -w)
+
+ # Remove oldest version if exists
+ if [ $VERSIONS_DEPLOYED_COUNT -eq 3 ]; then
+ echo "Candidate versions to be removed:"
+ OLDEST_DIR=""
+ OLDEST_TIME=""
+
+ for DIR in $VERSIONS_DEPLOYED; do
+ TIME=$(stat -c %W $DIR)
+
+ if [ -z $OLDEST_TIME ] || [ $TIME -lt $OLDEST_TIME ]; then
+ OLDEST_DIR=$DIR
+ OLDEST_TIME=$TIME
+ fi
+ done
+
+ echo "The oldest version is running under: $OLDEST_DIR"
+ PID=$(lsof $OLDEST_DIR/tlsn/notary/target/release/notary-server | awk '{ print $2 }' | tail -1)
+ kill -15 $PID || true
+ rm -rf $OLDEST_DIR
+ fi
+else
+ PID=$(pgrep -f notary.*$APP_NAME)
+ kill -15 $PID || true
+fi
+
+exit 0
diff --git a/cd-scripts/appspec-scripts/validate_app.sh b/cd-scripts/appspec-scripts/validate_app.sh
new file mode 100755
index 0000000000..3921fd337f
--- /dev/null
+++ b/cd-scripts/appspec-scripts/validate_app.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+set -e
+
+# Verify proccess is running
+APP_NAME=$(echo $APPLICATION_NAME | awk -F- '{ print $2 }')
+
+# Verify that listening sockets exist
+if [ $APP_NAME = "stable" ]; then
+ PORT=$(curl http://169.254.169.254/latest/meta-data/tags/instance/port)
+ ps -ef | grep notary.*$APP_NAME.*$PORT | grep -v grep
+ [ $? -eq 0 ] || exit 1
+else
+ PORT=7048
+ pgrep -f notary.*$APP_NAME
+ [ $? -eq 0 ] || exit 1
+fi
+
+EXPOSED_PORTS=$(netstat -lnt4 | egrep -cw $PORT)
+[ $EXPOSED_PORTS -eq 1 ] || exit 1
+
+exit 0
diff --git a/cd-scripts/modify_proxy.sh b/cd-scripts/modify_proxy.sh
new file mode 100755
index 0000000000..1abb5e5665
--- /dev/null
+++ b/cd-scripts/modify_proxy.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+# This script is executed on proxy side, in order to assign the available port to latest stable version
+set -e
+
+PORT=$1
+VERSION=$2
+
+sed -i "/# Port $PORT/{n;s/v[0-9].[0-9].[0-9]-[a-z]*.[0-9]*/$VERSION/g}" /etc/nginx/sites-available/tlsnotary-pse
+sed -i "/# Port $PORT/{n;n;s/v[0-9].[0-9].[0-9]-[a-z]*.[0-9]*/$VERSION/g}" /etc/nginx/sites-available/tlsnotary-pse
+
+nginx -t
+nginx -s reload
+
+exit 0
diff --git a/components/aead/Cargo.toml b/components/aead/Cargo.toml
deleted file mode 100644
index 546bc9a514..0000000000
--- a/components/aead/Cargo.toml
+++ /dev/null
@@ -1,41 +0,0 @@
-[package]
-name = "tlsn-aead"
-authors = ["TLSNotary Team"]
-description = "This crate provides an implementation of a two-party version of AES-GCM behind an AEAD trait"
-keywords = ["tls", "mpc", "2pc", "aead", "aes", "aes-gcm"]
-categories = ["cryptography"]
-license = "MIT OR Apache-2.0"
-version = "0.1.0-alpha.3"
-edition = "2021"
-
-[lib]
-name = "aead"
-
-[features]
-default = ["mock"]
-mock = []
-tracing = [
- "dep:tracing",
- "tlsn-block-cipher/tracing",
- "tlsn-stream-cipher/tracing",
- "tlsn-universal-hash/tracing",
-]
-
-[dependencies]
-tlsn-block-cipher = { path = "../cipher/block-cipher" }
-tlsn-stream-cipher = { path = "../cipher/stream-cipher" }
-tlsn-universal-hash = { path = "../universal-hash" }
-mpz-core = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ecb8c54" }
-mpz-garble = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ecb8c54" }
-tlsn-utils-aio = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "8d8ffe1" }
-
-async-trait = "0.1"
-derive_builder = "0.12"
-thiserror = "1"
-futures = "0.3"
-serde = "1"
-tracing = { version = "0.1", optional = true }
-
-[dev-dependencies]
-tokio = { version = "1", features = ["macros", "rt", "rt-multi-thread"] }
-aes-gcm = "0.10"
diff --git a/components/aead/src/aes_gcm/tag.rs b/components/aead/src/aes_gcm/tag.rs
deleted file mode 100644
index f3f3d1c71a..0000000000
--- a/components/aead/src/aes_gcm/tag.rs
+++ /dev/null
@@ -1,64 +0,0 @@
-use serde::{Deserialize, Serialize};
-use std::ops::Add;
-
-use crate::AeadError;
-
-pub(crate) const AES_GCM_TAG_LEN: usize = 16;
-
-#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
-pub(crate) struct AesGcmTagShare(pub(crate) [u8; 16]);
-
-impl AesGcmTagShare {
- pub(crate) fn from_unchecked(share: &[u8]) -> Result {
- if share.len() != 16 {
- return Err(AeadError::ValidationError(
- "Received tag share is not 16 bytes long".to_string(),
- ));
- }
- let mut result = [0u8; 16];
- result.copy_from_slice(share);
- Ok(Self(result))
- }
-}
-
-impl AsRef<[u8]> for AesGcmTagShare {
- fn as_ref(&self) -> &[u8] {
- &self.0
- }
-}
-
-impl Add for AesGcmTagShare {
- type Output = Vec;
-
- fn add(self, rhs: Self) -> Self::Output {
- self.0
- .iter()
- .zip(rhs.0.iter())
- .map(|(a, b)| a ^ b)
- .collect()
- }
-}
-
-/// Builds padded data for GHASH
-#[cfg_attr(feature = "tracing", tracing::instrument(level = "trace", ret))]
-pub(crate) fn build_ghash_data(mut aad: Vec, mut ciphertext: Vec) -> Vec {
- let associated_data_bitlen = (aad.len() as u64) * 8;
- let text_bitlen = (ciphertext.len() as u64) * 8;
-
- let len_block = ((associated_data_bitlen as u128) << 64) + (text_bitlen as u128);
-
- // pad data to be a multiple of 16 bytes
- let aad_padded_block_count = (aad.len() / 16) + (aad.len() % 16 != 0) as usize;
- aad.resize(aad_padded_block_count * 16, 0);
-
- let ciphertext_padded_block_count =
- (ciphertext.len() / 16) + (ciphertext.len() % 16 != 0) as usize;
- ciphertext.resize(ciphertext_padded_block_count * 16, 0);
-
- let mut data: Vec = Vec::with_capacity(aad.len() + ciphertext.len() + 16);
- data.extend(aad);
- data.extend(ciphertext);
- data.extend_from_slice(&len_block.to_be_bytes());
-
- data
-}
diff --git a/components/aead/src/msg.rs b/components/aead/src/msg.rs
deleted file mode 100644
index a15d318633..0000000000
--- a/components/aead/src/msg.rs
+++ /dev/null
@@ -1,29 +0,0 @@
-//! Message types for AEAD protocols.
-
-use serde::{Deserialize, Serialize};
-
-use mpz_core::{commit::Decommitment, hash::Hash};
-
-/// Aead messages.
-#[derive(Debug, Clone, Serialize, Deserialize)]
-#[allow(missing_docs)]
-pub enum AeadMessage {
- TagShareCommitment(Hash),
- TagShareDecommitment(Decommitment),
- TagShare(TagShare),
-}
-
-/// A tag share.
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct TagShare {
- /// The share of the tag.
- pub share: Vec,
-}
-
-impl From for TagShare {
- fn from(tag_share: crate::aes_gcm::AesGcmTagShare) -> Self {
- Self {
- share: tag_share.0.to_vec(),
- }
- }
-}
diff --git a/components/cipher/Cargo.toml b/components/cipher/Cargo.toml
deleted file mode 100644
index f75fa46e89..0000000000
--- a/components/cipher/Cargo.toml
+++ /dev/null
@@ -1,30 +0,0 @@
-[workspace]
-members = ["stream-cipher", "block-cipher"]
-resolver = "2"
-
-[workspace.dependencies]
-# tlsn
-mpz-circuits = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ecb8c54" }
-mpz-garble = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ecb8c54" }
-tlsn-utils = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "8d8ffe1" }
-
-# crypto
-aes = "0.8"
-ctr = "0.9.2"
-cipher = "0.4.3"
-
-# async
-async-trait = "0.1"
-futures = "0.3"
-tokio = { version = "1", default-features = false }
-
-# testing
-rstest = "0.17"
-criterion = "0.5"
-
-# error/log
-thiserror = "1"
-tracing = "0.1"
-
-# misc
-derive_builder = "0.12"
diff --git a/components/cipher/block-cipher/src/cipher.rs b/components/cipher/block-cipher/src/cipher.rs
deleted file mode 100644
index 21d2713606..0000000000
--- a/components/cipher/block-cipher/src/cipher.rs
+++ /dev/null
@@ -1,182 +0,0 @@
-use std::marker::PhantomData;
-
-use async_trait::async_trait;
-
-use mpz_garble::{value::ValueRef, Decode, DecodePrivate, Execute, Memory};
-use utils::id::NestedId;
-
-use crate::{BlockCipher, BlockCipherCircuit, BlockCipherConfig, BlockCipherError};
-
-struct State {
- execution_id: NestedId,
- key: Option,
-}
-
-/// An MPC block cipher
-pub struct MpcBlockCipher
-where
- C: BlockCipherCircuit,
- E: Memory + Execute + Decode + DecodePrivate + Send + Sync,
-{
- state: State,
-
- executor: E,
-
- _cipher: PhantomData,
-}
-
-impl MpcBlockCipher
-where
- C: BlockCipherCircuit,
- E: Memory + Execute + Decode + DecodePrivate + Send + Sync,
-{
- /// Creates a new MPC block cipher
- ///
- /// # Arguments
- ///
- /// * `config` - The configuration for the block cipher
- /// * `executor` - The executor to use for the MPC
- #[cfg_attr(
- feature = "tracing",
- tracing::instrument(level = "info", skip(executor))
- )]
- pub fn new(config: BlockCipherConfig, executor: E) -> Self {
- let execution_id = NestedId::new(&config.id).append_counter();
- Self {
- state: State {
- execution_id,
- key: None,
- },
- executor,
- _cipher: PhantomData,
- }
- }
-}
-
-#[async_trait]
-impl BlockCipher for MpcBlockCipher
-where
- C: BlockCipherCircuit,
- E: Memory + Execute + Decode + DecodePrivate + Send + Sync + Send,
-{
- #[cfg_attr(feature = "tracing", tracing::instrument(level = "info", skip(self)))]
- fn set_key(&mut self, key: ValueRef) {
- self.state.key = Some(key);
- }
-
- #[cfg_attr(
- feature = "tracing",
- tracing::instrument(level = "debug", skip(self, plaintext), err)
- )]
- async fn encrypt_private(&mut self, plaintext: Vec) -> Result, BlockCipherError> {
- let len = plaintext.len();
- let block: C::BLOCK = plaintext
- .try_into()
- .map_err(|_| BlockCipherError::InvalidInputLength(C::BLOCK_LEN, len))?;
-
- let key = self.state.key.clone().ok_or(BlockCipherError::KeyNotSet)?;
-
- let id = self.state.execution_id.increment_in_place().to_string();
-
- let msg = self
- .executor
- .new_private_input::(&format!("{}/msg", &id))?;
- let ciphertext = self
- .executor
- .new_output::(&format!("{}/ciphertext", &id))?;
-
- self.executor.assign(&msg, block)?;
-
- self.executor
- .execute(C::circuit(), &[key, msg], &[ciphertext.clone()])
- .await?;
-
- let mut outputs = self.executor.decode(&[ciphertext]).await?;
-
- let ciphertext: C::BLOCK = if let Ok(ciphertext) = outputs
- .pop()
- .expect("ciphertext should be present")
- .try_into()
- {
- ciphertext
- } else {
- panic!("ciphertext should be a block")
- };
-
- Ok(ciphertext.into())
- }
-
- #[cfg_attr(
- feature = "tracing",
- tracing::instrument(level = "debug", skip(self), err)
- )]
- async fn encrypt_blind(&mut self) -> Result, BlockCipherError> {
- let key = self.state.key.clone().ok_or(BlockCipherError::KeyNotSet)?;
-
- let id = self.state.execution_id.increment_in_place().to_string();
-
- let msg = self
- .executor
- .new_blind_input::(&format!("{}/msg", &id))?;
- let ciphertext = self
- .executor
- .new_output::(&format!("{}/ciphertext", &id))?;
-
- self.executor
- .execute(C::circuit(), &[key, msg], &[ciphertext.clone()])
- .await?;
-
- let mut outputs = self.executor.decode(&[ciphertext]).await?;
-
- let ciphertext: C::BLOCK = if let Ok(ciphertext) = outputs
- .pop()
- .expect("ciphertext should be present")
- .try_into()
- {
- ciphertext
- } else {
- panic!("ciphertext should be a block")
- };
-
- Ok(ciphertext.into())
- }
-
- #[cfg_attr(
- feature = "tracing",
- tracing::instrument(level = "debug", skip(self, plaintext), err)
- )]
- async fn encrypt_share(&mut self, plaintext: Vec) -> Result, BlockCipherError> {
- let len = plaintext.len();
- let block: C::BLOCK = plaintext
- .try_into()
- .map_err(|_| BlockCipherError::InvalidInputLength(C::BLOCK_LEN, len))?;
-
- let key = self.state.key.clone().ok_or(BlockCipherError::KeyNotSet)?;
-
- let id = self.state.execution_id.increment_in_place().to_string();
-
- let msg = self
- .executor
- .new_public_input::(&format!("{}/msg", &id))?;
- let ciphertext = self
- .executor
- .new_output::(&format!("{}/ciphertext", &id))?;
-
- self.executor.assign(&msg, block)?;
-
- self.executor
- .execute(C::circuit(), &[key, msg], &[ciphertext.clone()])
- .await?;
-
- let mut outputs = self.executor.decode_shared(&[ciphertext]).await?;
-
- let share: C::BLOCK =
- if let Ok(share) = outputs.pop().expect("share should be present").try_into() {
- share
- } else {
- panic!("share should be a block")
- };
-
- Ok(share.into())
- }
-}
diff --git a/components/cipher/block-cipher/src/lib.rs b/components/cipher/block-cipher/src/lib.rs
deleted file mode 100644
index 37b84afaaa..0000000000
--- a/components/cipher/block-cipher/src/lib.rs
+++ /dev/null
@@ -1,163 +0,0 @@
-//! This crate provides a 2PC block cipher implementation.
-//!
-//! Both parties work together to encrypt or share an encrypted block using a shared key.
-
-#![deny(missing_docs, unreachable_pub, unused_must_use)]
-#![deny(clippy::all)]
-#![deny(unsafe_code)]
-
-mod cipher;
-mod circuit;
-mod config;
-
-use async_trait::async_trait;
-
-use mpz_garble::value::ValueRef;
-
-pub use crate::{
- cipher::MpcBlockCipher,
- circuit::{Aes128, BlockCipherCircuit},
-};
-pub use config::{BlockCipherConfig, BlockCipherConfigBuilder, BlockCipherConfigBuilderError};
-
-/// Errors that can occur when using the block cipher
-#[derive(Debug, thiserror::Error)]
-#[allow(missing_docs)]
-pub enum BlockCipherError {
- #[error(transparent)]
- MemoryError(#[from] mpz_garble::MemoryError),
- #[error(transparent)]
- ExecutionError(#[from] mpz_garble::ExecutionError),
- #[error(transparent)]
- DecodeError(#[from] mpz_garble::DecodeError),
- #[error("Cipher key not set")]
- KeyNotSet,
- #[error("Input does not match block length: expected {0}, got {1}")]
- InvalidInputLength(usize, usize),
-}
-
-/// A trait for MPC block ciphers
-#[async_trait]
-pub trait BlockCipher: Send + Sync
-where
- Cipher: BlockCipherCircuit,
-{
- /// Sets the key for the block cipher.
- fn set_key(&mut self, key: ValueRef);
-
- /// Encrypts the given plaintext keeping it hidden from the other party(s).
- ///
- /// Returns the ciphertext
- ///
- /// * `plaintext` - The plaintext to encrypt
- async fn encrypt_private(&mut self, plaintext: Vec) -> Result, BlockCipherError>;
-
- /// Encrypts a plaintext provided by the other party(s).
- ///
- /// Returns the ciphertext
- async fn encrypt_blind(&mut self) -> Result, BlockCipherError>;
-
- /// Encrypts a plaintext provided by both parties. Fails if the
- /// plaintext provided by both parties does not match.
- ///
- /// Returns an additive share of the ciphertext
- ///
- /// * `plaintext` - The plaintext to encrypt
- async fn encrypt_share(&mut self, plaintext: Vec) -> Result, BlockCipherError>;
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
-
- use mpz_garble::{protocol::deap::mock::create_mock_deap_vm, Memory, Vm};
-
- use crate::circuit::Aes128;
-
- use ::aes::Aes128 as TestAes128;
- use ::cipher::{BlockEncrypt, KeyInit};
-
- fn aes128(key: [u8; 16], msg: [u8; 16]) -> [u8; 16] {
- let mut msg = msg.into();
- let cipher = TestAes128::new(&key.into());
- cipher.encrypt_block(&mut msg);
- msg.into()
- }
-
- #[tokio::test]
- async fn test_block_cipher_blind() {
- let leader_config = BlockCipherConfig::builder().id("test").build().unwrap();
- let follower_config = BlockCipherConfig::builder().id("test").build().unwrap();
-
- let key = [0u8; 16];
-
- let (mut leader_vm, mut follower_vm) = create_mock_deap_vm("test").await;
- let leader_thread = leader_vm.new_thread("test").await.unwrap();
- let follower_thread = follower_vm.new_thread("test").await.unwrap();
-
- // Key is public just for this test, typically it is private
- let leader_key = leader_thread.new_public_input::<[u8; 16]>("key").unwrap();
- let follower_key = follower_thread.new_public_input::<[u8; 16]>("key").unwrap();
-
- leader_thread.assign(&leader_key, key).unwrap();
- follower_thread.assign(&follower_key, key).unwrap();
-
- let mut leader = MpcBlockCipher::::new(leader_config, leader_thread);
- leader.set_key(leader_key);
-
- let mut follower = MpcBlockCipher::::new(follower_config, follower_thread);
- follower.set_key(follower_key);
-
- let plaintext = [0u8; 16];
-
- let (leader_ciphertext, follower_ciphertext) = tokio::try_join!(
- leader.encrypt_private(plaintext.to_vec()),
- follower.encrypt_blind()
- )
- .unwrap();
-
- let expected = aes128(key, plaintext);
-
- assert_eq!(leader_ciphertext, expected.to_vec());
- assert_eq!(leader_ciphertext, follower_ciphertext);
- }
-
- #[tokio::test]
- async fn test_block_cipher_share() {
- let leader_config = BlockCipherConfig::builder().id("test").build().unwrap();
- let follower_config = BlockCipherConfig::builder().id("test").build().unwrap();
-
- let key = [0u8; 16];
-
- let (mut leader_vm, mut follower_vm) = create_mock_deap_vm("test").await;
- let leader_thread = leader_vm.new_thread("test").await.unwrap();
- let follower_thread = follower_vm.new_thread("test").await.unwrap();
-
- // Key is public just for this test, typically it is private
- let leader_key = leader_thread.new_public_input::<[u8; 16]>("key").unwrap();
- let follower_key = follower_thread.new_public_input::<[u8; 16]>("key").unwrap();
-
- leader_thread.assign(&leader_key, key).unwrap();
- follower_thread.assign(&follower_key, key).unwrap();
-
- let mut leader = MpcBlockCipher::::new(leader_config, leader_thread);
- leader.set_key(leader_key);
-
- let mut follower = MpcBlockCipher::::new(follower_config, follower_thread);
- follower.set_key(follower_key);
-
- let plaintext = [0u8; 16];
-
- let (leader_share, follower_share) = tokio::try_join!(
- leader.encrypt_share(plaintext.to_vec()),
- follower.encrypt_share(plaintext.to_vec())
- )
- .unwrap();
-
- let expected = aes128(key, plaintext);
-
- let result: [u8; 16] = std::array::from_fn(|i| leader_share[i] ^ follower_share[i]);
-
- assert_eq!(result, expected);
- }
-}
diff --git a/components/cipher/stream-cipher/benches/mock.rs b/components/cipher/stream-cipher/benches/mock.rs
deleted file mode 100644
index bae9b694e4..0000000000
--- a/components/cipher/stream-cipher/benches/mock.rs
+++ /dev/null
@@ -1,145 +0,0 @@
-use criterion::{criterion_group, criterion_main, Criterion, Throughput};
-
-use mpz_garble::{protocol::deap::mock::create_mock_deap_vm, Memory, Vm};
-use tlsn_stream_cipher::{
- Aes128Ctr, CtrCircuit, MpcStreamCipher, StreamCipher, StreamCipherConfigBuilder,
-};
-
-async fn bench_stream_cipher_encrypt(thread_count: usize, len: usize) {
- let (mut leader_vm, mut follower_vm) = create_mock_deap_vm("test").await;
-
- let leader_thread = leader_vm.new_thread("key_config").await.unwrap();
- let leader_key = leader_thread.new_public_input::<[u8; 16]>("key").unwrap();
- let leader_iv = leader_thread.new_public_input::<[u8; 4]>("iv").unwrap();
-
- leader_thread.assign(&leader_key, [0u8; 16]).unwrap();
- leader_thread.assign(&leader_iv, [0u8; 4]).unwrap();
-
- let follower_thread = follower_vm.new_thread("key_config").await.unwrap();
- let follower_key = follower_thread.new_public_input::<[u8; 16]>("key").unwrap();
- let follower_iv = follower_thread.new_public_input::<[u8; 4]>("iv").unwrap();
-
- follower_thread.assign(&follower_key, [0u8; 16]).unwrap();
- follower_thread.assign(&follower_iv, [0u8; 4]).unwrap();
-
- let leader_thread_pool = leader_vm
- .new_thread_pool("mock", thread_count)
- .await
- .unwrap();
- let follower_thread_pool = follower_vm
- .new_thread_pool("mock", thread_count)
- .await
- .unwrap();
-
- let leader_config = StreamCipherConfigBuilder::default()
- .id("test".to_string())
- .build()
- .unwrap();
-
- let follower_config = StreamCipherConfigBuilder::default()
- .id("test".to_string())
- .build()
- .unwrap();
-
- let mut leader = MpcStreamCipher::::new(leader_config, leader_thread_pool);
- leader.set_key(leader_key, leader_iv);
-
- let mut follower = MpcStreamCipher::::new(follower_config, follower_thread_pool);
- follower.set_key(follower_key, follower_iv);
-
- let plaintext = vec![0u8; len];
- let explicit_nonce = vec![0u8; 8];
-
- _ = tokio::try_join!(
- leader.encrypt_private(explicit_nonce.clone(), plaintext),
- follower.encrypt_blind(explicit_nonce, len)
- )
- .unwrap();
-
- _ = tokio::try_join!(leader_vm.finalize(), follower_vm.finalize()).unwrap();
-}
-
-async fn bench_stream_cipher_zk(thread_count: usize, len: usize) {
- let (mut leader_vm, mut follower_vm) = create_mock_deap_vm("test").await;
-
- let key = [0u8; 16];
- let iv = [0u8; 4];
-
- let leader_thread = leader_vm.new_thread("key_config").await.unwrap();
- let leader_key = leader_thread.new_public_input::<[u8; 16]>("key").unwrap();
- let leader_iv = leader_thread.new_public_input::<[u8; 4]>("iv").unwrap();
-
- leader_thread.assign(&leader_key, key).unwrap();
- leader_thread.assign(&leader_iv, iv).unwrap();
-
- let follower_thread = follower_vm.new_thread("key_config").await.unwrap();
- let follower_key = follower_thread.new_public_input::<[u8; 16]>("key").unwrap();
- let follower_iv = follower_thread.new_public_input::<[u8; 4]>("iv").unwrap();
-
- follower_thread.assign(&follower_key, key).unwrap();
- follower_thread.assign(&follower_iv, iv).unwrap();
-
- let leader_thread_pool = leader_vm
- .new_thread_pool("mock", thread_count)
- .await
- .unwrap();
- let follower_thread_pool = follower_vm
- .new_thread_pool("mock", thread_count)
- .await
- .unwrap();
-
- let leader_config = StreamCipherConfigBuilder::default()
- .id("test".to_string())
- .build()
- .unwrap();
-
- let follower_config = StreamCipherConfigBuilder::default()
- .id("test".to_string())
- .build()
- .unwrap();
-
- let mut leader = MpcStreamCipher::::new(leader_config, leader_thread_pool);
- leader.set_key(leader_key, leader_iv);
-
- let mut follower = MpcStreamCipher::::new(follower_config, follower_thread_pool);
- follower.set_key(follower_key, follower_iv);
-
- let plaintext = vec![0u8; len];
- let explicit_nonce = [0u8; 8];
- let ciphertext = Aes128Ctr::apply_keystream(&key, &iv, 2, &explicit_nonce, &plaintext).unwrap();
-
- _ = tokio::try_join!(
- leader.prove_plaintext(explicit_nonce.to_vec(), plaintext),
- follower.verify_plaintext(explicit_nonce.to_vec(), ciphertext)
- )
- .unwrap();
-
- _ = tokio::try_join!(leader_vm.finalize(), follower_vm.finalize()).unwrap();
-}
-
-fn criterion_benchmark(c: &mut Criterion) {
- let rt = tokio::runtime::Runtime::new().unwrap();
- let thread_count = 8;
- let len = 1024;
-
- let mut group = c.benchmark_group("stream_cipher/encrypt_private");
- group.throughput(Throughput::Bytes(len as u64));
- group.bench_function(format!("{}", len), |b| {
- b.to_async(&rt)
- .iter(|| async { bench_stream_cipher_encrypt(thread_count, len).await })
- });
-
- drop(group);
-
- let mut group = c.benchmark_group("stream_cipher/zk");
- group.throughput(Throughput::Bytes(len as u64));
- group.bench_function(format!("{}", len), |b| {
- b.to_async(&rt)
- .iter(|| async { bench_stream_cipher_zk(thread_count, len).await })
- });
-
- drop(group);
-}
-
-criterion_group!(benches, criterion_benchmark);
-criterion_main!(benches);
diff --git a/components/cipher/stream-cipher/src/stream_cipher.rs b/components/cipher/stream-cipher/src/stream_cipher.rs
deleted file mode 100644
index 601bf6921a..0000000000
--- a/components/cipher/stream-cipher/src/stream_cipher.rs
+++ /dev/null
@@ -1,842 +0,0 @@
-use async_trait::async_trait;
-use mpz_circuits::types::Value;
-use std::{collections::HashMap, fmt::Debug, marker::PhantomData};
-
-use mpz_garble::{
- value::ValueRef, Decode, DecodePrivate, Execute, Memory, Prove, Thread, ThreadPool, Verify,
-};
-use utils::id::NestedId;
-
-use crate::{
- cipher::CtrCircuit,
- circuit::build_array_xor,
- config::{InputText, KeyBlockConfig, StreamCipherConfig},
- StreamCipher, StreamCipherError,
-};
-
-/// An MPC stream cipher.
-pub struct MpcStreamCipher
-where
- C: CtrCircuit,
- E: Thread + Execute + Decode + DecodePrivate + Send + Sync,
-{
- config: StreamCipherConfig,
- state: State,
- thread_pool: ThreadPool,
-
- _cipher: PhantomData,
-}
-
-struct State {
- /// Encoded key and IV for the cipher.
- encoded_key_iv: Option,
- /// Key and IV for the cipher.
- key_iv: Option,
- /// Unique identifier for each execution of the cipher.
- execution_id: NestedId,
- /// Unique identifier for each byte in the transcript.
- transcript_counter: NestedId,
- /// Unique identifier for each byte in the ciphertext (prefixed with execution id).
- ciphertext_counter: NestedId,
- /// Persists the transcript counter for each transcript id.
- transcript_state: HashMap,
-}
-
-#[derive(Clone)]
-struct EncodedKeyAndIv {
- key: ValueRef,
- iv: ValueRef,
-}
-
-#[derive(Clone)]
-struct KeyAndIv {
- key: Vec,
- iv: Vec,
-}
-
-impl MpcStreamCipher
-where
- C: CtrCircuit,
- E: Thread + Execute + Prove + Verify + Decode + DecodePrivate + Send + Sync + 'static,
-{
- /// Creates a new counter-mode cipher.
- pub fn new(config: StreamCipherConfig, thread_pool: ThreadPool) -> Self {
- let execution_id = NestedId::new(&config.id).append_counter();
- let transcript_counter = NestedId::new(&config.transcript_id).append_counter();
- let ciphertext_counter = execution_id.append_string("ciphertext").append_counter();
-
- Self {
- config,
- state: State {
- encoded_key_iv: None,
- key_iv: None,
- execution_id,
- transcript_counter,
- ciphertext_counter,
- transcript_state: HashMap::new(),
- },
- thread_pool,
- _cipher: PhantomData,
- }
- }
-
- /// Returns unique identifiers for the next bytes in the transcript.
- fn plaintext_ids(&mut self, len: usize) -> Vec {
- (0..len)
- .map(|_| {
- self.state
- .transcript_counter
- .increment_in_place()
- .to_string()
- })
- .collect()
- }
-
- /// Returns unique identifiers for the next bytes in the ciphertext.
- fn ciphertext_ids(&mut self, len: usize) -> Vec {
- (0..len)
- .map(|_| {
- self.state
- .ciphertext_counter
- .increment_in_place()
- .to_string()
- })
- .collect()
- }
-
- async fn compute_keystream(
- &mut self,
- explicit_nonce: Vec,
- start_ctr: usize,
- len: usize,
- mode: ExecutionMode,
- ) -> Result {
- let EncodedKeyAndIv { key, iv } = self
- .state
- .encoded_key_iv
- .clone()
- .ok_or(StreamCipherError::KeyIvNotSet)?;
-
- let explicit_nonce_len = explicit_nonce.len();
- let explicit_nonce: C::NONCE = explicit_nonce.try_into().map_err(|_| {
- StreamCipherError::InvalidExplicitNonceLength {
- expected: C::NONCE_LEN,
- actual: explicit_nonce_len,
- }
- })?;
-
- // Divide msg length by block size rounding up
- let block_count = (len / C::BLOCK_LEN) + (len % C::BLOCK_LEN != 0) as usize;
-
- let block_configs = (0..block_count)
- .map(|i| {
- KeyBlockConfig::::new(
- key.clone(),
- iv.clone(),
- explicit_nonce,
- (start_ctr + i) as u32,
- )
- })
- .collect::>();
-
- let execution_id = self.state.execution_id.increment_in_place();
-
- let keystream = compute_keystream(
- &mut self.thread_pool,
- execution_id,
- block_configs,
- len,
- mode,
- )
- .await?;
-
- Ok(keystream)
- }
-
- /// Applies the keystream to the provided input text.
- #[cfg_attr(
- feature = "tracing",
- tracing::instrument(level = "trace", skip(self), err)
- )]
- async fn apply_keystream(
- &mut self,
- input_text: InputText,
- keystream: ValueRef,
- mode: ExecutionMode,
- ) -> Result {
- let execution_id = self.state.execution_id.increment_in_place();
-
- let mut scope = self.thread_pool.new_scope();
- scope.push(move |thread| {
- Box::pin(apply_keystream(
- thread,
- mode,
- execution_id,
- input_text,
- keystream,
- ))
- });
-
- let output_text = scope.wait().await.into_iter().next().unwrap()?;
-
- Ok(output_text)
- }
-
- async fn decode_public(&mut self, value: ValueRef) -> Result {
- let mut scope = self.thread_pool.new_scope();
- scope.push(move |thread| Box::pin(async move { thread.decode(&[value]).await }));
- let mut output = scope.wait().await.into_iter().next().unwrap()?;
- Ok(output.pop().unwrap())
- }
-
- async fn decode_private(&mut self, value: ValueRef) -> Result {
- let mut scope = self.thread_pool.new_scope();
- scope.push(move |thread| Box::pin(async move { thread.decode_private(&[value]).await }));
- let mut output = scope.wait().await.into_iter().next().unwrap()?;
- Ok(output.pop().unwrap())
- }
-
- async fn decode_blind(&mut self, value: ValueRef) -> Result<(), StreamCipherError> {
- let mut scope = self.thread_pool.new_scope();
- scope.push(move |thread| Box::pin(async move { thread.decode_blind(&[value]).await }));
- scope.wait().await.into_iter().next().unwrap()?;
- Ok(())
- }
-
- async fn prove(&mut self, value: ValueRef) -> Result<(), StreamCipherError> {
- let mut scope = self.thread_pool.new_scope();
- scope.push(move |thread| Box::pin(async move { thread.prove(&[value]).await }));
- scope.wait().await.into_iter().next().unwrap()?;
- Ok(())
- }
-
- async fn verify(&mut self, value: ValueRef, expected: Value) -> Result<(), StreamCipherError> {
- let mut scope = self.thread_pool.new_scope();
- scope.push(move |thread| {
- Box::pin(async move { thread.verify(&[value], &[expected]).await })
- });
- scope.wait().await.into_iter().next().unwrap()?;
- Ok(())
- }
-}
-
-#[async_trait]
-impl StreamCipher for MpcStreamCipher
-where
- C: CtrCircuit,
- E: Thread + Execute + Prove + Verify + Decode + DecodePrivate + Send + Sync + 'static,
-{
- fn set_key(&mut self, key: ValueRef, iv: ValueRef) {
- self.state.encoded_key_iv = Some(EncodedKeyAndIv { key, iv });
- }
-
- async fn decode_key_private(&mut self) -> Result<(), StreamCipherError> {
- let EncodedKeyAndIv { key, iv } = self
- .state
- .encoded_key_iv
- .clone()
- .ok_or(StreamCipherError::KeyIvNotSet)?;
-
- let mut scope = self.thread_pool.new_scope();
- scope.push(move |thread| Box::pin(async move { thread.decode_private(&[key, iv]).await }));
- let output = scope.wait().await.into_iter().next().unwrap()?;
-
- let [key, iv]: [_; 2] = output.try_into().expect("decoded 2 values");
- let key: Vec = key.try_into().expect("key is an array");
- let iv: Vec = iv.try_into().expect("iv is an array");
-
- self.state.key_iv = Some(KeyAndIv { key, iv });
-
- Ok(())
- }
-
- async fn decode_key_blind(&mut self) -> Result<(), StreamCipherError> {
- let EncodedKeyAndIv { key, iv } = self
- .state
- .encoded_key_iv
- .clone()
- .ok_or(StreamCipherError::KeyIvNotSet)?;
-
- let mut scope = self.thread_pool.new_scope();
- scope.push(move |thread| Box::pin(async move { thread.decode_blind(&[key, iv]).await }));
- scope.wait().await.into_iter().next().unwrap()?;
-
- Ok(())
- }
-
- fn set_transcript_id(&mut self, id: &str) {
- let current_id = self
- .state
- .transcript_counter
- .root()
- .expect("root id is set");
- let current_counter = self.state.transcript_counter.clone();
- self.state
- .transcript_state
- .insert(current_id.to_string(), current_counter);
-
- if let Some(counter) = self.state.transcript_state.get(id) {
- self.state.transcript_counter = counter.clone();
- } else {
- self.state.transcript_counter = NestedId::new(id).append_counter();
- }
- }
-
- #[cfg_attr(
- feature = "tracing",
- tracing::instrument(level = "debug", skip(self, plaintext), err)
- )]
- async fn encrypt_public(
- &mut self,
- explicit_nonce: Vec,
- plaintext: Vec,
- ) -> Result, StreamCipherError> {
- let keystream = self
- .compute_keystream(
- explicit_nonce,
- self.config.start_ctr,
- plaintext.len(),
- ExecutionMode::Mpc,
- )
- .await?;
-
- let plaintext_ids = self.plaintext_ids(plaintext.len());
- let ciphertext = self
- .apply_keystream(
- InputText::Public {
- ids: plaintext_ids,
- text: plaintext,
- },
- keystream,
- ExecutionMode::Mpc,
- )
- .await?;
-
- let ciphertext: Vec = self
- .decode_public(ciphertext)
- .await?
- .try_into()
- .expect("ciphertext is array");
-
- Ok(ciphertext)
- }
-
- #[cfg_attr(
- feature = "tracing",
- tracing::instrument(level = "debug", skip(self, plaintext), err)
- )]
- async fn encrypt_private(
- &mut self,
- explicit_nonce: Vec,
- plaintext: Vec,
- ) -> Result, StreamCipherError> {
- let keystream = self
- .compute_keystream(
- explicit_nonce,
- self.config.start_ctr,
- plaintext.len(),
- ExecutionMode::Mpc,
- )
- .await?;
-
- let plaintext_ids = self.plaintext_ids(plaintext.len());
- let ciphertext = self
- .apply_keystream(
- InputText::Private {
- ids: plaintext_ids,
- text: plaintext,
- },
- keystream,
- ExecutionMode::Mpc,
- )
- .await?;
-
- let ciphertext: Vec = self
- .decode_public(ciphertext)
- .await?
- .try_into()
- .expect("ciphertext is array");
-
- Ok(ciphertext)
- }
-
- #[cfg_attr(
- feature = "tracing",
- tracing::instrument(level = "debug", skip(self), err)
- )]
- async fn encrypt_blind(
- &mut self,
- explicit_nonce: Vec,
- len: usize,
- ) -> Result, StreamCipherError> {
- let keystream = self
- .compute_keystream(
- explicit_nonce,
- self.config.start_ctr,
- len,
- ExecutionMode::Mpc,
- )
- .await?;
-
- let plaintext_ids = self.plaintext_ids(len);
- let ciphertext = self
- .apply_keystream(
- InputText::Blind { ids: plaintext_ids },
- keystream,
- ExecutionMode::Mpc,
- )
- .await?;
-
- let ciphertext: Vec = self
- .decode_public(ciphertext)
- .await?
- .try_into()
- .expect("ciphertext is array");
-
- Ok(ciphertext)
- }
-
- #[cfg_attr(
- feature = "tracing",
- tracing::instrument(level = "debug", skip(self), err)
- )]
- async fn decrypt_public(
- &mut self,
- explicit_nonce: Vec,
- ciphertext: Vec,
- ) -> Result, StreamCipherError> {
- // TODO: We may want to support writing to the transcript when decrypting
- // in public mode.
- let keystream = self
- .compute_keystream(
- explicit_nonce,
- self.config.start_ctr,
- ciphertext.len(),
- ExecutionMode::Mpc,
- )
- .await?;
-
- let ciphertext_ids = self.ciphertext_ids(ciphertext.len());
- let plaintext = self
- .apply_keystream(
- InputText::Public {
- ids: ciphertext_ids,
- text: ciphertext,
- },
- keystream,
- ExecutionMode::Mpc,
- )
- .await?;
-
- let plaintext: Vec = self
- .decode_public(plaintext)
- .await?
- .try_into()
- .expect("plaintext is array");
-
- Ok(plaintext)
- }
-
- #[cfg_attr(
- feature = "tracing",
- tracing::instrument(level = "debug", skip(self), err)
- )]
- async fn decrypt_private(
- &mut self,
- explicit_nonce: Vec,
- ciphertext: Vec,
- ) -> Result, StreamCipherError> {
- let keystream_ref = self
- .compute_keystream(
- explicit_nonce,
- self.config.start_ctr,
- ciphertext.len(),
- ExecutionMode::Mpc,
- )
- .await?;
-
- let keystream: Vec = self
- .decode_private(keystream_ref.clone())
- .await?
- .try_into()
- .expect("keystream is array");
-
- let plaintext = ciphertext
- .into_iter()
- .zip(keystream)
- .map(|(c, k)| c ^ k)
- .collect::>();
-
- // Prove plaintext encrypts back to ciphertext
- let plaintext_ids = self.plaintext_ids(plaintext.len());
- let ciphertext = self
- .apply_keystream(
- InputText::Private {
- ids: plaintext_ids,
- text: plaintext.clone(),
- },
- keystream_ref,
- ExecutionMode::Prove,
- )
- .await?;
-
- self.prove(ciphertext).await?;
-
- Ok(plaintext)
- }
-
- #[cfg_attr(
- feature = "tracing",
- tracing::instrument(level = "debug", skip(self), err)
- )]
- async fn decrypt_blind(
- &mut self,
- explicit_nonce: Vec,
- ciphertext: Vec,
- ) -> Result<(), StreamCipherError> {
- let keystream_ref = self
- .compute_keystream(
- explicit_nonce,
- self.config.start_ctr,
- ciphertext.len(),
- ExecutionMode::Mpc,
- )
- .await?;
-
- self.decode_blind(keystream_ref.clone()).await?;
-
- // Verify the plaintext encrypts back to ciphertext
- let plaintext_ids = self.plaintext_ids(ciphertext.len());
- let ciphertext_ref = self
- .apply_keystream(
- InputText::Blind { ids: plaintext_ids },
- keystream_ref,
- ExecutionMode::Verify,
- )
- .await?;
-
- self.verify(ciphertext_ref, ciphertext.into()).await?;
-
- Ok(())
- }
-
- async fn prove_plaintext(
- &mut self,
- explicit_nonce: Vec,
- ciphertext: Vec,
- ) -> Result, StreamCipherError> {
- let KeyAndIv { key, iv } = self
- .state
- .key_iv
- .clone()
- .ok_or(StreamCipherError::KeyIvNotSet)?;
-
- let plaintext = C::apply_keystream(
- &key,
- &iv,
- self.config.start_ctr,
- &explicit_nonce,
- &ciphertext,
- )?;
-
- // Prove plaintext encrypts back to ciphertext
- let keystream = self
- .compute_keystream(
- explicit_nonce,
- self.config.start_ctr,
- plaintext.len(),
- ExecutionMode::Prove,
- )
- .await?;
-
- let plaintext_ids = self.plaintext_ids(plaintext.len());
- let ciphertext = self
- .apply_keystream(
- InputText::Private {
- ids: plaintext_ids,
- text: plaintext.clone(),
- },
- keystream,
- ExecutionMode::Prove,
- )
- .await?;
-
- self.prove(ciphertext).await?;
-
- Ok(plaintext)
- }
-
- async fn verify_plaintext(
- &mut self,
- explicit_nonce: Vec,
- ciphertext: Vec,
- ) -> Result<(), StreamCipherError> {
- let keystream = self
- .compute_keystream(
- explicit_nonce,
- self.config.start_ctr,
- ciphertext.len(),
- ExecutionMode::Verify,
- )
- .await?;
-
- let plaintext_ids = self.plaintext_ids(ciphertext.len());
- let ciphertext_ref = self
- .apply_keystream(
- InputText::Blind { ids: plaintext_ids },
- keystream,
- ExecutionMode::Verify,
- )
- .await?;
-
- self.verify(ciphertext_ref, ciphertext.into()).await?;
-
- Ok(())
- }
-
- #[cfg_attr(
- feature = "tracing",
- tracing::instrument(level = "info", skip(self), err)
- )]
- async fn share_keystream_block(
- &mut self,
- explicit_nonce: Vec,
- ctr: usize,
- ) -> Result, StreamCipherError> {
- let EncodedKeyAndIv { key, iv } = self
- .state
- .encoded_key_iv
- .clone()
- .ok_or(StreamCipherError::KeyIvNotSet)?;
-
- let explicit_nonce_len = explicit_nonce.len();
- let explicit_nonce: C::NONCE = explicit_nonce.try_into().map_err(|_| {
- StreamCipherError::InvalidExplicitNonceLength {
- expected: C::NONCE_LEN,
- actual: explicit_nonce_len,
- }
- })?;
-
- let block_id = self.state.execution_id.increment_in_place();
- let mut scope = self.thread_pool.new_scope();
- scope.push(move |thread| {
- Box::pin(async move {
- let key_block = compute_key_block(
- thread,
- block_id,
- KeyBlockConfig::::new(key, iv, explicit_nonce, ctr as u32),
- ExecutionMode::Mpc,
- )
- .await?;
-
- let share = thread
- .decode_shared(&[key_block])
- .await?
- .into_iter()
- .next()
- .unwrap();
-
- Ok::<_, StreamCipherError>(share)
- })
- });
-
- let share: Vec = scope
- .wait()
- .await
- .into_iter()
- .next()
- .unwrap()?
- .try_into()
- .expect("share is an array");
-
- Ok(share)
- }
-}
-
-#[derive(Debug, Clone, Copy)]
-enum ExecutionMode {
- Mpc,
- Prove,
- Verify,
-}
-
-async fn apply_keystream(
- thread: &mut T,
- mode: ExecutionMode,
- execution_id: NestedId,
- input_text: InputText,
- keystream: ValueRef,
-) -> Result {
- let input_text = match input_text {
- InputText::Public { ids, text } => {
- let refs = text
- .into_iter()
- .zip(ids)
- .map(|(byte, id)| {
- let value_ref = thread.new_public_input::(&id)?;
- thread.assign(&value_ref, byte)?;
-
- Ok::<_, StreamCipherError>(value_ref)
- })
- .collect::, _>>()?;
- thread.array_from_values(&refs)?
- }
- InputText::Private { ids, text } => {
- let refs = text
- .into_iter()
- .zip(ids)
- .map(|(byte, id)| {
- let value_ref = thread.new_private_input::(&id)?;
- thread.assign(&value_ref, byte)?;
-
- Ok::<_, StreamCipherError>(value_ref)
- })
- .collect::, _>>()?;
- thread.array_from_values(&refs)?
- }
- InputText::Blind { ids } => {
- let refs = ids
- .into_iter()
- .map(|id| thread.new_blind_input::(&id))
- .collect::, _>>()?;
- thread.array_from_values(&refs)?
- }
- };
-
- let output_text = thread.new_array_output::(
- &execution_id.append_string("output").to_string(),
- input_text.len(),
- )?;
-
- let circ = build_array_xor(input_text.len());
-
- match mode {
- ExecutionMode::Mpc => {
- thread
- .execute(circ, &[input_text, keystream], &[output_text.clone()])
- .await?;
- }
- ExecutionMode::Prove => {
- thread
- .execute_prove(circ, &[input_text, keystream], &[output_text.clone()])
- .await?;
- }
- ExecutionMode::Verify => {
- thread
- .execute_verify(circ, &[input_text, keystream], &[output_text.clone()])
- .await?;
- }
- }
-
- Ok(output_text)
-}
-
-#[cfg_attr(
- feature = "tracing",
- tracing::instrument(level = "trace", skip(thread_pool), err)
-)]
-async fn compute_keystream<
- T: Thread + Memory + Execute + Prove + Verify + Decode + DecodePrivate + Send + 'static,
- C: CtrCircuit,
->(
- thread_pool: &mut ThreadPool,
- execution_id: NestedId,
- configs: Vec>,
- len: usize,
- mode: ExecutionMode,
-) -> Result {
- let mut block_id = execution_id.append_counter();
- let mut scope = thread_pool.new_scope();
-
- for config in configs {
- let block_id = block_id.increment_in_place();
- scope.push(move |thread| Box::pin(compute_key_block(thread, block_id, config, mode)));
- }
-
- let key_blocks = scope
- .wait()
- .await
- .into_iter()
- .collect::, _>>()?;
-
- // Flatten the key blocks into a single array.
- let keystream = key_blocks
- .iter()
- .flat_map(|block| block.iter())
- .take(len)
- .cloned()
- .map(|id| ValueRef::Value { id })
- .collect::>();
-
- let mut scope = thread_pool.new_scope();
- scope.push(move |thread| Box::pin(async move { thread.array_from_values(&keystream) }));
-
- let keystream = scope.wait().await.into_iter().next().unwrap()?;
-
- Ok(keystream)
-}
-
-#[cfg_attr(
- feature = "tracing",
- tracing::instrument(level = "trace", skip(thread), err)
-)]
-async fn compute_key_block<
- T: Memory + Execute + Prove + Verify + Decode + DecodePrivate + Send,
- C: CtrCircuit,
->(
- thread: &mut T,
- block_id: NestedId,
- config: KeyBlockConfig,
- mode: ExecutionMode,
-) -> Result {
- let KeyBlockConfig {
- key,
- iv,
- explicit_nonce,
- ctr,
- ..
- } = config;
-
- let explicit_nonce_ref = thread.new_public_input::<::NONCE>(
- &block_id.append_string("explicit_nonce").to_string(),
- )?;
- let ctr_ref = thread.new_public_input::<[u8; 4]>(&block_id.append_string("ctr").to_string())?;
- let key_block =
- thread.new_output::(&block_id.append_string("key_block").to_string())?;
-
- thread.assign(&explicit_nonce_ref, explicit_nonce)?;
- thread.assign(&ctr_ref, ctr.to_be_bytes())?;
-
- // Execute circuit
- match mode {
- ExecutionMode::Mpc => {
- thread
- .execute(
- C::circuit(),
- &[key, iv, explicit_nonce_ref, ctr_ref],
- &[key_block.clone()],
- )
- .await?;
- }
- ExecutionMode::Prove => {
- thread
- .execute_prove(
- C::circuit(),
- &[key, iv, explicit_nonce_ref, ctr_ref],
- &[key_block.clone()],
- )
- .await?;
- }
- ExecutionMode::Verify => {
- thread
- .execute_verify(
- C::circuit(),
- &[key, iv, explicit_nonce_ref, ctr_ref],
- &[key_block.clone()],
- )
- .await?;
- }
- }
-
- Ok(key_block)
-}
diff --git a/components/integration-tests/Cargo.toml b/components/integration-tests/Cargo.toml
deleted file mode 100644
index 6f6402566b..0000000000
--- a/components/integration-tests/Cargo.toml
+++ /dev/null
@@ -1,37 +0,0 @@
-[package]
-name = "integration-tests"
-version = "0.0.0"
-edition = "2021"
-publish = false
-
-# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
-
-[dependencies]
-
-[profile.release]
-lto = true
-
-
-[dev-dependencies]
-mpz-garble = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ecb8c54" }
-mpz-ot = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ecb8c54" }
-mpz-share-conversion = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ecb8c54" }
-tlsn-block-cipher = { path = "../cipher/block-cipher" }
-tlsn-stream-cipher = { path = "../cipher/stream-cipher" }
-tlsn-universal-hash = { path = "../universal-hash" }
-tlsn-aead = { path = "../aead" }
-tlsn-key-exchange = { path = "../key-exchange" }
-tlsn-point-addition = { path = "../point-addition" }
-tlsn-hmac-sha256 = { path = "../prf/hmac-sha256" }
-tlsn-utils-aio = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "8d8ffe1" }
-
-uid-mux = { path = "../uid-mux" }
-
-p256 = { version = "0.13" }
-
-futures = "0.3"
-rand_chacha = "0.3"
-rand = "0.8"
-
-tokio = { version = "1", features = ["macros", "rt", "rt-multi-thread"] }
-tokio-util = { version = "0.7", features = ["compat"] }
diff --git a/components/integration-tests/tests/test.rs b/components/integration-tests/tests/test.rs
deleted file mode 100644
index 04cefbfde0..0000000000
--- a/components/integration-tests/tests/test.rs
+++ /dev/null
@@ -1,396 +0,0 @@
-use aead::{
- aes_gcm::{AesGcmConfig, MpcAesGcm, Role as AesGcmRole},
- Aead,
-};
-use block_cipher::{Aes128, BlockCipherConfigBuilder, MpcBlockCipher};
-use ff::Gf2_128;
-use futures::StreamExt;
-use hmac_sha256::{MpcPrf, Prf, PrfConfig, SessionKeys};
-use key_exchange::{KeyExchange, KeyExchangeConfig, Role as KeyExchangeRole};
-use mpz_garble::{config::Role as GarbleRole, protocol::deap::DEAPVm, Vm};
-use mpz_ot::{
- actor::kos::{ReceiverActor, SenderActor},
- chou_orlandi::{
- Receiver as BaseReceiver, ReceiverConfig as BaseReceiverConfig, Sender as BaseSender,
- SenderConfig as BaseSenderConfig,
- },
- kos::{Receiver, ReceiverConfig, Sender, SenderConfig},
-};
-use mpz_share_conversion as ff;
-use mpz_share_conversion::{ShareConversionReveal, ShareConversionVerify};
-use p256::{NonZeroScalar, PublicKey, SecretKey};
-use point_addition::{MpcPointAddition, Role as PointAdditionRole, P256};
-use rand::SeedableRng;
-use rand_chacha::ChaCha20Rng;
-use tlsn_stream_cipher::{Aes128Ctr, MpcStreamCipher, StreamCipherConfig};
-use tlsn_universal_hash::ghash::{Ghash, GhashConfig};
-use tokio_util::compat::TokioAsyncReadCompatExt;
-use uid_mux::{yamux, UidYamux};
-use utils_aio::{codec::BincodeMux, mux::MuxChannel};
-
-const OT_SETUP_COUNT: usize = 50_000;
-
-/// The following integration test checks the interplay of individual components of the TLSNotary
-/// protocol. These are:
-/// - channel multiplexing
-/// - oblivious transfer
-/// - point addition
-/// - key exchange
-/// - prf
-/// - aead cipher (stream cipher + ghash)
-#[tokio::test]
-async fn test_components() {
- let mut rng = ChaCha20Rng::seed_from_u64(0);
-
- let (leader_socket, follower_socket) = tokio::io::duplex(1 << 25);
-
- let mut leader_mux = UidYamux::new(
- yamux::Config::default(),
- leader_socket.compat(),
- yamux::Mode::Client,
- );
- let mut follower_mux = UidYamux::new(
- yamux::Config::default(),
- follower_socket.compat(),
- yamux::Mode::Server,
- );
-
- let leader_mux_control = leader_mux.control();
- let follower_mux_control = follower_mux.control();
-
- tokio::spawn(async move { leader_mux.run().await.unwrap() });
- tokio::spawn(async move { follower_mux.run().await.unwrap() });
-
- let mut leader_mux = BincodeMux::new(leader_mux_control);
- let mut follower_mux = BincodeMux::new(follower_mux_control);
-
- let leader_ot_sender_config = SenderConfig::default();
- let follower_ot_recvr_config = ReceiverConfig::default();
-
- let follower_ot_sender_config = SenderConfig::builder().sender_commit().build().unwrap();
- let leader_ot_recvr_config = ReceiverConfig::builder().sender_commit().build().unwrap();
-
- let (leader_ot_sender_sink, leader_ot_sender_stream) =
- leader_mux.get_channel("ot/0").await.unwrap().split();
-
- let (follower_ot_recvr_sink, follower_ot_recvr_stream) =
- follower_mux.get_channel("ot/0").await.unwrap().split();
-
- let (leader_ot_receiver_sink, leader_ot_receiver_stream) =
- leader_mux.get_channel("ot/1").await.unwrap().split();
-
- let (follower_ot_sender_sink, follower_ot_sender_stream) =
- follower_mux.get_channel("ot/1").await.unwrap().split();
-
- let mut leader_ot_sender_actor = SenderActor::new(
- Sender::new(
- leader_ot_sender_config,
- BaseReceiver::new(BaseReceiverConfig::default()),
- ),
- leader_ot_sender_sink,
- leader_ot_sender_stream,
- );
-
- let mut follower_ot_recvr_actor = ReceiverActor::new(
- Receiver::new(
- follower_ot_recvr_config,
- BaseSender::new(BaseSenderConfig::default()),
- ),
- follower_ot_recvr_sink,
- follower_ot_recvr_stream,
- );
-
- let mut leader_ot_recvr_actor = ReceiverActor::new(
- Receiver::new(
- leader_ot_recvr_config,
- BaseSender::new(
- BaseSenderConfig::builder()
- .receiver_commit()
- .build()
- .unwrap(),
- ),
- ),
- leader_ot_receiver_sink,
- leader_ot_receiver_stream,
- );
-
- let mut follower_ot_sender_actor = SenderActor::new(
- Sender::new(
- follower_ot_sender_config,
- BaseReceiver::new(
- BaseReceiverConfig::builder()
- .receiver_commit()
- .build()
- .unwrap(),
- ),
- ),
- follower_ot_sender_sink,
- follower_ot_sender_stream,
- );
-
- let leader_ot_sender = leader_ot_sender_actor.sender();
- let follower_ot_recvr = follower_ot_recvr_actor.receiver();
-
- let leader_ot_recvr = leader_ot_recvr_actor.receiver();
- let follower_ot_sender = follower_ot_sender_actor.sender();
-
- tokio::spawn(async move {
- leader_ot_sender_actor.setup(OT_SETUP_COUNT).await.unwrap();
- leader_ot_sender_actor.run().await.unwrap();
- });
-
- tokio::spawn(async move {
- follower_ot_recvr_actor.setup(OT_SETUP_COUNT).await.unwrap();
- follower_ot_recvr_actor.run().await.unwrap();
- });
-
- tokio::spawn(async move {
- leader_ot_recvr_actor.setup(OT_SETUP_COUNT).await.unwrap();
- leader_ot_recvr_actor.run().await.unwrap();
- });
-
- tokio::spawn(async move {
- follower_ot_sender_actor
- .setup(OT_SETUP_COUNT)
- .await
- .unwrap();
- follower_ot_sender_actor.run().await.unwrap();
- follower_ot_sender_actor.reveal().await.unwrap();
- });
-
- let mut leader_vm = DEAPVm::new(
- "vm",
- GarbleRole::Leader,
- [0u8; 32],
- leader_mux.get_channel("vm").await.unwrap(),
- Box::new(leader_mux.clone()),
- leader_ot_sender.clone(),
- leader_ot_recvr.clone(),
- );
-
- let mut follower_vm = DEAPVm::new(
- "vm",
- GarbleRole::Follower,
- [1u8; 32],
- follower_mux.get_channel("vm").await.unwrap(),
- Box::new(follower_mux.clone()),
- follower_ot_sender.clone(),
- follower_ot_recvr.clone(),
- );
-
- let leader_p256_sender = ff::ConverterSender::::new(
- ff::SenderConfig::builder().id("p256/0").build().unwrap(),
- leader_ot_sender.clone(),
- leader_mux.get_channel("p256/0").await.unwrap(),
- );
-
- let leader_p256_receiver = ff::ConverterReceiver::::new(
- ff::ReceiverConfig::builder().id("p256/1").build().unwrap(),
- follower_ot_recvr.clone(),
- leader_mux.get_channel("p256/1").await.unwrap(),
- );
-
- let follower_p256_sender = ff::ConverterSender::::new(
- ff::SenderConfig::builder().id("p256/1").build().unwrap(),
- leader_ot_sender.clone(),
- follower_mux.get_channel("p256/1").await.unwrap(),
- );
-
- let follower_p256_receiver = ff::ConverterReceiver::::new(
- ff::ReceiverConfig::builder().id("p256/0").build().unwrap(),
- follower_ot_recvr.clone(),
- follower_mux.get_channel("p256/0").await.unwrap(),
- );
-
- let leader_pa_sender = MpcPointAddition::new(PointAdditionRole::Leader, leader_p256_sender);
- let leader_pa_receiver = MpcPointAddition::new(PointAdditionRole::Leader, leader_p256_receiver);
-
- let follower_pa_sender =
- MpcPointAddition::new(PointAdditionRole::Follower, follower_p256_sender);
-
- let follower_pa_receiver =
- MpcPointAddition::new(PointAdditionRole::Follower, follower_p256_receiver);
-
- let mut leader_ke = key_exchange::KeyExchangeCore::new(
- leader_mux.get_channel("ke").await.unwrap(),
- leader_pa_sender,
- leader_pa_receiver,
- leader_vm.new_thread("ke").await.unwrap(),
- KeyExchangeConfig::builder()
- .id("ke")
- .role(KeyExchangeRole::Leader)
- .build()
- .unwrap(),
- );
-
- let mut follower_ke = key_exchange::KeyExchangeCore::new(
- follower_mux.get_channel("ke").await.unwrap(),
- follower_pa_sender,
- follower_pa_receiver,
- follower_vm.new_thread("ke").await.unwrap(),
- KeyExchangeConfig::builder()
- .id("ke")
- .role(KeyExchangeRole::Follower)
- .build()
- .unwrap(),
- );
-
- let (leader_pms, follower_pms) =
- futures::try_join!(leader_ke.setup(), follower_ke.setup()).unwrap();
-
- let mut leader_prf = MpcPrf::new(
- PrfConfig::builder()
- .role(hmac_sha256::Role::Leader)
- .build()
- .unwrap(),
- leader_vm.new_thread("prf/0").await.unwrap(),
- leader_vm.new_thread("prf/1").await.unwrap(),
- );
- let mut follower_prf = MpcPrf::new(
- PrfConfig::builder()
- .role(hmac_sha256::Role::Follower)
- .build()
- .unwrap(),
- follower_vm.new_thread("prf/0").await.unwrap(),
- follower_vm.new_thread("prf/1").await.unwrap(),
- );
-
- futures::try_join!(
- leader_prf.setup(leader_pms.into_value()),
- follower_prf.setup(follower_pms.into_value())
- )
- .unwrap();
-
- let block_cipher_config = BlockCipherConfigBuilder::default()
- .id("aes")
- .build()
- .unwrap();
- let leader_block_cipher = MpcBlockCipher::::new(
- block_cipher_config.clone(),
- leader_vm.new_thread("block_cipher").await.unwrap(),
- );
- let follower_block_cipher = MpcBlockCipher::::new(
- block_cipher_config,
- follower_vm.new_thread("block_cipher").await.unwrap(),
- );
-
- let stream_cipher_config = StreamCipherConfig::builder()
- .id("aes-ctr")
- .transcript_id("tx")
- .build()
- .unwrap();
- let leader_stream_cipher = MpcStreamCipher::::new(
- stream_cipher_config.clone(),
- leader_vm.new_thread_pool("aes-ctr", 4).await.unwrap(),
- );
- let follower_stream_cipher = MpcStreamCipher::::new(
- stream_cipher_config,
- follower_vm.new_thread_pool("aes-ctr", 4).await.unwrap(),
- );
-
- let mut leader_gf2 = ff::ConverterSender::::new(
- ff::SenderConfig::builder()
- .id("gf2")
- .record()
- .build()
- .unwrap(),
- leader_ot_sender.clone(),
- leader_mux.get_channel("gf2").await.unwrap(),
- );
-
- let mut follower_gf2 = ff::ConverterReceiver::::new(
- ff::ReceiverConfig::builder()
- .id("gf2")
- .record()
- .build()
- .unwrap(),
- follower_ot_recvr.clone(),
- follower_mux.get_channel("gf2").await.unwrap(),
- );
-
- let ghash_config = GhashConfig::builder()
- .id("aes_gcm/ghash")
- .initial_block_count(64)
- .build()
- .unwrap();
-
- let leader_ghash = Ghash::new(ghash_config.clone(), leader_gf2.handle().unwrap());
- let follower_ghash = Ghash::new(ghash_config, follower_gf2.handle().unwrap());
-
- let mut leader_aead = MpcAesGcm::new(
- AesGcmConfig::builder()
- .id("aes_gcm")
- .role(AesGcmRole::Leader)
- .build()
- .unwrap(),
- leader_mux.get_channel("aes_gcm").await.unwrap(),
- Box::new(leader_block_cipher),
- Box::new(leader_stream_cipher),
- Box::new(leader_ghash),
- );
-
- let mut follower_aead = MpcAesGcm::new(
- AesGcmConfig::builder()
- .id("aes_gcm")
- .role(AesGcmRole::Follower)
- .build()
- .unwrap(),
- follower_mux.get_channel("aes_gcm").await.unwrap(),
- Box::new(follower_block_cipher),
- Box::new(follower_stream_cipher),
- Box::new(follower_ghash),
- );
-
- let leader_private_key = SecretKey::random(&mut rng);
- let follower_private_key = SecretKey::random(&mut rng);
- let server_public_key = PublicKey::from_secret_scalar(&NonZeroScalar::random(&mut rng));
-
- // Setup complete
-
- let _ = tokio::try_join!(
- leader_ke.compute_client_key(leader_private_key),
- follower_ke.compute_client_key(follower_private_key)
- )
- .unwrap();
-
- leader_ke.set_server_key(server_public_key);
-
- tokio::try_join!(leader_ke.compute_pms(), follower_ke.compute_pms()).unwrap();
-
- let (leader_session_keys, follower_session_keys) = tokio::try_join!(
- leader_prf.compute_session_keys_private([0u8; 32], [0u8; 32]),
- follower_prf.compute_session_keys_blind()
- )
- .unwrap();
-
- let SessionKeys {
- client_write_key: leader_key,
- client_iv: leader_iv,
- ..
- } = leader_session_keys;
-
- let SessionKeys {
- client_write_key: follower_key,
- client_iv: follower_iv,
- ..
- } = follower_session_keys;
-
- tokio::try_join!(
- leader_aead.set_key(leader_key, leader_iv),
- follower_aead.set_key(follower_key, follower_iv)
- )
- .unwrap();
-
- let msg = vec![0u8; 4096];
-
- let _ = tokio::try_join!(
- leader_aead.encrypt_private(vec![0u8; 8], msg.clone(), vec![]),
- follower_aead.encrypt_blind(vec![0u8; 8], msg.len(), vec![])
- )
- .unwrap();
-
- follower_ot_sender.shutdown().await.unwrap();
-
- tokio::try_join!(leader_vm.finalize(), follower_vm.finalize()).unwrap();
- tokio::try_join!(leader_gf2.reveal(), follower_gf2.verify()).unwrap();
-}
diff --git a/components/key-exchange/Cargo.toml b/components/key-exchange/Cargo.toml
deleted file mode 100644
index 0fdcb43a22..0000000000
--- a/components/key-exchange/Cargo.toml
+++ /dev/null
@@ -1,37 +0,0 @@
-[package]
-name = "tlsn-key-exchange"
-authors = ["TLSNotary Team"]
-description = "Implementation of the TLSNotary-specific key-exchange protocol"
-keywords = ["tls", "mpc", "2pc", "pms", "key-exchange"]
-categories = ["cryptography"]
-license = "MIT OR Apache-2.0"
-version = "0.1.0-alpha.3"
-edition = "2021"
-
-[lib]
-name = "key_exchange"
-
-[features]
-default = ["mock"]
-tracing = ["dep:tracing", "tlsn-point-addition/tracing"]
-mock = []
-
-[dependencies]
-mpz-garble = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ecb8c54" }
-mpz-ot = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ecb8c54" }
-mpz-circuits = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ecb8c54" }
-tlsn-utils-aio = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "8d8ffe1" }
-mpz-share-conversion-core = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ecb8c54" }
-tlsn-point-addition = { path = "../point-addition" }
-p256 = { version = "0.13", features = ["ecdh"] }
-async-trait = "0.1"
-thiserror = "1"
-serde = "1"
-futures = "0.3"
-derive_builder = "0.12"
-tracing = { version = "0.1", optional = true }
-
-[dev-dependencies]
-rand_chacha = "0.3"
-rand_core = "0.6"
-tokio = { version = "1", features = ["macros", "rt", "rt-multi-thread"] }
diff --git a/components/key-exchange/src/circuit.rs b/components/key-exchange/src/circuit.rs
deleted file mode 100644
index 82818ea6cc..0000000000
--- a/components/key-exchange/src/circuit.rs
+++ /dev/null
@@ -1,43 +0,0 @@
-//! This module provides the circuits used in the key exchange protocol
-
-use std::sync::Arc;
-
-use mpz_circuits::{circuits::big_num::nbyte_add_mod_trace, Circuit, CircuitBuilder};
-
-/// NIST P-256 prime big-endian
-static P: [u8; 32] = [
- 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
-];
-
-/// Circuit for combining additive shares of the PMS, twice
-///
-/// # Inputs
-///
-/// 0. PMS_SHARE_A: 32 bytes PMS Additive Share
-/// 1. PMS_SHARE_B: 32 bytes PMS Additive Share
-/// 2. PMS_SHARE_C: 32 bytes PMS Additive Share
-/// 3. PMS_SHARE_D: 32 bytes PMS Additive Share
-///
-/// # Outputs
-/// 0. PMS1: Pre-master Secret = PMS_SHARE_A + PMS_SHARE_B
-/// 1. PMS2: Pre-master Secret = PMS_SHARE_C + PMS_SHARE_D
-/// 2. EQ: Equality check of PMS1 and PMS2
-pub(crate) fn build_pms_circuit() -> Arc {
- let builder = CircuitBuilder::new();
- let share_a = builder.add_array_input::();
- let share_b = builder.add_array_input::();
- let share_c = builder.add_array_input::();
- let share_d = builder.add_array_input::();
-
- let a = nbyte_add_mod_trace(builder.state(), share_a, share_b, P);
- let b = nbyte_add_mod_trace(builder.state(), share_c, share_d, P);
-
- let eq: [_; 32] = std::array::from_fn(|i| a[i] ^ b[i]);
-
- builder.add_output(a);
- builder.add_output(b);
- builder.add_output(eq);
-
- Arc::new(builder.build().expect("pms circuit is valid"))
-}
diff --git a/components/key-exchange/src/exchange.rs b/components/key-exchange/src/exchange.rs
deleted file mode 100644
index a84bdd8548..0000000000
--- a/components/key-exchange/src/exchange.rs
+++ /dev/null
@@ -1,621 +0,0 @@
-//! This module implements the key exchange logic
-
-use async_trait::async_trait;
-use futures::{SinkExt, StreamExt};
-use mpz_garble::{value::ValueRef, Decode, Execute, Load, Memory};
-
-use mpz_share_conversion_core::fields::{p256::P256, Field};
-use p256::{EncodedPoint, PublicKey, SecretKey};
-use point_addition::PointAddition;
-use std::fmt::Debug;
-
-use utils_aio::expect_msg_or_err;
-
-use crate::{
- circuit::build_pms_circuit,
- config::{KeyExchangeConfig, Role},
- KeyExchange, KeyExchangeChannel, KeyExchangeError, KeyExchangeMessage, Pms,
-};
-
-enum State {
- Initialized,
- Setup {
- share_a: ValueRef,
- share_b: ValueRef,
- share_c: ValueRef,
- share_d: ValueRef,
- pms_1: ValueRef,
- pms_2: ValueRef,
- eq: ValueRef,
- },
- KeyExchange {
- share_a: ValueRef,
- share_b: ValueRef,
- share_c: ValueRef,
- share_d: ValueRef,
- pms_1: ValueRef,
- pms_2: ValueRef,
- eq: ValueRef,
- },
- Complete,
- Error,
-}
-
-/// The instance for performing the key exchange protocol
-///
-/// Can be either a leader or a follower depending on the `role` field in [KeyExchangeConfig]
-pub struct KeyExchangeCore {
- /// A channel for exchanging messages between leader and follower
- channel: KeyExchangeChannel,
- /// The sender instance for performing point addition
- point_addition_sender: PS,
- /// The receiver instance for performing point addition
- point_addition_receiver: PR,
- /// MPC executor
- executor: E,
- /// The private key of the party behind this instance, either follower or leader
- private_key: Option,
- /// The public key of the server
- server_key: Option,
- /// The config used for the key exchange protocol
- config: KeyExchangeConfig,
- /// The state of the protocol
- state: State,
-}
-
-impl Debug for KeyExchangeCore
-where
- PS: PointAddition + Send + Debug,
- PR: PointAddition + Send + Debug,
- E: Memory + Execute + Decode + Send,
-{
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- f.debug_struct("KeyExchangeCore")
- .field("channel", &"{{ ... }}")
- .field("point_addition_sender", &"{{ ... }}")
- .field("point_addition_receiver", &"{{ ... }}")
- .field("executor", &"{{ ... }}")
- .field("private_key", &"{{ ... }}")
- .field("server_key", &self.server_key)
- .field("config", &self.config)
- .finish()
- }
-}
-
-impl KeyExchangeCore
-where
- PS: PointAddition + Send + Debug,
- PR: PointAddition + Send + Debug,
- E: Memory + Execute + Decode + Send,
-{
- /// Creates a new [KeyExchangeCore]
- ///
- /// * `channel` - The channel for sending messages between leader and follower
- /// * `point_addition_sender` - The point addition sender instance used during key exchange
- /// * `point_addition_receiver` - The point addition receiver instance used during key exchange
- /// * `executor` - The MPC executor
- /// * `config` - The config used for the key exchange protocol
- #[cfg_attr(
- feature = "tracing",
- tracing::instrument(
- level = "info",
- skip(channel, executor, point_addition_sender, point_addition_receiver),
- ret
- )
- )]
- pub fn new(
- channel: KeyExchangeChannel,
- point_addition_sender: PS,
- point_addition_receiver: PR,
- executor: E,
- config: KeyExchangeConfig,
- ) -> Self {
- Self {
- channel,
- point_addition_sender,
- point_addition_receiver,
- executor,
- private_key: None,
- server_key: None,
- config,
- state: State::Initialized,
- }
- }
-
- async fn compute_pms_shares(&mut self) -> Result<(P256, P256), KeyExchangeError> {
- let state = std::mem::replace(&mut self.state, State::Error);
-
- let State::Setup {
- share_a,
- share_b,
- share_c,
- share_d,
- pms_1,
- pms_2,
- eq,
- } = state
- else {
- todo!()
- };
-
- let server_key = match self.config.role() {
- Role::Leader => {
- // Send server public key to follower
- if let Some(server_key) = &self.server_key {
- self.channel
- .send(KeyExchangeMessage::ServerPublicKey((*server_key).into()))
- .await?;
-
- *server_key
- } else {
- return Err(KeyExchangeError::NoServerKey);
- }
- }
- Role::Follower => {
- // Receive server's public key from leader
- let message =
- expect_msg_or_err!(self.channel, KeyExchangeMessage::ServerPublicKey)?;
- let server_key = message.try_into()?;
-
- self.server_key = Some(server_key);
-
- server_key
- }
- };
-
- let private_key = self
- .private_key
- .take()
- .ok_or(KeyExchangeError::NoPrivateKey)?;
-
- // Compute the leader's/follower's share of the pre-master secret
- //
- // We need to mimic the [diffie-hellman](p256::ecdh::diffie_hellman) function without the
- // [SharedSecret](p256::ecdh::SharedSecret) wrapper, because this makes it harder to get
- // the result as an EC curve point.
- let shared_secret = {
- let public_projective = server_key.to_projective();
- (public_projective * private_key.to_nonzero_scalar().as_ref()).to_affine()
- };
-
- let encoded_point = EncodedPoint::from(PublicKey::from_affine(shared_secret)?);
- let (sender_share, receiver_share) = futures::try_join!(
- self.point_addition_sender
- .compute_x_coordinate_share(encoded_point),
- self.point_addition_receiver
- .compute_x_coordinate_share(encoded_point)
- )?;
-
- self.state = State::KeyExchange {
- share_a,
- share_b,
- share_c,
- share_d,
- pms_1,
- pms_2,
- eq,
- };
-
- match self.config.role() {
- Role::Leader => Ok((sender_share, receiver_share)),
- Role::Follower => Ok((receiver_share, sender_share)),
- }
- }
-
- async fn compute_pms_for(
- &mut self,
- pms_share1: P256,
- pms_share2: P256,
- ) -> Result {
- let state = std::mem::replace(&mut self.state, State::Error);
-
- let State::KeyExchange {
- share_a,
- share_b,
- share_c,
- share_d,
- pms_1,
- pms_2,
- eq,
- } = state
- else {
- todo!()
- };
-
- let pms_share1: [u8; 32] = pms_share1
- .to_be_bytes()
- .try_into()
- .expect("pms share is 32 bytes");
- let pms_share2: [u8; 32] = pms_share2
- .to_be_bytes()
- .try_into()
- .expect("pms share is 32 bytes");
-
- match self.config.role() {
- Role::Leader => {
- self.executor.assign(&share_a, pms_share1)?;
- self.executor.assign(&share_c, pms_share2)?;
- }
- Role::Follower => {
- self.executor.assign(&share_b, pms_share1)?;
- self.executor.assign(&share_d, pms_share2)?;
- }
- }
-
- self.executor
- .execute(
- build_pms_circuit(),
- &[share_a, share_b, share_c, share_d],
- &[pms_1.clone(), pms_2, eq.clone()],
- )
- .await?;
-
- #[cfg(feature = "tracing")]
- tracing::event!(tracing::Level::DEBUG, "Successfully executed PMS circuit!");
-
- let mut outputs = self.executor.decode(&[eq]).await?;
-
- let eq: [u8; 32] = outputs.remove(0).try_into().expect("eq is 32 bytes");
-
- // Eq should be all zeros if pms_1 == pms_2
- if eq != [0u8; 32] {
- return Err(KeyExchangeError::CheckFailed);
- }
-
- self.state = State::Complete;
-
- // Both parties use pms_1 as the pre-master secret
- Ok(Pms::new(pms_1))
- }
-}
-
-#[async_trait]
-impl KeyExchange for KeyExchangeCore
-where
- PS: PointAddition + Send + Debug,
- PR: PointAddition + Send + Debug,
- E: Memory + Load + Execute + Decode + Send,
-{
- #[cfg_attr(
- feature = "tracing",
- tracing::instrument(level = "info", skip(self), ret)
- )]
- fn server_key(&self) -> Option {
- self.server_key
- }
-
- /// Set the server's public key
- #[cfg_attr(feature = "tracing", tracing::instrument(level = "info", skip(self)))]
- fn set_server_key(&mut self, server_key: PublicKey) {
- self.server_key = Some(server_key);
- }
-
- async fn setup(&mut self) -> Result {
- let state = std::mem::replace(&mut self.state, State::Error);
-
- let State::Initialized = state else {
- return Err(KeyExchangeError::InvalidState(
- "expected to be in Initialized state".to_string(),
- ));
- };
-
- let (share_a, share_b, share_c, share_d) = match self.config.role() {
- Role::Leader => {
- let share_a = self
- .executor
- .new_private_input::<[u8; 32]>("pms/share_a")
- .unwrap();
- let share_b = self
- .executor
- .new_blind_input::<[u8; 32]>("pms/share_b")
- .unwrap();
- let share_c = self
- .executor
- .new_private_input::<[u8; 32]>("pms/share_c")
- .unwrap();
- let share_d = self
- .executor
- .new_blind_input::<[u8; 32]>("pms/share_d")
- .unwrap();
-
- (share_a, share_b, share_c, share_d)
- }
- Role::Follower => {
- let share_a = self
- .executor
- .new_blind_input::<[u8; 32]>("pms/share_a")
- .unwrap();
- let share_b = self
- .executor
- .new_private_input::<[u8; 32]>("pms/share_b")
- .unwrap();
- let share_c = self
- .executor
- .new_blind_input::<[u8; 32]>("pms/share_c")
- .unwrap();
- let share_d = self
- .executor
- .new_private_input::<[u8; 32]>("pms/share_d")
- .unwrap();
-
- (share_a, share_b, share_c, share_d)
- }
- };
-
- let pms_1 = self.executor.new_output::<[u8; 32]>("pms/1")?;
- let pms_2 = self.executor.new_output::<[u8; 32]>("pms/2")?;
- let eq = self.executor.new_output::<[u8; 32]>("pms/eq")?;
-
- self.executor
- .load(
- build_pms_circuit(),
- &[
- share_a.clone(),
- share_b.clone(),
- share_c.clone(),
- share_d.clone(),
- ],
- &[pms_1.clone(), pms_2.clone(), eq.clone()],
- )
- .await?;
-
- self.state = State::Setup {
- share_a,
- share_b,
- share_c,
- share_d,
- pms_1: pms_1.clone(),
- pms_2,
- eq,
- };
-
- Ok(Pms::new(pms_1))
- }
-
- /// Compute the client's public key
- ///
- /// The client's public key in this context is the combined public key (EC point addition) of
- /// the leader's public key and the follower's public key.
- #[cfg_attr(
- feature = "tracing",
- tracing::instrument(level = "info", skip(self, private_key), ret, err)
- )]
- async fn compute_client_key(
- &mut self,
- private_key: SecretKey,
- ) -> Result