From 55518c919934736b0d10c3f294e2af431bb71abc Mon Sep 17 00:00:00 2001
From: Ralph Giles
Date: Wed, 24 Jul 2024 10:41:30 -0700
Subject: [PATCH 1/3] rewards-proof: replace map.flatten with filter_map

Use the combined iterator method to strip out the out-of-bounds terms
in the inner product instead of flatten (though perhaps this should
return an error instead?). This is a more efficient expression of the
same process; a standalone sketch of the equivalence appears after
this series.

Addresses a `clippy::map_flatten` lint.
---
 rewards-proof/benches/proofs_benchmark.rs | 9 +++------
 rewards-proof/examples/example_proofs.rs  | 3 +--
 2 files changed, 4 insertions(+), 8 deletions(-)

diff --git a/rewards-proof/benches/proofs_benchmark.rs b/rewards-proof/benches/proofs_benchmark.rs
index 298cfe3..71ce2df 100644
--- a/rewards-proof/benches/proofs_benchmark.rs
+++ b/rewards-proof/benches/proofs_benchmark.rs
@@ -45,8 +45,7 @@ fn benchmark_rewardsproof_generation(c: &mut Criterion) {
     let reward: u64 = state
         .iter()
         .zip(policy_vector.iter())
-        .map(|(x, y)| x.checked_mul(*y))
-        .flatten()
+        .filter_map(|(x, y)| x.checked_mul(*y))
         .sum();
 
     // create generators
@@ -97,8 +96,7 @@ fn benchmark_rewardsproof_verification(c: &mut Criterion) {
     let reward: u64 = state
         .iter()
         .zip(policy_vector.iter())
-        .map(|(x, y)| x.checked_mul(*y))
-        .flatten()
+        .filter_map(|(x, y)| x.checked_mul(*y))
         .sum();
 
     // create variables for linear proof
@@ -170,8 +168,7 @@ fn benchmark_rewardsproof_verification_multiple_users(
     let reward: u64 = state
         .iter()
         .zip(policy_vector.iter())
-        .map(|(x, y)| x.checked_mul(*y))
-        .flatten()
+        .filter_map(|(x, y)| x.checked_mul(*y))
         .sum();
 
     // generate number_of_users proofs
diff --git a/rewards-proof/examples/example_proofs.rs b/rewards-proof/examples/example_proofs.rs
index 2d4b02d..99b925a 100644
--- a/rewards-proof/examples/example_proofs.rs
+++ b/rewards-proof/examples/example_proofs.rs
@@ -30,8 +30,7 @@ fn rewards_proof_example() {
     let reward: u64 = state
         .iter()
         .zip(policy_vector.iter())
-        .map(|(x, y)| x.checked_mul(*y))
-        .flatten()
+        .filter_map(|(x, y)| x.checked_mul(*y))
         .sum();
 
     println!("Policy vector: {:?}", policy_vector);

From 7ada69f7fafd9858862f392c54db8a06c9c16f6d Mon Sep 17 00:00:00 2001
From: Ralph Giles
Date: Wed, 24 Jul 2024 10:43:36 -0700
Subject: [PATCH 2/3] rewards-proof: correct a typo

This message string contained a misspelling.
---
 rewards-proof/examples/example_proofs.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/rewards-proof/examples/example_proofs.rs b/rewards-proof/examples/example_proofs.rs
index 99b925a..fb6db53 100644
--- a/rewards-proof/examples/example_proofs.rs
+++ b/rewards-proof/examples/example_proofs.rs
@@ -70,7 +70,7 @@ fn rewards_proof_example() {
         policy_vector_scalar,
         linear_comm,
     ) {
-        println!("Rewards proof verification successfull!");
+        println!("Rewards proof verification successful!");
     } else {
         println!("Rewards proof verification failed!");
     }

From 80a9e838ce220d7c4e4a27bdc1e58cc7a1e948f7 Mon Sep 17 00:00:00 2001
From: Ralph Giles
Date: Wed, 24 Jul 2024 10:50:49 -0700
Subject: [PATCH 3/3] rewards-proof: Remove redundant closures

The associated function here is already a `FnMut` accepting the correct
number of arguments, so it's not necessary to wrap it in a separate
closure.

Addresses a `clippy::redundant_closure` lint.
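
For illustration only, a minimal standalone sketch of the rewrite (not
taken from this diff; `u64::from` stands in for `C::ScalarField::from`
and the values are invented):

    fn main() {
        let values: Vec<u32> = vec![1, 2, 3];

        // Before: a closure that only forwards its argument to the
        // conversion function (this is what clippy flags).
        let a: Vec<u64> = values.clone().into_iter().map(|v| u64::from(v)).collect();

        // After: pass the associated function directly; `u64::from`
        // already implements `FnMut(u32) -> u64`.
        let b: Vec<u64> = values.into_iter().map(u64::from).collect();

        assert_eq!(a, b);
    }

The behaviour is identical; only the closure wrapper is removed.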
---
 rewards-proof/benches/proofs_benchmark.rs | 12 ++++++------
 rewards-proof/examples/example_proofs.rs  |  4 ++--
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/rewards-proof/benches/proofs_benchmark.rs b/rewards-proof/benches/proofs_benchmark.rs
index 71ce2df..daab77f 100644
--- a/rewards-proof/benches/proofs_benchmark.rs
+++ b/rewards-proof/benches/proofs_benchmark.rs
@@ -31,14 +31,14 @@ fn benchmark_rewardsproof_generation(c: &mut Criterion) {
     let policy_vector_scalar: Vec<C::ScalarField> = policy_vector
         .clone()
         .into_iter()
-        .map(|u64_value| C::ScalarField::from(u64_value))
+        .map(C::ScalarField::from)
         .collect();
     // private value
     let state: Vec<u64> = (0..*size).map(|_| rng.gen_range(0..10)).collect();
     let state_scalar: Vec<C::ScalarField> = state
         .clone()
         .into_iter()
-        .map(|u64_value| C::ScalarField::from(u64_value))
+        .map(C::ScalarField::from)
         .collect();
 
     // reward = <state, policy_vector>
@@ -82,14 +82,14 @@ fn benchmark_rewardsproof_verification(c: &mut Criterion) {
     let policy_vector_scalar: Vec<C::ScalarField> = policy_vector
         .clone()
         .into_iter()
-        .map(|u64_value| C::ScalarField::from(u64_value))
+        .map(C::ScalarField::from)
         .collect();
     // private value
     let state: Vec<u64> = (0..*size).map(|_| rng.gen_range(0..10)).collect();
     let state_scalar: Vec<C::ScalarField> = state
         .clone()
         .into_iter()
-        .map(|u64_value| C::ScalarField::from(u64_value))
+        .map(C::ScalarField::from)
         .collect();
 
     // reward = <state, policy_vector>
@@ -154,14 +154,14 @@ fn benchmark_rewardsproof_verification_multiple_users(
     let policy_vector_scalar: Vec<C::ScalarField> = policy_vector
         .clone()
         .into_iter()
-        .map(|u64_value| C::ScalarField::from(u64_value))
+        .map(C::ScalarField::from)
         .collect();
     // private value
     let state: Vec<u64> = (0..incentive_size).map(|_| rng.gen_range(0..10)).collect();
     let state_scalar: Vec<C::ScalarField> = state
         .clone()
         .into_iter()
-        .map(|u64_value| C::ScalarField::from(u64_value))
+        .map(C::ScalarField::from)
         .collect();
 
     // reward = <state, policy_vector>
diff --git a/rewards-proof/examples/example_proofs.rs b/rewards-proof/examples/example_proofs.rs
index fb6db53..5609aea 100644
--- a/rewards-proof/examples/example_proofs.rs
+++ b/rewards-proof/examples/example_proofs.rs
@@ -41,13 +41,13 @@ fn rewards_proof_example() {
     let state_scalar: Vec<C::ScalarField> = state
         .clone()
         .into_iter()
-        .map(|u64_value| C::ScalarField::from(u64_value))
+        .map(C::ScalarField::from)
         .collect();
 
     let policy_vector_scalar: Vec<C::ScalarField> = policy_vector
         .clone()
         .into_iter()
-        .map(|u64_value| C::ScalarField::from(u64_value))
+        .map(C::ScalarField::from)
         .collect();
 
     // generate rewards proof
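
A standalone sketch of the PATCH 1/3 equivalence (hypothetical inputs,
not taken from the diff): over the `Option`s returned by `checked_mul`,
`map(..).flatten()` and `filter_map(..)` keep exactly the same terms,
silently dropping any product that overflows.

    fn main() {
        let state: Vec<u64> = vec![2, 3, u64::MAX];
        let policy_vector: Vec<u64> = vec![5, 7, 11];

        // map + flatten: each checked_mul yields an Option<u64>; flatten
        // drops the None produced by the overflowing third term.
        let a: u64 = state
            .iter()
            .zip(policy_vector.iter())
            .map(|(x, y)| x.checked_mul(*y))
            .flatten()
            .sum();

        // filter_map: the same filtering and unwrapping in one adapter.
        let b: u64 = state
            .iter()
            .zip(policy_vector.iter())
            .filter_map(|(x, y)| x.checked_mul(*y))
            .sum();

        assert_eq!(a, b); // both are 2*5 + 3*7 = 31; the overflow is skipped
    }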