From 1d117fb676d76d868b553e1c16e242203d0ff5eb Mon Sep 17 00:00:00 2001 From: cryptoAtwill Date: Tue, 1 Oct 2024 13:32:19 +0800 Subject: [PATCH 01/22] integrate cert --- Cargo.lock | 3 + Cargo.toml | 1 + fendermint/crypto/Cargo.toml | 2 + fendermint/crypto/src/lib.rs | 1 + fendermint/crypto/src/quorum.rs | 253 ++++++++++++++++++++ fendermint/crypto/src/secp.rs | 6 +- fendermint/vm/genesis/src/lib.rs | 1 + fendermint/vm/topdown/Cargo.toml | 1 + fendermint/vm/topdown/src/lib.rs | 10 +- fendermint/vm/topdown/src/observation.rs | 53 ++-- fendermint/vm/topdown/src/syncer/payload.rs | 10 +- fendermint/vm/topdown/src/syncer/poll.rs | 3 +- fendermint/vm/topdown/src/syncer/store.rs | 5 +- fendermint/vm/topdown/src/vote/payload.rs | 10 +- fendermint/vm/topdown/src/vote/store.rs | 47 +++- fendermint/vm/topdown/src/vote/tally.rs | 29 ++- 16 files changed, 393 insertions(+), 42 deletions(-) create mode 100644 fendermint/crypto/src/quorum.rs diff --git a/Cargo.lock b/Cargo.lock index f3b5dbb69..c6d879747 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2996,6 +2996,8 @@ dependencies = [ "fvm_ipld_encoding", "libsecp256k1", "multihash 0.18.1", + "num-rational", + "num-traits", "rand", "serde", "zeroize", @@ -3464,6 +3466,7 @@ dependencies = [ "ipc_ipld_resolver", "libp2p", "multihash 0.18.1", + "num-rational", "num-traits", "prometheus", "rand", diff --git a/Cargo.toml b/Cargo.toml index bb5234eda..e79ac3a05 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -123,6 +123,7 @@ multihash = { version = "0.18.1", default-features = false, features = [ ] } num-bigint = "0.4" num-derive = "0.3" +num-rational = "0.4.1" num-traits = "0.2" num_enum = "0.7.2" paste = "1" diff --git a/fendermint/crypto/Cargo.toml b/fendermint/crypto/Cargo.toml index ddf29ee9b..4920498c5 100644 --- a/fendermint/crypto/Cargo.toml +++ b/fendermint/crypto/Cargo.toml @@ -16,6 +16,8 @@ rand = { workspace = true } zeroize = { workspace = true } multihash = { workspace = true, features = ["multihash-impl", "blake2b"] } serde = 
{ workspace = true, optional = true } +num-traits = { workspace = true } +num-rational = { workspace = true } [dev-dependencies] fvm_ipld_encoding = { workspace = true } diff --git a/fendermint/crypto/src/lib.rs b/fendermint/crypto/src/lib.rs index 10a69b1f3..57d5cb0fc 100644 --- a/fendermint/crypto/src/lib.rs +++ b/fendermint/crypto/src/lib.rs @@ -1,6 +1,7 @@ // Copyright 2022-2024 Protocol Labs // SPDX-License-Identifier: Apache-2.0, MIT +pub mod quorum; pub mod secp; use base64::engine::GeneralPurpose; diff --git a/fendermint/crypto/src/quorum.rs b/fendermint/crypto/src/quorum.rs new file mode 100644 index 000000000..d099fe2be --- /dev/null +++ b/fendermint/crypto/src/quorum.rs @@ -0,0 +1,253 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use crate::secp::RecoverableECDSASignature; +use anyhow::anyhow; +use libsecp256k1::PublicKey; +use num_rational::Ratio; +use num_traits::Unsigned; + +/// The payload bytes that has been certified by a majority of signer. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "with_serde", derive(serde::Serialize, serde::Deserialize))] +pub struct ECDSACertificate { + payload: T, + /// An array of nillable signatures of all active validators in deterministic order. + signatures: Vec>, +} + +impl ECDSACertificate { + pub fn new_of_size(payload: T, size: usize) -> Self { + Self { + payload, + signatures: vec![None; size], + } + } + + pub fn payload(&self) -> &T { + &self.payload + } + + #[inline] + fn quorum_threshold(total: W, threshold_ratio: Ratio) -> W + where + W: Unsigned + Copy, + { + total * *threshold_ratio.numer() / *threshold_ratio.denom() + W::one() + } +} + +impl + PartialEq> ECDSACertificate { + pub fn set_signature( + &mut self, + idx: usize, + payload: &T, + pk: &PublicKey, + sig: RecoverableECDSASignature, + ) -> anyhow::Result<()> { + if self.payload != *payload { + return Err(anyhow!("invalid payload")); + } + + if !sig.verify(payload.as_ref(), pk)? 
{ + return Err(anyhow!("signature not match publick key")); + } + + self.signatures[idx] = Some(sig); + + Ok(()) + } + + /// Checks if a quorum is reached from external power table given the payload and sigatures + pub fn quorum_reached<'a, W, I>( + &self, + power_table: I, + threshold_ratio: Ratio, + ) -> anyhow::Result + where + W: Copy + Unsigned + PartialOrd, + I: Iterator, + { + let (total_weight, signed_weight) = self.calculate_weights::(power_table)?; + Ok(signed_weight >= Self::quorum_threshold::(total_weight, threshold_ratio)) + } + + pub fn calculate_weights<'a, W, I>(&self, power_table: I) -> anyhow::Result<(W, W)> + where + W: Copy + Unsigned, + I: Iterator, + { + let mut total_weight = W::zero(); + let mut total_pkeys = 0usize; + + let mut signed_weight = W::zero(); + + let payload_bytes = self.payload.as_ref(); + + for ((pk, weight), maybe_sig) in power_table.zip(self.signatures.iter()) { + total_weight = total_weight + weight; + total_pkeys += 1; + + let Some(ref sig) = maybe_sig else { + continue; + }; + + let (rec_pk, _) = sig.recover(payload_bytes)?; + if *pk != rec_pk { + return Err(anyhow!("signature not signed by the public key")); + } + + signed_weight = signed_weight + weight; + } + + if total_pkeys != self.signatures.len() { + return Err(anyhow!( + "invalid number of public keys, expecting: {}, received: {}", + self.signatures.len(), + total_pkeys + )); + } + + Ok((total_weight, signed_weight)) + } +} + +#[cfg(test)] +mod tests { + use crate::quorum::ECDSACertificate; + use crate::secp::RecoverableECDSASignature; + use crate::SecretKey; + use num_rational::Ratio; + use rand::{random, thread_rng}; + + fn random_secret_keys(num: usize) -> Vec { + let mut rng = thread_rng(); + (0..num).map(|_| SecretKey::random(&mut rng)).collect() + } + + #[test] + fn test_quorum_all_signed_works() { + let sks = random_secret_keys(11); + + let payload = vec![10u8; 100]; + + let mut quorum = ECDSACertificate::new_of_size(payload.clone(), sks.len()); + let ratio 
= Ratio::new(2, 3); + for (i, sk) in sks.iter().enumerate() { + let sig = RecoverableECDSASignature::sign(sk, &payload).unwrap(); + quorum + .set_signature(i, &payload, &sk.public_key(), sig) + .unwrap(); + } + + let weights = sks + .iter() + .map(|sk| (sk.public_key(), 1u64)) + .collect::>(); + let is_ok = quorum + .quorum_reached::<_, _>(weights.iter().map(|(pk, weight)| (pk, *weight)), ratio) + .unwrap(); + assert!(is_ok); + } + + #[test] + fn test_no_quorum_works() { + let sks = random_secret_keys(11); + + let payload = vec![10u8; 100]; + let ratio = Ratio::new(2, 3); + + let mut quorum = ECDSACertificate::new_of_size(payload.clone(), sks.len()); + for (i, sk) in sks.iter().enumerate() { + let sig = RecoverableECDSASignature::sign(sk, &payload).unwrap(); + if i % 3 == 0 { + quorum + .set_signature(i, &payload, &sk.public_key(), sig) + .unwrap(); + } + } + + let weights = sks + .iter() + .map(|sk| (sk.public_key(), 1u64)) + .collect::>(); + let is_reached = quorum + .quorum_reached::<_, _>(weights.iter().map(|(pk, weight)| (pk, *weight)), ratio) + .unwrap(); + assert!(!is_reached); + } + + #[test] + fn test_calculate_weight_all_signed_works() { + let sks = random_secret_keys(11); + + let payload = vec![10u8; 100]; + + let mut quorum = ECDSACertificate::new_of_size(payload.clone(), sks.len()); + for (i, sk) in sks.iter().enumerate() { + let sig = RecoverableECDSASignature::sign(sk, &payload).unwrap(); + quorum + .set_signature(i, &payload, &sk.public_key(), sig) + .unwrap(); + } + + let mut total_expected = 0; + let weights = sks + .iter() + .map(|sk| { + let n = random::() % 100000; + total_expected += n; + (sk.public_key(), n) + }) + .collect::>(); + let (total, signed) = quorum + .calculate_weights::<_, _>(weights.iter().map(|(pk, weight)| (pk, *weight))) + .unwrap(); + + assert_eq!(total, signed); + assert_eq!(total, total_expected); + } + + #[test] + fn test_random_works() { + let sks = random_secret_keys(11); + + let payload = vec![10u8; 100]; + + let mut 
quorum = ECDSACertificate::new_of_size(payload.clone(), sks.len()); + let mut should_signs = vec![]; + for (i, sk) in sks.iter().enumerate() { + let sig = RecoverableECDSASignature::sign(sk, &payload).unwrap(); + + let should_sign = random::(); + if should_sign { + quorum + .set_signature(i, &payload, &sk.public_key(), sig) + .unwrap(); + } + should_signs.push(should_sign); + } + + let mut total_expected = 0; + let weights = sks + .iter() + .map(|sk| { + let n = random::() % 100000; + total_expected += n; + (sk.public_key(), n) + }) + .collect::>(); + let (total, signed) = quorum + .calculate_weights::<_, _>(weights.iter().map(|(pk, weight)| (pk, *weight))) + .unwrap(); + + let mut signed_expected = 0; + for (i, should_sign) in should_signs.iter().enumerate() { + if *should_sign { + signed_expected += weights[i].1; + } + } + assert_eq!(total, total_expected); + assert_eq!(signed, signed_expected); + } +} diff --git a/fendermint/crypto/src/secp.rs b/fendermint/crypto/src/secp.rs index 9c6931127..7ea5c8a3f 100644 --- a/fendermint/crypto/src/secp.rs +++ b/fendermint/crypto/src/secp.rs @@ -20,7 +20,7 @@ impl RecoverableECDSASignature { Ok(Self((rec_id.serialize(), sig.serialize()))) } - pub fn recover(self, raw_message: &[u8]) -> anyhow::Result<(PublicKey, [u8; 64])> { + pub fn recover(&self, raw_message: &[u8]) -> anyhow::Result<(PublicKey, &[u8; 64])> { let v = Code::Blake2b256.digest(raw_message); let message = libsecp256k1::Message::parse_slice(v.digest())?; @@ -30,10 +30,10 @@ impl RecoverableECDSASignature { let rec_id = RecoveryId::parse(self.0 .0)?; let pk = recover(&message, &signature, &rec_id)?; - Ok((pk, self.0 .1)) + Ok((pk, &self.0 .1)) } - pub fn verify(self, raw_message: &[u8], pk: &PublicKey) -> anyhow::Result { + pub fn verify(&self, raw_message: &[u8], pk: &PublicKey) -> anyhow::Result { let (recovered_pk, _) = self.recover(raw_message)?; Ok(recovered_pk == *pk) } diff --git a/fendermint/vm/genesis/src/lib.rs b/fendermint/vm/genesis/src/lib.rs index 
adc172245..5d7765f4e 100644 --- a/fendermint/vm/genesis/src/lib.rs +++ b/fendermint/vm/genesis/src/lib.rs @@ -7,6 +7,7 @@ use anyhow::anyhow; use fvm_shared::bigint::{BigInt, Integer}; use serde::{Deserialize, Serialize}; use serde_with::serde_as; +use std::cmp::Ordering; use std::fmt::{Display, Formatter}; use std::hash::{Hash, Hasher}; diff --git a/fendermint/vm/topdown/Cargo.toml b/fendermint/vm/topdown/Cargo.toml index 2251c20e5..18850b302 100644 --- a/fendermint/vm/topdown/Cargo.toml +++ b/fendermint/vm/topdown/Cargo.toml @@ -31,6 +31,7 @@ tokio = { workspace = true } tracing = { workspace = true } prometheus = { workspace = true } arbitrary = { workspace = true } +num-rational = { workspace = true } multihash = { workspace = true } diff --git a/fendermint/vm/topdown/src/lib.rs b/fendermint/vm/topdown/src/lib.rs index d547fe526..3047ec439 100644 --- a/fendermint/vm/topdown/src/lib.rs +++ b/fendermint/vm/topdown/src/lib.rs @@ -114,7 +114,7 @@ impl Config { /// majority of subnet validators. DAG-CBOR encoded, embedded in CertifiedCheckpoint. #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub enum Checkpoint { - V1(Ballot) + V1(Ballot), } /// The finality view for IPC parent at certain height. 
@@ -206,19 +206,19 @@ pub(crate) fn is_null_round_str(s: &str) -> bool { impl Checkpoint { pub fn target_height(&self) -> BlockHeight { match self { - Checkpoint::V1(b) => b.parent_height + Checkpoint::V1(b) => b.parent_height, } } pub fn target_hash(&self) -> &Bytes { match self { - Checkpoint::V1(b) => &b.parent_hash + Checkpoint::V1(b) => &b.parent_hash, } } pub fn cumulative_effects_comm(&self) -> &Bytes { match self { - Checkpoint::V1(b) => &b.cumulative_effects_comm + Checkpoint::V1(b) => &b.cumulative_effects_comm, } } -} \ No newline at end of file +} diff --git a/fendermint/vm/topdown/src/observation.rs b/fendermint/vm/topdown/src/observation.rs index e6637d37c..b18379e62 100644 --- a/fendermint/vm/topdown/src/observation.rs +++ b/fendermint/vm/topdown/src/observation.rs @@ -1,17 +1,17 @@ // Copyright 2022-2024 Protocol Labs // SPDX-License-Identifier: Apache-2.0, MIT -use std::cmp::min; use crate::syncer::error::Error; use crate::syncer::store::ParentViewStore; use crate::{BlockHash, BlockHeight, Bytes, Checkpoint}; use arbitrary::Arbitrary; -use serde::{Deserialize, Serialize}; -use std::fmt::{Display, Formatter}; use cid::Cid; use fvm_ipld_encoding::DAG_CBOR; use multihash::Code; use multihash::MultihashDigest; +use serde::{Deserialize, Serialize}; +use std::cmp::min; +use std::fmt::{Display, Formatter}; use crate::syncer::payload::ParentBlockView; @@ -68,16 +68,20 @@ pub fn deduce_new_observation( if latest_height < checkpoint.target_height() { tracing::info!("committed vote height more than latest parent view"); - return Err(Error::CommittedParentHeightNotPurged) + return Err(Error::CommittedParentHeightNotPurged); } let max_observation_height = checkpoint.target_height() + config.max_observation_range(); let candidate_height = min(max_observation_height, latest_height); - tracing::debug!(max_observation_height, candidate_height, "propose observation height"); + tracing::debug!( + max_observation_height, + candidate_height, + "propose observation 
height" + ); // aggregate commitment for the observation let mut agg = LinearizedParentBlockView::from(checkpoint); - for h in checkpoint.target_height()+1..=candidate_height { + for h in checkpoint.target_height() + 1..=candidate_height { let Some(p) = store.get(h)? else { tracing::debug!(height = h, "not parent block view"); return Err(Error::MissingBlockView(h, candidate_height)); @@ -88,7 +92,10 @@ pub fn deduce_new_observation( // TODO: integrate local hash let observation = agg.into_commitment(vec![])?; - tracing::info!(height = observation.ballot.parent_height, "new observation derived"); + tracing::info!( + height = observation.ballot.parent_height, + "new observation derived" + ); Ok(observation) } @@ -105,6 +112,12 @@ impl Display for Ballot { } } +impl AsRef<[u8]> for Ballot { + fn as_ref(&self) -> &[u8] { + todo!() + } +} + impl Ballot { pub fn parent_height(&self) -> BlockHeight { self.parent_height @@ -131,7 +144,8 @@ impl ObservationCommitment { impl ObservationConfig { pub fn max_observation_range(&self) -> BlockHeight { - self.max_observation_range.unwrap_or(DEFAULT_MAX_OBSERVATION_RANGE) + self.max_observation_range + .unwrap_or(DEFAULT_MAX_OBSERVATION_RANGE) } } @@ -153,14 +167,18 @@ impl From<&Checkpoint> for LinearizedParentBlockView { impl LinearizedParentBlockView { fn new_commitment(&mut self, to_append: Bytes) { - let bytes = [self.cumulative_effects_comm.as_slice(), to_append.as_slice()].concat(); + let bytes = [ + self.cumulative_effects_comm.as_slice(), + to_append.as_slice(), + ] + .concat(); let cid = Cid::new_v1(DAG_CBOR, Code::Blake2b256.digest(&bytes)); self.cumulative_effects_comm = cid.to_bytes(); } - pub fn append(&mut self, view: ParentBlockView) -> Result<(), Error>{ + pub fn append(&mut self, view: ParentBlockView) -> Result<(), Error> { if self.parent_height + 1 != view.parent_height { - return Err(Error::NotSequential) + return Err(Error::NotSequential); } self.parent_height += 1; @@ -176,8 +194,15 @@ impl 
LinearizedParentBlockView { fn into_commitment(self, local_hash: BlockHash) -> Result { let Some(hash) = self.parent_hash else { - return Err(Error::CannotCommitObservationAtNullBlock(self.parent_height)); + return Err(Error::CannotCommitObservationAtNullBlock( + self.parent_height, + )); }; - Ok(ObservationCommitment::new(local_hash, self.parent_height, hash, self.cumulative_effects_comm)) + Ok(ObservationCommitment::new( + local_hash, + self.parent_height, + hash, + self.cumulative_effects_comm, + )) } -} \ No newline at end of file +} diff --git a/fendermint/vm/topdown/src/syncer/payload.rs b/fendermint/vm/topdown/src/syncer/payload.rs index b68824e1e..0d855cafc 100644 --- a/fendermint/vm/topdown/src/syncer/payload.rs +++ b/fendermint/vm/topdown/src/syncer/payload.rs @@ -1,14 +1,14 @@ // Copyright 2022-2024 Protocol Labs // SPDX-License-Identifier: Apache-2.0, MIT +use crate::syncer::error::Error; +use crate::{BlockHash, BlockHeight, Bytes}; use cid::Cid; use fvm_ipld_encoding::DAG_CBOR; -use multihash::Code; -use crate::{BlockHash, BlockHeight, Bytes}; use ipc_api::cross::IpcEnvelope; use ipc_api::staking::StakingChangeRequest; +use multihash::Code; use multihash::MultihashDigest; -use crate::syncer::error::Error; #[derive(Clone, Debug)] pub struct ParentBlockViewPayload { @@ -55,8 +55,8 @@ impl ParentBlockView { return Ok(Cid::default().to_bytes()); }; - let bytes = fvm_ipld_encoding::to_vec(&(&p.xnet_msgs, &p.validator_changes)) - .map_err(|e| { + let bytes = + fvm_ipld_encoding::to_vec(&(&p.xnet_msgs, &p.validator_changes)).map_err(|e| { tracing::error!(err = e.to_string(), "cannot serialize parent block view"); Error::CannotSerializeParentBlockView })?; diff --git a/fendermint/vm/topdown/src/syncer/poll.rs b/fendermint/vm/topdown/src/syncer/poll.rs index 01e6b260f..125491fae 100644 --- a/fendermint/vm/topdown/src/syncer/poll.rs +++ b/fendermint/vm/topdown/src/syncer/poll.rs @@ -222,7 +222,8 @@ where let view = fetch_data(&self.parent_proxy, height, 
block_hash_res.block_hash).await?; self.store.store(view.clone())?; - let commitment = deduce_new_observation(&self.store, &self.last_finalized, &self.config.observation)?; + let commitment = + deduce_new_observation(&self.store, &self.last_finalized, &self.config.observation)?; // if there is an error, ignore, we can always try next loop let _ = self .event_broadcast diff --git a/fendermint/vm/topdown/src/syncer/store.rs b/fendermint/vm/topdown/src/syncer/store.rs index d4c7230df..f38b4dccb 100644 --- a/fendermint/vm/topdown/src/syncer/store.rs +++ b/fendermint/vm/topdown/src/syncer/store.rs @@ -27,7 +27,8 @@ pub struct InMemoryParentViewStore { impl ParentViewStore for InMemoryParentViewStore { fn store(&mut self, view: ParentBlockView) -> Result<(), Error> { - self.inner.append(view.parent_height, view) + self.inner + .append(view.parent_height, view) .map_err(|_| Error::NonSequentialParentViewInsert) } @@ -47,4 +48,4 @@ impl ParentViewStore for InMemoryParentViewStore { fn max_parent_view_height(&self) -> Result, Error> { Ok(self.inner.upper_bound()) } -} \ No newline at end of file +} diff --git a/fendermint/vm/topdown/src/vote/payload.rs b/fendermint/vm/topdown/src/vote/payload.rs index 22ae8ac20..59e0337b8 100644 --- a/fendermint/vm/topdown/src/vote/payload.rs +++ b/fendermint/vm/topdown/src/vote/payload.rs @@ -3,7 +3,7 @@ use crate::observation::{Ballot, ObservationCommitment}; use crate::vote::Weight; -use crate::BlockHeight; +use crate::{BlockHeight, Bytes}; use anyhow::anyhow; use fendermint_crypto::secp::RecoverableECDSASignature; use fendermint_crypto::SecretKey; @@ -49,7 +49,7 @@ impl Vote { pub fn v1_checked(obs: CertifiedObservation) -> anyhow::Result { let to_sign = fvm_ipld_encoding::to_vec(&obs.observed)?; - let (pk, _) = obs.signature.clone().recover(&to_sign)?; + let (pk, _) = obs.signature.recover(&to_sign)?; Ok(Self::V1 { validator: ValidatorKey::new(pk), @@ -68,6 +68,12 @@ impl Vote { Self::V1 { payload, .. 
} => &payload.observed.ballot, } } + + pub fn ballot_sig(&self) -> &RecoverableECDSASignature { + match self { + Self::V1 { payload, .. } => &payload.signature, + } + } } impl TryFrom<&[u8]> for Vote { diff --git a/fendermint/vm/topdown/src/vote/store.rs b/fendermint/vm/topdown/src/vote/store.rs index 10de5a238..b88ab4686 100644 --- a/fendermint/vm/topdown/src/vote/store.rs +++ b/fendermint/vm/topdown/src/vote/store.rs @@ -6,6 +6,7 @@ use crate::vote::error::Error; use crate::vote::payload::{PowerTable, Vote}; use crate::vote::Weight; use crate::BlockHeight; +use fendermint_crypto::quorum::ECDSACertificate; use fendermint_vm_genesis::ValidatorKey; use std::collections::btree_map::Entry; use std::collections::{BTreeMap, HashMap}; @@ -83,11 +84,15 @@ impl VoteStore for InMemoryVoteStore { } /// The aggregated votes from different validators. -pub struct VoteAgg<'a>(Vec<&'a Vote>); +pub struct VoteAgg<'a>(HashMap); impl<'a> VoteAgg<'a> { pub fn new(votes: Vec<&'a Vote>) -> Self { - Self(votes) + let mut map = HashMap::new(); + for v in votes { + map.insert(v.voter(), v); + } + Self(map) } pub fn is_empty(&self) -> bool { @@ -95,16 +100,14 @@ impl<'a> VoteAgg<'a> { } pub fn into_owned(self) -> Vec { - self.0.into_iter().cloned().collect() + self.0.into_values().cloned().collect() } pub fn ballot_weights(&self, power_table: &PowerTable) -> Vec<(&Ballot, Weight)> { let mut votes: Vec<(&Ballot, Weight)> = Vec::new(); - for v in self.0.iter() { - let validator = v.voter(); - - let power = power_table.get(&validator).cloned().unwrap_or(0); + for (validator, v) in self.0.iter() { + let power = power_table.get(validator).cloned().unwrap_or(0); if power == 0 { continue; } @@ -118,6 +121,36 @@ impl<'a> VoteAgg<'a> { votes } + + /// Generate a cert from the ordered validator keys and the target observation as payload + pub fn generate_cert( + &self, + ordered_validators: Vec<(&ValidatorKey, &Weight)>, + ballot: &Ballot, + ) -> Result, Error> { + let mut cert = 
ECDSACertificate::new_of_size(ballot.clone(), ordered_validators.len()); + + for (idx, (validator, _)) in ordered_validators.into_iter().enumerate() { + let Some(vote) = self.0.get(validator) else { + continue; + }; + + if vote.ballot() == *ballot { + cert.set_signature( + idx, + ballot, + validator.public_key(), + vote.ballot_sig().clone(), + ) + .map_err(|e| { + tracing::error!(err = e.to_string(), "cannot verify signature"); + Error::VoteCannotBeValidated + })?; + } + } + + Ok(cert) + } } #[cfg(test)] diff --git a/fendermint/vm/topdown/src/vote/tally.rs b/fendermint/vm/topdown/src/vote/tally.rs index cfce8c526..ef20e8b5c 100644 --- a/fendermint/vm/topdown/src/vote/tally.rs +++ b/fendermint/vm/topdown/src/vote/tally.rs @@ -7,7 +7,10 @@ use crate::vote::payload::{PowerTable, PowerUpdates, Vote}; use crate::vote::store::VoteStore; use crate::vote::Weight; use crate::BlockHeight; +use fendermint_crypto::quorum::ECDSACertificate; use fendermint_vm_genesis::ValidatorKey; +use num_rational::Ratio; +use std::cmp::Ordering; use std::collections::HashMap; /// VoteTally aggregates different votes received from various validators in the network @@ -24,6 +27,9 @@ pub(crate) struct VoteTally { /// The latest height that was voted to be finalized and committed to child blockchian last_finalized_height: BlockHeight, + + /// The quorum threshold ratio required for a quorum + quorum_ratio: Ratio, } impl VoteTally { @@ -44,6 +50,7 @@ impl VoteTally { power_table: HashMap::from_iter(power_table), votes: store, last_finalized_height, + quorum_ratio: Ratio::new(2, 3), }) } @@ -66,7 +73,7 @@ impl VoteTally { /// The equivalent formula can be found in CometBFT [here](https://github.com/cometbft/cometbft/blob/a8991d63e5aad8be82b90329b55413e3a4933dc0/types/vote_set.go#L307). 
pub fn quorum_threshold(&self) -> Weight { let total_weight: Weight = self.power_table.values().sum(); - total_weight * 2 / 3 + 1 + total_weight * self.quorum_ratio.numer() / self.quorum_ratio.denom() } /// Return the height of the first entry in the chain. @@ -143,7 +150,7 @@ impl VoteTally { } /// Find a block on the (from our perspective) finalized chain that gathered enough votes from validators. - pub fn find_quorum(&self) -> Result, Error> { + pub fn find_quorum(&self) -> Result>, Error> { let quorum_threshold = self.quorum_threshold(); let Some(max_height) = self.votes.latest_vote_height()? else { tracing::info!("vote store has no vote yet, skip finding quorum"); @@ -163,7 +170,8 @@ impl VoteTally { ); if weight >= quorum_threshold { - return Ok(Some(ballot.clone())); + let cert = votes.generate_cert(self.ordered_validators(), ballot)?; + return Ok(Some(cert)); } } @@ -209,6 +217,21 @@ impl VoteTally { } } } + + fn ordered_validators(&self) -> Vec<(&ValidatorKey, &Weight)> { + let mut sorted_powers = self.power_table.iter().collect::>(); + + sorted_powers.sort_by(|(a, b)| { + let cmp = b.1.cmp(a.1); + if cmp != Ordering::Equal { + cmp + } else { + b.0.cmp(a.0) + } + }); + + sorted_powers + } } #[cfg(test)] From a4a959e64775c54d4bec479704abc2bab31e9dda Mon Sep 17 00:00:00 2001 From: cryptoAtwill Date: Tue, 1 Oct 2024 16:40:54 +0800 Subject: [PATCH 02/22] integrate cert --- fendermint/crypto/Cargo.toml | 5 ++-- fendermint/crypto/src/quorum.rs | 34 ++++++++--------------- fendermint/vm/genesis/src/lib.rs | 14 ++++++++++ fendermint/vm/topdown/src/observation.rs | 4 +++ fendermint/vm/topdown/src/vote/mod.rs | 5 ++-- fendermint/vm/topdown/src/vote/payload.rs | 7 +++-- fendermint/vm/topdown/src/vote/store.rs | 11 ++++---- fendermint/vm/topdown/src/vote/tally.rs | 11 ++++---- 8 files changed, 49 insertions(+), 42 deletions(-) diff --git a/fendermint/crypto/Cargo.toml b/fendermint/crypto/Cargo.toml index 4920498c5..5ba6bc2a5 100644 --- a/fendermint/crypto/Cargo.toml 
+++ b/fendermint/crypto/Cargo.toml @@ -16,12 +16,11 @@ rand = { workspace = true } zeroize = { workspace = true } multihash = { workspace = true, features = ["multihash-impl", "blake2b"] } serde = { workspace = true, optional = true } +fvm_ipld_encoding = { workspace = true, optional = true } num-traits = { workspace = true } num-rational = { workspace = true } -[dev-dependencies] -fvm_ipld_encoding = { workspace = true } [features] -default = ["with_serde"] +default = ["with_serde", "fvm_ipld_encoding"] with_serde = ["serde"] \ No newline at end of file diff --git a/fendermint/crypto/src/quorum.rs b/fendermint/crypto/src/quorum.rs index d099fe2be..379788636 100644 --- a/fendermint/crypto/src/quorum.rs +++ b/fendermint/crypto/src/quorum.rs @@ -30,26 +30,22 @@ impl ECDSACertificate { #[inline] fn quorum_threshold(total: W, threshold_ratio: Ratio) -> W - where - W: Unsigned + Copy, + where + W: Unsigned + Copy, { total * *threshold_ratio.numer() / *threshold_ratio.denom() + W::one() } } -impl + PartialEq> ECDSACertificate { +#[cfg(feature = "with_serde")] +impl ECDSACertificate { pub fn set_signature( &mut self, idx: usize, - payload: &T, pk: &PublicKey, sig: RecoverableECDSASignature, ) -> anyhow::Result<()> { - if self.payload != *payload { - return Err(anyhow!("invalid payload")); - } - - if !sig.verify(payload.as_ref(), pk)? { + if !sig.verify(&fvm_ipld_encoding::to_vec(&self.payload)?, pk)? 
{ return Err(anyhow!("signature not match publick key")); } @@ -82,7 +78,7 @@ impl + PartialEq> ECDSACertificate { let mut signed_weight = W::zero(); - let payload_bytes = self.payload.as_ref(); + let payload_bytes = fvm_ipld_encoding::to_vec(&self.payload)?; for ((pk, weight), maybe_sig) in power_table.zip(self.signatures.iter()) { total_weight = total_weight + weight; @@ -92,7 +88,7 @@ impl + PartialEq> ECDSACertificate { continue; }; - let (rec_pk, _) = sig.recover(payload_bytes)?; + let (rec_pk, _) = sig.recover(payload_bytes.as_slice())?; if *pk != rec_pk { return Err(anyhow!("signature not signed by the public key")); } @@ -135,9 +131,7 @@ mod tests { let ratio = Ratio::new(2, 3); for (i, sk) in sks.iter().enumerate() { let sig = RecoverableECDSASignature::sign(sk, &payload).unwrap(); - quorum - .set_signature(i, &payload, &sk.public_key(), sig) - .unwrap(); + quorum.set_signature(i, &sk.public_key(), sig).unwrap(); } let weights = sks @@ -161,9 +155,7 @@ mod tests { for (i, sk) in sks.iter().enumerate() { let sig = RecoverableECDSASignature::sign(sk, &payload).unwrap(); if i % 3 == 0 { - quorum - .set_signature(i, &payload, &sk.public_key(), sig) - .unwrap(); + quorum.set_signature(i, &sk.public_key(), sig).unwrap(); } } @@ -186,9 +178,7 @@ mod tests { let mut quorum = ECDSACertificate::new_of_size(payload.clone(), sks.len()); for (i, sk) in sks.iter().enumerate() { let sig = RecoverableECDSASignature::sign(sk, &payload).unwrap(); - quorum - .set_signature(i, &payload, &sk.public_key(), sig) - .unwrap(); + quorum.set_signature(i, &sk.public_key(), sig).unwrap(); } let mut total_expected = 0; @@ -221,9 +211,7 @@ mod tests { let should_sign = random::(); if should_sign { - quorum - .set_signature(i, &payload, &sk.public_key(), sig) - .unwrap(); + quorum.set_signature(i, &sk.public_key(), sig).unwrap(); } should_signs.push(should_sign); } diff --git a/fendermint/vm/genesis/src/lib.rs b/fendermint/vm/genesis/src/lib.rs index 5d7765f4e..6bc4794e5 100644 --- 
a/fendermint/vm/genesis/src/lib.rs +++ b/fendermint/vm/genesis/src/lib.rs @@ -162,6 +162,20 @@ impl ValidatorKey { } } +impl PartialOrd for ValidatorKey { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for ValidatorKey { + fn cmp(&self, other: &Self) -> Ordering { + self.0 + .serialize_compressed() + .cmp(&other.0.serialize_compressed()) + } +} + impl TryFrom for tendermint::PublicKey { type Error = anyhow::Error; diff --git a/fendermint/vm/topdown/src/observation.rs b/fendermint/vm/topdown/src/observation.rs index 9e481029f..4f803b4b5 100644 --- a/fendermint/vm/topdown/src/observation.rs +++ b/fendermint/vm/topdown/src/observation.rs @@ -120,6 +120,10 @@ impl CertifiedObservation { &self.observation } + pub fn observation_signature(&self) -> &RecoverableECDSASignature { + &self.observation_signature + } + pub fn ensure_valid(&self) -> anyhow::Result { let to_sign = fvm_ipld_encoding::to_vec(&self.observation)?; let (pk1, _) = self.observation_signature.recover(&to_sign)?; diff --git a/fendermint/vm/topdown/src/vote/mod.rs b/fendermint/vm/topdown/src/vote/mod.rs index c8b32f975..74885d8d5 100644 --- a/fendermint/vm/topdown/src/vote/mod.rs +++ b/fendermint/vm/topdown/src/vote/mod.rs @@ -17,6 +17,7 @@ use crate::vote::store::VoteStore; use crate::vote::tally::VoteTally; use crate::BlockHeight; use error::Error; +use fendermint_crypto::quorum::ECDSACertificate; use fendermint_crypto::SecretKey; use fendermint_vm_genesis::ValidatorKey; use serde::{Deserialize, Serialize}; @@ -121,7 +122,7 @@ impl VoteReactorClient { } /// Queries the vote tally to see if there are new quorum formed - pub async fn find_quorum(&self) -> anyhow::Result> { + pub async fn find_quorum(&self) -> anyhow::Result>> { self.request(VoteReactorRequest::FindQuorum).await } @@ -183,7 +184,7 @@ enum VoteReactorRequest { /// Get the current vote tally state variables in vote tally QueryState(oneshot::Sender), /// Queries the vote tally to see if there are 
new quorum formed - FindQuorum(oneshot::Sender>), + FindQuorum(oneshot::Sender>>), /// Update power of some validators. If the weight is zero, the validator is removed /// from the power table. UpdatePowerTable { diff --git a/fendermint/vm/topdown/src/vote/payload.rs b/fendermint/vm/topdown/src/vote/payload.rs index a941f8999..a5db8b62e 100644 --- a/fendermint/vm/topdown/src/vote/payload.rs +++ b/fendermint/vm/topdown/src/vote/payload.rs @@ -3,8 +3,9 @@ use crate::observation::{CertifiedObservation, Observation}; use crate::vote::Weight; -use crate::{BlockHeight, Bytes}; +use crate::BlockHeight; use anyhow::anyhow; +use fendermint_crypto::secp::RecoverableECDSASignature; use fendermint_vm_genesis::ValidatorKey; use serde::{Deserialize, Serialize}; use std::collections::HashMap; @@ -55,9 +56,9 @@ impl Vote { } } - pub fn ballot_sig(&self) -> &RecoverableECDSASignature { + pub fn observation_signature(&self) -> &RecoverableECDSASignature { match self { - Self::V1 { payload, .. } => &payload.signature, + Self::V1 { payload, .. 
} => payload.observation_signature(), } } } diff --git a/fendermint/vm/topdown/src/vote/store.rs b/fendermint/vm/topdown/src/vote/store.rs index d7413da58..cc10f359f 100644 --- a/fendermint/vm/topdown/src/vote/store.rs +++ b/fendermint/vm/topdown/src/vote/store.rs @@ -126,21 +126,20 @@ impl<'a> VoteAgg<'a> { pub fn generate_cert( &self, ordered_validators: Vec<(&ValidatorKey, &Weight)>, - ballot: &Ballot, - ) -> Result, Error> { - let mut cert = ECDSACertificate::new_of_size(ballot.clone(), ordered_validators.len()); + observation: &Observation, + ) -> Result, Error> { + let mut cert = ECDSACertificate::new_of_size(observation.clone(), ordered_validators.len()); for (idx, (validator, _)) in ordered_validators.into_iter().enumerate() { let Some(vote) = self.0.get(validator) else { continue; }; - if vote.ballot() == *ballot { + if *vote.observation() == *observation { cert.set_signature( idx, - ballot, validator.public_key(), - vote.ballot_sig().clone(), + vote.observation_signature().clone(), ) .map_err(|e| { tracing::error!(err = e.to_string(), "cannot verify signature"); diff --git a/fendermint/vm/topdown/src/vote/tally.rs b/fendermint/vm/topdown/src/vote/tally.rs index e0c1f3307..8b27113e1 100644 --- a/fendermint/vm/topdown/src/vote/tally.rs +++ b/fendermint/vm/topdown/src/vote/tally.rs @@ -150,7 +150,7 @@ impl VoteTally { } /// Find a block on the (from our perspective) finalized chain that gathered enough votes from validators. - pub fn find_quorum(&self) -> Result, Error> { + pub fn find_quorum(&self) -> Result>, Error> { let quorum_threshold = self.quorum_threshold(); let Some(max_height) = self.votes.latest_vote_height()? 
else { tracing::info!("vote store has no vote yet, skip finding quorum"); @@ -170,7 +170,8 @@ impl VoteTally { ); if weight >= quorum_threshold { - return Ok(Some(observation.clone())); + let cert = votes.generate_cert(self.ordered_validators(), observation)?; + return Ok(Some(cert)); } } @@ -220,7 +221,7 @@ impl VoteTally { fn ordered_validators(&self) -> Vec<(&ValidatorKey, &Weight)> { let mut sorted_powers = self.power_table.iter().collect::>(); - sorted_powers.sort_by(|(a, b)| { + sorted_powers.sort_by(|a, b| { let cmp = b.1.cmp(a.1); if cmp != Ordering::Equal { cmp @@ -389,7 +390,7 @@ mod tests { } let ob = vote_tally.find_quorum().unwrap().unwrap(); - assert_eq!(ob, observation); + assert_eq!(ob.payload(), observation); let new_powers = (0..3) .map(|_| (random_validator_key().1.clone(), 1)) @@ -435,6 +436,6 @@ mod tests { ]); let ob = vote_tally.find_quorum().unwrap().unwrap(); - assert_eq!(ob, observation); + assert_eq!(ob.payload(), observation); } } From 4a6a4b5b0e066a286f367fd8297b98d7d671d0ad Mon Sep 17 00:00:00 2001 From: cryptoAtwill Date: Tue, 1 Oct 2024 18:08:16 +0800 Subject: [PATCH 03/22] integrate quorum cert --- fendermint/vm/topdown/src/lib.rs | 93 +++++++++++++++++++++++- fendermint/vm/topdown/src/observation.rs | 14 +++- fendermint/vm/topdown/src/syncer/mod.rs | 42 ++++++++++- fendermint/vm/topdown/src/syncer/poll.rs | 8 ++ 4 files changed, 153 insertions(+), 4 deletions(-) diff --git a/fendermint/vm/topdown/src/lib.rs b/fendermint/vm/topdown/src/lib.rs index 71730ba65..148e5a958 100644 --- a/fendermint/vm/topdown/src/lib.rs +++ b/fendermint/vm/topdown/src/lib.rs @@ -19,6 +19,7 @@ pub mod vote; use async_stm::Stm; use async_trait::async_trait; use ethers::utils::hex; +use fendermint_crypto::quorum::ECDSACertificate; use fvm_shared::clock::ChainEpoch; use ipc_api::cross::IpcEnvelope; use ipc_api::staking::StakingChangeRequest; @@ -29,8 +30,10 @@ use std::time::Duration; pub use crate::cache::{SequentialAppendError, SequentialKeyCache, 
ValueIter}; pub use crate::error::Error; pub use crate::finality::CachedFinalityProvider; -use crate::observation::Observation; +use crate::observation::{LinearizedParentBlockView, Observation}; +use crate::syncer::ParentSyncerReactorClient; pub use crate::toggle::Toggle; +use crate::vote::VoteReactorClient; pub type BlockHeight = u64; pub type Bytes = Vec; @@ -117,6 +120,94 @@ pub enum Checkpoint { V1(Observation), } +/// Topdown proposal as part of fendermint proposal execution +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TopdownProposal { + cert: ECDSACertificate, + effects: (Vec, Vec), +} + +pub struct TopdownClient { + syncer: ParentSyncerReactorClient, + voting: VoteReactorClient, +} + +impl TopdownClient { + pub async fn find_topdown_proposal(&self) -> anyhow::Result> { + let Some(quorum_cert) = self.voting.find_quorum().await? else { + return Ok(None); + }; + + let Ok(views) = self + .syncer + .query_parent_block_view(quorum_cert.payload().parent_height) + .await? 
+ else { + // absorb the error, dont alert the caller + return Ok(None); + }; + + let mut linear = LinearizedParentBlockView::from(quorum_cert.payload()); + + let mut xnet_msgs = vec![]; + let mut validator_changes = vec![]; + + for maybe_view in views { + let Some(v) = maybe_view else { + tracing::error!( + till = quorum_cert.payload().parent_height, + "parent block view does not have all the data" + ); + return Ok(None); + }; + + if let Err(e) = linear.append(v.clone()) { + tracing::error!(err = e.to_string(), "parent block view cannot be appended"); + return Ok(None); + } + + if let Some(payload) = v.payload { + xnet_msgs.extend(payload.xnet_msgs); + validator_changes.extend(payload.validator_changes); + } + } + + let ob = match linear.into_observation() { + Ok(ob) => ob, + Err(e) => { + tracing::error!( + err = e.to_string(), + "cannot convert linearized parent view into observation" + ); + return Ok(None); + } + }; + + if ob != *quorum_cert.payload() { + // could be due to the minor quorum, just return no proposal + tracing::warn!( + created = ob.to_string(), + expected = quorum_cert.payload().to_string(), + "block view observation created not match quorum cert" + ); + return Ok(None); + } + + Ok(Some(TopdownProposal { + cert: quorum_cert, + effects: (xnet_msgs, validator_changes), + })) + } + + pub async fn parent_finalized(&self, checkpoint: Checkpoint) -> anyhow::Result<()> { + self.voting + .set_quorum_finalized(checkpoint.target_height()) + .await??; + self.syncer.finalize_parent_height(checkpoint).await?; + Ok(()) + } +} + /// The finality view for IPC parent at certain height. 
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct IPCParentFinality { diff --git a/fendermint/vm/topdown/src/observation.rs b/fendermint/vm/topdown/src/observation.rs index 4f803b4b5..d83331c6d 100644 --- a/fendermint/vm/topdown/src/observation.rs +++ b/fendermint/vm/topdown/src/observation.rs @@ -199,7 +199,7 @@ impl ObservationConfig { } } -struct LinearizedParentBlockView { +pub(crate) struct LinearizedParentBlockView { parent_height: u64, parent_hash: Option, cumulative_effects_comm: Bytes, @@ -215,6 +215,16 @@ impl From<&Checkpoint> for LinearizedParentBlockView { } } +impl From<&Observation> for LinearizedParentBlockView { + fn from(value: &Observation) -> Self { + LinearizedParentBlockView { + parent_height: value.parent_height, + parent_hash: Some(value.parent_hash.clone()), + cumulative_effects_comm: value.cumulative_effects_comm.clone(), + } + } +} + impl LinearizedParentBlockView { fn new_commitment(&mut self, to_append: Bytes) { let bytes = [ @@ -242,7 +252,7 @@ impl LinearizedParentBlockView { Ok(()) } - fn into_observation(self) -> Result { + pub fn into_observation(self) -> Result { let Some(hash) = self.parent_hash else { return Err(Error::CannotCommitObservationAtNullBlock( self.parent_height, diff --git a/fendermint/vm/topdown/src/syncer/mod.rs b/fendermint/vm/topdown/src/syncer/mod.rs index 691307367..14a325d4f 100644 --- a/fendermint/vm/topdown/src/syncer/mod.rs +++ b/fendermint/vm/topdown/src/syncer/mod.rs @@ -3,12 +3,14 @@ use crate::observation::{Observation, ObservationConfig}; use crate::proxy::ParentQueryProxy; +use crate::syncer::error::Error; +use crate::syncer::payload::ParentBlockView; use crate::syncer::poll::ParentPoll; use crate::syncer::store::ParentViewStore; use crate::{BlockHeight, Checkpoint}; use std::time::Duration; use tokio::select; -use tokio::sync::mpsc; +use tokio::sync::{mpsc, oneshot}; pub mod error; pub mod payload; @@ -92,11 +94,26 @@ impl ParentSyncerReactorClient { 
self.tx.send(ParentSyncerRequest::Finalized(cp)).await?; Ok(()) } + + pub async fn query_parent_block_view( + &self, + to: BlockHeight, + ) -> anyhow::Result>, Error>> { + let (tx, rx) = oneshot::channel(); + self.tx + .send(ParentSyncerRequest::QueryParentBlockViews { to, tx }) + .await?; + Ok(rx.await?) + } } enum ParentSyncerRequest { /// A new parent height is finalized Finalized(Checkpoint), + QueryParentBlockViews { + to: BlockHeight, + tx: oneshot::Sender>, Error>>, + }, } fn handle_request(req: ParentSyncerRequest, poller: &mut ParentPoll) @@ -111,5 +128,28 @@ where tracing::error!(height, err = e.to_string(), "cannot finalize parent viewer"); } } + ParentSyncerRequest::QueryParentBlockViews { to, tx } => { + let store = poller.store(); + + let mut r = vec![]; + + let start = poller.last_checkpoint().target_height() + 1; + for h in start..=to { + match store.get(h) { + Ok(v) => r.push(v), + Err(e) => { + tracing::error!( + height = h, + err = e.to_string(), + "cannot query parent block view" + ); + let _ = tx.send(Err(e)); + return; + } + } + } + + let _ = tx.send(Ok(r)); + } } } diff --git a/fendermint/vm/topdown/src/syncer/poll.rs b/fendermint/vm/topdown/src/syncer/poll.rs index 125491fae..f5ce93e4b 100644 --- a/fendermint/vm/topdown/src/syncer/poll.rs +++ b/fendermint/vm/topdown/src/syncer/poll.rs @@ -40,6 +40,14 @@ where } } + pub fn store(&self) -> &S { + &self.store + } + + pub fn last_checkpoint(&self) -> &Checkpoint { + &self.last_finalized + } + /// The target block height is finalized, purge all the parent view before the target height pub fn finalize(&mut self, checkpoint: Checkpoint) -> Result<(), Error> { let Some(min_height) = self.store.min_parent_view_height()? 
else { From 95426eb8c147a8a5c6eb5ae15c2e79730a6d3a2d Mon Sep 17 00:00:00 2001 From: cryptoAtwill Date: Thu, 3 Oct 2024 16:37:21 +0800 Subject: [PATCH 04/22] clear up types --- Cargo.lock | 1 + contracts/contracts/errors/IPCErrors.sol | 2 +- .../contracts/gateway/GatewayGetterFacet.sol | 10 +- .../gateway/router/TopDownFinalityFacet.sol | 24 +- contracts/contracts/interfaces/IGateway.sol | 4 +- contracts/contracts/lib/LibGateway.sol | 18 +- .../contracts/lib/LibGatewayActorStorage.sol | 4 +- contracts/contracts/structs/CrossNet.sol | 5 +- contracts/test/IntegrationTestBase.sol | 10 +- contracts/test/helpers/SelectorLibrary.sol | 4 +- .../test/integration/GatewayDiamond.t.sol | 18 +- .../test/integration/L2GatewayDiamond.t.sol | 2 +- contracts/test/integration/MultiSubnet.t.sol | 24 +- fendermint/app/settings/src/lib.rs | 13 +- fendermint/app/src/app.rs | 6 + fendermint/app/src/cmd/debug.rs | 37 +- fendermint/app/src/cmd/run.rs | 321 +++++----- fendermint/app/src/ipc.rs | 22 +- fendermint/vm/interpreter/src/chain.rs | 165 +---- .../vm/interpreter/src/fvm/state/ipc.rs | 28 +- fendermint/vm/interpreter/src/fvm/topdown.rs | 34 +- fendermint/vm/message/Cargo.toml | 1 + fendermint/vm/message/src/ipc.rs | 3 +- fendermint/vm/topdown/src/convert.rs | 41 +- fendermint/vm/topdown/src/finality/fetch.rs | 450 ------------- fendermint/vm/topdown/src/finality/mod.rs | 177 ------ fendermint/vm/topdown/src/finality/null.rs | 566 ----------------- fendermint/vm/topdown/src/launch.rs | 237 +++++++ fendermint/vm/topdown/src/lib.rs | 176 ++---- fendermint/vm/topdown/src/sync/mod.rs | 203 ------ fendermint/vm/topdown/src/sync/syncer.rs | 596 ------------------ fendermint/vm/topdown/src/sync/tendermint.rs | 47 -- fendermint/vm/topdown/src/syncer/mod.rs | 83 ++- fendermint/vm/topdown/src/syncer/poll.rs | 126 ++-- fendermint/vm/topdown/src/syncer/store.rs | 8 + fendermint/vm/topdown/src/toggle.rs | 130 ---- fendermint/vm/topdown/src/vote/error.rs | 3 + fendermint/vm/topdown/src/vote/mod.rs 
| 39 +- .../vm/topdown/src/vote/operation/paused.rs | 2 +- fendermint/vm/topdown/src/vote/tally.rs | 11 + fendermint/vm/topdown/src/voting.rs | 480 -------------- fendermint/vm/topdown/tests/vote_reactor.rs | 6 +- ipc/api/src/staking.rs | 6 +- .../src/commands/crossmsg/topdown_cross.rs | 2 +- ipc/provider/src/lib.rs | 6 +- ipc/provider/src/manager/evm/manager.rs | 4 +- ipc/provider/src/manager/subnet.rs | 4 +- ipld/resolver/src/behaviour/membership.rs | 16 +- ipld/resolver/src/client.rs | 3 +- ipld/resolver/src/lib.rs | 2 +- ipld/resolver/src/service.rs | 6 +- ipld/resolver/src/vote_record.rs | 5 + ipld/resolver/tests/smoke.rs | 6 +- 53 files changed, 827 insertions(+), 3370 deletions(-) delete mode 100644 fendermint/vm/topdown/src/finality/fetch.rs delete mode 100644 fendermint/vm/topdown/src/finality/mod.rs delete mode 100644 fendermint/vm/topdown/src/finality/null.rs create mode 100644 fendermint/vm/topdown/src/launch.rs delete mode 100644 fendermint/vm/topdown/src/sync/mod.rs delete mode 100644 fendermint/vm/topdown/src/sync/syncer.rs delete mode 100644 fendermint/vm/topdown/src/sync/tendermint.rs delete mode 100644 fendermint/vm/topdown/src/toggle.rs delete mode 100644 fendermint/vm/topdown/src/voting.rs diff --git a/Cargo.lock b/Cargo.lock index c6d879747..78d45926b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3374,6 +3374,7 @@ dependencies = [ "fendermint_vm_actor_interface", "fendermint_vm_encoding", "fendermint_vm_message", + "fendermint_vm_topdown", "fvm_ipld_encoding", "fvm_shared", "hex", diff --git a/contracts/contracts/errors/IPCErrors.sol b/contracts/contracts/errors/IPCErrors.sol index 7f838e1a4..88883d857 100644 --- a/contracts/contracts/errors/IPCErrors.sol +++ b/contracts/contracts/errors/IPCErrors.sol @@ -61,7 +61,7 @@ error NotValidator(address); error OldConfigurationNumber(); error PQDoesNotContainAddress(); error PQEmpty(); -error ParentFinalityAlreadyCommitted(); +error TopdownCheckpointAlreadyCommitted(); error PostboxNotExist(); error 
SignatureReplay(); error SubnetAlreadyKilled(); diff --git a/contracts/contracts/gateway/GatewayGetterFacet.sol b/contracts/contracts/gateway/GatewayGetterFacet.sol index 7dd47386e..2db734eac 100644 --- a/contracts/contracts/gateway/GatewayGetterFacet.sol +++ b/contracts/contracts/gateway/GatewayGetterFacet.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT OR Apache-2.0 pragma solidity ^0.8.23; -import {BottomUpCheckpoint, BottomUpMsgBatch, IpcEnvelope, ParentFinality} from "../structs/CrossNet.sol"; +import {BottomUpCheckpoint, BottomUpMsgBatch, IpcEnvelope, TopdownCheckpoint} from "../structs/CrossNet.sol"; import {QuorumInfo} from "../structs/Quorum.sol"; import {SubnetID, Subnet} from "../structs/Subnet.sol"; import {Membership} from "../structs/Subnet.sol"; @@ -71,13 +71,13 @@ contract GatewayGetterFacet { /// @notice Returns the parent chain finality information for a given block number. /// @param blockNumber The block number for which to retrieve parent-finality information. - function getParentFinality(uint256 blockNumber) external view returns (ParentFinality memory) { - return LibGateway.getParentFinality(blockNumber); + function getTopdownCheckpoint(uint256 blockNumber) external view returns (TopdownCheckpoint memory) { + return LibGateway.getTopdownCheckpoint(blockNumber); } /// @notice Gets the most recent parent-finality information from the parent. - function getLatestParentFinality() external view returns (ParentFinality memory) { - return LibGateway.getLatestParentFinality(); + function getLatestTopdownCheckpoint() external view returns (TopdownCheckpoint memory) { + return LibGateway.getLatestTopdownCheckpoint(); } /// @notice Returns the subnet with the given id. 
diff --git a/contracts/contracts/gateway/router/TopDownFinalityFacet.sol b/contracts/contracts/gateway/router/TopDownFinalityFacet.sol index b29f7e203..933b5a194 100644 --- a/contracts/contracts/gateway/router/TopDownFinalityFacet.sol +++ b/contracts/contracts/gateway/router/TopDownFinalityFacet.sol @@ -2,7 +2,7 @@ pragma solidity ^0.8.23; import {GatewayActorModifiers} from "../../lib/LibGatewayActorStorage.sol"; -import {ParentFinality} from "../../structs/CrossNet.sol"; +import {TopdownCheckpoint} from "../../structs/CrossNet.sol"; import {PermissionMode, Validator, ValidatorInfo, StakingChangeRequest, Membership} from "../../structs/Subnet.sol"; import {LibGateway} from "../../lib/LibGateway.sol"; @@ -16,17 +16,17 @@ contract TopDownFinalityFacet is GatewayActorModifiers { using LibValidatorTracking for ParentValidatorsTracker; using LibValidatorSet for ValidatorSet; - /// @notice commit the ipc parent finality into storage and returns the previous committed finality - /// This is useful to understand if the finalities are consistent or if there have been reorgs. - /// If there are no previous committed fainality, it will be default to zero values, i.e. zero height and block hash. - /// @param finality - the parent finality - /// @return hasCommittedBefore A flag that indicates if a finality record has been committed before. - /// @return previousFinality The previous finality information. - function commitParentFinality( - ParentFinality calldata finality - ) external systemActorOnly returns (bool hasCommittedBefore, ParentFinality memory previousFinality) { - previousFinality = LibGateway.commitParentFinality(finality); - hasCommittedBefore = previousFinality.height != 0; + /// @notice commit the ipc topdown checkpoint into storage and returns the previous committed checkpoint + /// This is useful to understand if the checkpoints are consistent or if there have been reorgs. 
+ /// If there are no previous committed checkpoint, it will be default to zero values, i.e. zero height and block hash. + /// @param checkpoint - the topdown checkpoint + /// @return hasCommittedBefore A flag that indicates if a checkpoint record has been committed before. + /// @return previousCheckpoint The previous checkpoint information. + function commitTopdownCheckpoint( + TopdownCheckpoint calldata checkpoint + ) external systemActorOnly returns (bool hasCommittedBefore, TopdownCheckpoint memory previousCheckpoint) { + previousCheckpoint = LibGateway.commitTopdownCheckpoint(checkpoint); + hasCommittedBefore = previousCheckpoint.height != 0; } /// @notice Store the validator change requests from parent. diff --git a/contracts/contracts/interfaces/IGateway.sol b/contracts/contracts/interfaces/IGateway.sol index 9d820dff6..fc5d4650c 100644 --- a/contracts/contracts/interfaces/IGateway.sol +++ b/contracts/contracts/interfaces/IGateway.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT OR Apache-2.0 pragma solidity ^0.8.23; -import {BottomUpCheckpoint, BottomUpMsgBatch, IpcEnvelope, ParentFinality} from "../structs/CrossNet.sol"; +import {BottomUpCheckpoint, BottomUpMsgBatch, IpcEnvelope, TopdownCheckpoint} from "../structs/CrossNet.sol"; import {SubnetID} from "../structs/Subnet.sol"; import {FvmAddress} from "../structs/FvmAddress.sol"; @@ -65,7 +65,7 @@ interface IGateway { function propagate(bytes32 msgCid) external payable; /// @notice commit the ipc parent finality into storage - function commitParentFinality(ParentFinality calldata finality) external; + function commitTopdownCheckpoint(TopdownCheckpoint calldata finality) external; /// @notice creates a new bottom-up checkpoint function createBottomUpCheckpoint( diff --git a/contracts/contracts/lib/LibGateway.sol b/contracts/contracts/lib/LibGateway.sol index f7c3e84ad..90fd69ba0 100644 --- a/contracts/contracts/lib/LibGateway.sol +++ b/contracts/contracts/lib/LibGateway.sol @@ -6,9 +6,9 @@ import 
{GatewayActorStorage, LibGatewayActorStorage} from "../lib/LibGatewayActo import {BURNT_FUNDS_ACTOR} from "../constants/Constants.sol"; import {SubnetID, Subnet, AssetKind, Asset} from "../structs/Subnet.sol"; import {SubnetActorGetterFacet} from "../subnet/SubnetActorGetterFacet.sol"; -import {CallMsg, IpcMsgKind, IpcEnvelope, OutcomeType, BottomUpMsgBatch, BottomUpMsgBatch, BottomUpCheckpoint, ParentFinality} from "../structs/CrossNet.sol"; +import {CallMsg, IpcMsgKind, IpcEnvelope, OutcomeType, BottomUpMsgBatch, BottomUpMsgBatch, BottomUpCheckpoint, TopdownCheckpoint} from "../structs/CrossNet.sol"; import {Membership} from "../structs/Subnet.sol"; -import {CannotSendCrossMsgToItself, MethodNotAllowed, MaxMsgsPerBatchExceeded, InvalidXnetMessage ,OldConfigurationNumber, NotRegisteredSubnet, InvalidActorAddress, ParentFinalityAlreadyCommitted, InvalidXnetMessageReason} from "../errors/IPCErrors.sol"; +import {CannotSendCrossMsgToItself, MethodNotAllowed, MaxMsgsPerBatchExceeded, InvalidXnetMessage ,OldConfigurationNumber, NotRegisteredSubnet, InvalidActorAddress, TopdownCheckpointAlreadyCommitted, InvalidXnetMessageReason} from "../errors/IPCErrors.sol"; import {CrossMsgHelper} from "../lib/CrossMsgHelper.sol"; import {FilAddress} from "fevmate/contracts/utils/FilAddress.sol"; import {SubnetIDHelper} from "../lib/SubnetIDHelper.sol"; @@ -116,27 +116,27 @@ library LibGateway { /// @notice obtain the ipc parent finality at certain block number /// @param blockNumber - the block number to obtain the finality - function getParentFinality(uint256 blockNumber) internal view returns (ParentFinality memory) { + function getTopdownCheckpoint(uint256 blockNumber) internal view returns (TopdownCheckpoint memory) { GatewayActorStorage storage s = LibGatewayActorStorage.appStorage(); return s.finalitiesMap[blockNumber]; } /// @notice obtain the latest committed ipc parent finality - function getLatestParentFinality() internal view returns (ParentFinality memory) { + function 
getLatestTopdownCheckpoint() internal view returns (TopdownCheckpoint memory) { GatewayActorStorage storage s = LibGatewayActorStorage.appStorage(); - return getParentFinality(s.latestParentHeight); + return getTopdownCheckpoint(s.latestParentHeight); } /// @notice commit the ipc parent finality into storage /// @param finality - the finality to be committed - function commitParentFinality( - ParentFinality calldata finality - ) internal returns (ParentFinality memory lastFinality) { + function commitTopdownCheckpoint( + TopdownCheckpoint calldata finality + ) internal returns (TopdownCheckpoint memory lastFinality) { GatewayActorStorage storage s = LibGatewayActorStorage.appStorage(); uint256 lastHeight = s.latestParentHeight; if (lastHeight >= finality.height) { - revert ParentFinalityAlreadyCommitted(); + revert TopdownCheckpointAlreadyCommitted(); } lastFinality = s.finalitiesMap[lastHeight]; diff --git a/contracts/contracts/lib/LibGatewayActorStorage.sol b/contracts/contracts/lib/LibGatewayActorStorage.sol index 4cb63536e..a8f41ecdc 100644 --- a/contracts/contracts/lib/LibGatewayActorStorage.sol +++ b/contracts/contracts/lib/LibGatewayActorStorage.sol @@ -3,7 +3,7 @@ pragma solidity ^0.8.23; import {NotSystemActor, NotEnoughFunds} from "../errors/IPCErrors.sol"; import {QuorumMap} from "../structs/Quorum.sol"; -import {BottomUpCheckpoint, BottomUpMsgBatch, IpcEnvelope, ParentFinality} from "../structs/CrossNet.sol"; +import {BottomUpCheckpoint, BottomUpMsgBatch, IpcEnvelope, TopdownCheckpoint} from "../structs/CrossNet.sol"; import {SubnetID, Subnet, ParentValidatorsTracker} from "../structs/Subnet.sol"; import {Membership} from "../structs/Subnet.sol"; import {AccountHelper} from "../lib/AccountHelper.sol"; @@ -60,7 +60,7 @@ struct GatewayActorStorage { /// SubnetID => Subnet mapping(bytes32 => Subnet) subnets; /// @notice The parent finalities. Key is the block number, value is the finality struct. 
- mapping(uint256 => ParentFinality) finalitiesMap; + mapping(uint256 => TopdownCheckpoint) finalitiesMap; /// @notice Postbox keeps track of all the cross-net messages triggered by /// an actor that need to be propagated further through the hierarchy. /// cross-net message id => CrossMsg diff --git a/contracts/contracts/structs/CrossNet.sol b/contracts/contracts/structs/CrossNet.sol index 368554b60..da1eae616 100644 --- a/contracts/contracts/structs/CrossNet.sol +++ b/contracts/contracts/structs/CrossNet.sol @@ -8,9 +8,12 @@ uint64 constant MAX_MSGS_PER_BATCH = 10; uint256 constant BATCH_PERIOD = 100; /// @notice The parent finality for IPC parent at certain height. -struct ParentFinality { +struct TopdownCheckpoint { uint256 height; bytes32 blockHash; + /// The commiment of topdown effects (topdown messages + validator changes). + /// Current version is the CID. + bytes effectsCommitment; } /// @notice A bottom-up checkpoint type. diff --git a/contracts/test/IntegrationTestBase.sol b/contracts/test/IntegrationTestBase.sol index 84eaf5456..c42e8233a 100644 --- a/contracts/test/IntegrationTestBase.sol +++ b/contracts/test/IntegrationTestBase.sol @@ -7,7 +7,7 @@ import "../contracts/errors/IPCErrors.sol"; import {EMPTY_BYTES, METHOD_SEND} from "../contracts/constants/Constants.sol"; import {ConsensusType} from "../contracts/enums/ConsensusType.sol"; import {IDiamond} from "../contracts/interfaces/IDiamond.sol"; -import {IpcEnvelope, BottomUpCheckpoint, IpcMsgKind, ParentFinality, CallMsg} from "../contracts/structs/CrossNet.sol"; +import {IpcEnvelope, BottomUpCheckpoint, IpcMsgKind, TopdownCheckpoint, CallMsg} from "../contracts/structs/CrossNet.sol"; import {FvmAddress} from "../contracts/structs/FvmAddress.sol"; import {SubnetID, AssetKind, PermissionMode, PermissionMode, Subnet, Asset, IPCAddress, Validator} from "../contracts/structs/Subnet.sol"; import {SubnetIDHelper} from "../contracts/lib/SubnetIDHelper.sol"; @@ -762,10 +762,10 @@ contract 
IntegrationTestBase is Test, TestParams, TestRegistry, TestSubnetActor, weights[1] = 100; weights[2] = 100; - ParentFinality memory finality = ParentFinality({height: block.number, blockHash: bytes32(0)}); + TopdownCheckpoint memory finality = TopdownCheckpoint({height: block.number, blockHash: bytes32(0), effectsCommitment: new bytes(0)}); vm.prank(FilAddress.SYSTEM_ACTOR); - gatewayDiamond.topDownFinalizer().commitParentFinality(finality); + gatewayDiamond.topDownFinalizer().commitTopdownCheckpoint(finality); } function setupWhiteListMethod(address caller, address src) public returns (bytes32) { @@ -809,11 +809,11 @@ contract IntegrationTestBase is Test, TestParams, TestRegistry, TestSubnetActor, weights[0] = weight; vm.deal(validator, 1); - ParentFinality memory finality = ParentFinality({height: block.number, blockHash: bytes32(0)}); + TopdownCheckpoint memory finality = TopdownCheckpoint({height: block.number, blockHash: bytes32(0), effectsCommitment: new bytes(0)}); // uint64 n = gatewayDiamond.getter().getLastConfigurationNumber() + 1; vm.startPrank(FilAddress.SYSTEM_ACTOR); - gatewayDiamond.topDownFinalizer().commitParentFinality(finality); + gatewayDiamond.topDownFinalizer().commitTopdownCheckpoint(finality); vm.stopPrank(); } diff --git a/contracts/test/helpers/SelectorLibrary.sol b/contracts/test/helpers/SelectorLibrary.sol index adcdc5e96..f33fcaa60 100644 --- a/contracts/test/helpers/SelectorLibrary.sol +++ b/contracts/test/helpers/SelectorLibrary.sol @@ -27,7 +27,7 @@ library SelectorLibrary { if (keccak256(abi.encodePacked(facetName)) == keccak256(abi.encodePacked("GatewayGetterFacet"))) { return abi.decode( - 
hex"000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000218789f83b0000000000000000000000000000000000000000000000000000000006c46853000000000000000000000000000000000000000000000000000000002da5794a00000000000000000000000000000000000000000000000000000000dd81b5cf0000000000000000000000000000000000000000000000000000000041b6a2e80000000000000000000000000000000000000000000000000000000038d6693200000000000000000000000000000000000000000000000000000000b3ab3f7400000000000000000000000000000000000000000000000000000000ac12d763000000000000000000000000000000000000000000000000000000004aa8f8a500000000000000000000000000000000000000000000000000000000ca41d5ce00000000000000000000000000000000000000000000000000000000444ead5100000000000000000000000000000000000000000000000000000000d6c5c39700000000000000000000000000000000000000000000000000000000544dddff000000000000000000000000000000000000000000000000000000006ad21bb000000000000000000000000000000000000000000000000000000000a517218f000000000000000000000000000000000000000000000000000000009704276600000000000000000000000000000000000000000000000000000000b1ba49b000000000000000000000000000000000000000000000000000000000f3229131000000000000000000000000000000000000000000000000000000000338150f0000000000000000000000000000000000000000000000000000000094074b03000000000000000000000000000000000000000000000000000000007edeac920000000000000000000000000000000000000000000000000000000006572c1a00000000000000000000000000000000000000000000000000000000c66c66a1000000000000000000000000000000000000000000000000000000003594c3c1000000000000000000000000000000000000000000000000000000009d3070b50000000000000000000000000000000000000000000000000000000042398a9a00000000000000000000000000000000000000000000000000000000fa34a400000000000000000000000000000000000000000000000000000000005d02968500000000000000000000000000000000000000000000000000000000599c7bd10000000000000000000000000000000000000000000000000000000005aff0b30000
00000000000000000000000000000000000000000000000000008cfd78e70000000000000000000000000000000000000000000000000000000002e30f9a00000000000000000000000000000000000000000000000000000000a2b6715800000000000000000000000000000000000000000000000000000000", + hex"000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000218789f83b0000000000000000000000000000000000000000000000000000000006c46853000000000000000000000000000000000000000000000000000000002da5794a00000000000000000000000000000000000000000000000000000000dd81b5cf0000000000000000000000000000000000000000000000000000000041b6a2e80000000000000000000000000000000000000000000000000000000038d6693200000000000000000000000000000000000000000000000000000000b3ab3f7400000000000000000000000000000000000000000000000000000000ac12d763000000000000000000000000000000000000000000000000000000004aa8f8a500000000000000000000000000000000000000000000000000000000ca41d5ce00000000000000000000000000000000000000000000000000000000444ead5100000000000000000000000000000000000000000000000000000000d6c5c39700000000000000000000000000000000000000000000000000000000544dddff000000000000000000000000000000000000000000000000000000006ad21bb000000000000000000000000000000000000000000000000000000000a517218f000000000000000000000000000000000000000000000000000000009704276600000000000000000000000000000000000000000000000000000000b1ba49b000000000000000000000000000000000000000000000000000000000f322913100000000000000000000000000000000000000000000000000000000c17117e90000000000000000000000000000000000000000000000000000000094074b030000000000000000000000000000000000000000000000000000000006572c1a00000000000000000000000000000000000000000000000000000000c66c66a1000000000000000000000000000000000000000000000000000000003594c3c1000000000000000000000000000000000000000000000000000000009d3070b50000000000000000000000000000000000000000000000000000000042398a9a000000000000000000000000000000000000000000000000000000003c71caeb00000000000
000000000000000000000000000000000000000000000fa34a400000000000000000000000000000000000000000000000000000000005d02968500000000000000000000000000000000000000000000000000000000599c7bd10000000000000000000000000000000000000000000000000000000005aff0b3000000000000000000000000000000000000000000000000000000008cfd78e70000000000000000000000000000000000000000000000000000000002e30f9a00000000000000000000000000000000000000000000000000000000a2b6715800000000000000000000000000000000000000000000000000000000", (bytes4[]) ); } @@ -55,7 +55,7 @@ library SelectorLibrary { if (keccak256(abi.encodePacked(facetName)) == keccak256(abi.encodePacked("TopDownFinalityFacet"))) { return abi.decode( - hex"000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000040df144610000000000000000000000000000000000000000000000000000000011196974000000000000000000000000000000000000000000000000000000008fbe0b7c00000000000000000000000000000000000000000000000000000000e49a547d00000000000000000000000000000000000000000000000000000000", + hex"000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000040df144610000000000000000000000000000000000000000000000000000000098ac2e7d000000000000000000000000000000000000000000000000000000008fbe0b7c00000000000000000000000000000000000000000000000000000000e49a547d00000000000000000000000000000000000000000000000000000000", (bytes4[]) ); } diff --git a/contracts/test/integration/GatewayDiamond.t.sol b/contracts/test/integration/GatewayDiamond.t.sol index 0759b98f0..d779d33f2 100644 --- a/contracts/test/integration/GatewayDiamond.t.sol +++ b/contracts/test/integration/GatewayDiamond.t.sol @@ -12,7 +12,7 @@ import {IDiamond} from "../../contracts/interfaces/IDiamond.sol"; import {IDiamondLoupe} from "../../contracts/interfaces/IDiamondLoupe.sol"; import {IDiamondCut} from "../../contracts/interfaces/IDiamondCut.sol"; import {QuorumInfo} from 
"../../contracts/structs/Quorum.sol"; -import {IpcEnvelope, BottomUpMsgBatch, BottomUpCheckpoint, ParentFinality} from "../../contracts/structs/CrossNet.sol"; +import {IpcEnvelope, BottomUpMsgBatch, BottomUpCheckpoint, TopdownCheckpoint} from "../../contracts/structs/CrossNet.sol"; import {FvmAddress} from "../../contracts/structs/FvmAddress.sol"; import {SubnetID, Subnet, IPCAddress, Validator, StakingChange, StakingChangeRequest, Asset, StakingOperation} from "../../contracts/structs/Subnet.sol"; import {SubnetIDHelper} from "../../contracts/lib/SubnetIDHelper.sol"; @@ -948,7 +948,7 @@ contract GatewayActorDiamondTest is Test, IntegrationTestBase, SubnetWithNativeT ); } - function testGatewayDiamond_CommitParentFinality_Fails_NotSystemActor() public { + function testGatewayDiamond_CommitTopdownCheckpoint_Fails_NotSystemActor() public { address caller = vm.addr(100); FvmAddress[] memory validators = new FvmAddress[](1); @@ -959,9 +959,9 @@ contract GatewayActorDiamondTest is Test, IntegrationTestBase, SubnetWithNativeT vm.prank(caller); vm.expectRevert(NotSystemActor.selector); - ParentFinality memory finality = ParentFinality({height: block.number, blockHash: bytes32(0)}); + TopdownCheckpoint memory finality = TopdownCheckpoint({height: block.number, blockHash: bytes32(0), effectsCommitment: new bytes(0)}); - gatewayDiamond.topDownFinalizer().commitParentFinality(finality); + gatewayDiamond.topDownFinalizer().commitTopdownCheckpoint(finality); } function testGatewayDiamond_applyFinality_works() public { @@ -1032,7 +1032,7 @@ contract GatewayActorDiamondTest is Test, IntegrationTestBase, SubnetWithNativeT vm.stopPrank(); } - function testGatewayDiamond_CommitParentFinality_Works_WithQuery() public { + function testGatewayDiamond_CommitTopdownCheckpoint_Works_WithQuery() public { FvmAddress[] memory validators = new FvmAddress[](2); validators[0] = FvmAddressHelper.from(vm.addr(100)); validators[1] = FvmAddressHelper.from(vm.addr(101)); @@ -1045,14 +1045,14 @@ 
contract GatewayActorDiamondTest is Test, IntegrationTestBase, SubnetWithNativeT // not the same as init committed parent finality height vm.roll(10); - ParentFinality memory finality = ParentFinality({height: block.number, blockHash: bytes32(0)}); + TopdownCheckpoint memory finality = TopdownCheckpoint({height: block.number, blockHash: bytes32(0), effectsCommitment: new bytes(0)}); - gatewayDiamond.topDownFinalizer().commitParentFinality(finality); - ParentFinality memory committedFinality = gatewayDiamond.getter().getParentFinality(block.number); + gatewayDiamond.topDownFinalizer().commitTopdownCheckpoint(finality); + TopdownCheckpoint memory committedFinality = gatewayDiamond.getter().getTopdownCheckpoint(block.number); require(committedFinality.height == finality.height, "heights are not equal"); require(committedFinality.blockHash == finality.blockHash, "blockHash is not equal"); - require(gatewayDiamond.getter().getLatestParentFinality().height == block.number, "finality height not equal"); + require(gatewayDiamond.getter().getLatestTopdownCheckpoint().height == block.number, "finality height not equal"); vm.stopPrank(); } diff --git a/contracts/test/integration/L2GatewayDiamond.t.sol b/contracts/test/integration/L2GatewayDiamond.t.sol index 69409fae6..e70eb9cee 100644 --- a/contracts/test/integration/L2GatewayDiamond.t.sol +++ b/contracts/test/integration/L2GatewayDiamond.t.sol @@ -30,7 +30,7 @@ contract L2GatewayActorDiamondTest is Test, L2GatewayActorDiamond { using CrossMsgHelper for IpcEnvelope; using GatewayFacetsHelper for GatewayDiamond; - function testGatewayDiamond_CommitParentFinality_BigNumberOfMessages() public { + function testGatewayDiamond_CommitTopdownCheckpoint_BigNumberOfMessages() public { uint256 n = 2000; FvmAddress[] memory validators = new FvmAddress[](1); validators[0] = FvmAddressHelper.from(vm.addr(100)); diff --git a/contracts/test/integration/MultiSubnet.t.sol b/contracts/test/integration/MultiSubnet.t.sol index 
088f828fb..9211899ce 100644 --- a/contracts/test/integration/MultiSubnet.t.sol +++ b/contracts/test/integration/MultiSubnet.t.sol @@ -4,7 +4,7 @@ pragma solidity ^0.8.23; import "forge-std/Test.sol"; import "../../contracts/errors/IPCErrors.sol"; import {EMPTY_BYTES, METHOD_SEND} from "../../contracts/constants/Constants.sol"; -import {IpcEnvelope, BottomUpMsgBatch, BottomUpCheckpoint, ParentFinality, IpcMsgKind, OutcomeType} from "../../contracts/structs/CrossNet.sol"; +import {IpcEnvelope, BottomUpMsgBatch, BottomUpCheckpoint, TopdownCheckpoint, IpcMsgKind, OutcomeType} from "../../contracts/structs/CrossNet.sol"; import {FvmAddress} from "../../contracts/structs/FvmAddress.sol"; import {SubnetID, Subnet, IPCAddress, Validator} from "../../contracts/structs/Subnet.sol"; import {SubnetIDHelper} from "../../contracts/lib/SubnetIDHelper.sol"; @@ -161,8 +161,8 @@ contract MultiSubnetTest is Test, IntegrationTestBase { IpcEnvelope[] memory msgs = new IpcEnvelope[](1); msgs[0] = expected; - // TODO: commitParentFinality doesn't not affect anything in this test. - commitParentFinality(nativeSubnet.gatewayAddr); + // TODO: commitTopdownCheckpoint doesn't not affect anything in this test. + commitTopdownCheckpoint(nativeSubnet.gatewayAddr); executeTopDownMsgs(msgs, nativeSubnet.id, nativeSubnet.gateway); @@ -290,8 +290,8 @@ contract MultiSubnetTest is Test, IntegrationTestBase { IpcEnvelope[] memory msgs = new IpcEnvelope[](1); msgs[0] = expected; - // TODO: commitParentFinality doesn't not affect anything in this test. - commitParentFinality(nativeSubnet.gatewayAddr); + // TODO: commitTopdownCheckpoint doesn't not affect anything in this test. 
+ commitTopdownCheckpoint(nativeSubnet.gatewayAddr); vm.expectRevert(); executeTopDownMsgsRevert(msgs, nativeSubnet.id, nativeSubnet.gateway); @@ -327,7 +327,7 @@ contract MultiSubnetTest is Test, IntegrationTestBase { IpcEnvelope[] memory msgs = new IpcEnvelope[](1); msgs[0] = expected; - commitParentFinality(tokenSubnet.gatewayAddr); + commitTopdownCheckpoint(tokenSubnet.gatewayAddr); executeTopDownMsgs(msgs, tokenSubnet.id, tokenSubnet.gateway); @@ -574,7 +574,7 @@ contract MultiSubnetTest is Test, IntegrationTestBase { IpcEnvelope[] memory msgs = new IpcEnvelope[](1); msgs[0] = expected; - commitParentFinality(tokenSubnet.gatewayAddr); + commitTopdownCheckpoint(tokenSubnet.gatewayAddr); vm.expectRevert(); executeTopDownMsgsRevert(msgs, tokenSubnet.id, tokenSubnet.gateway); @@ -1133,7 +1133,7 @@ contract MultiSubnetTest is Test, IntegrationTestBase { IpcEnvelope[] memory msgs = new IpcEnvelope[](1); msgs[0] = xnetCallMsg; - commitParentFinality(nativeSubnet.gatewayAddr); + commitTopdownCheckpoint(nativeSubnet.gatewayAddr); executeTopDownMsgs(msgs, nativeSubnet.id, nativeSubnet.gateway); assertEq(address(recipient).balance, amount); @@ -1281,20 +1281,20 @@ contract MultiSubnetTest is Test, IntegrationTestBase { IpcEnvelope[] memory msgs = new IpcEnvelope[](1); msgs[0] = xnetCallMsg; - commitParentFinality(tokenSubnet.gatewayAddr); + commitTopdownCheckpoint(tokenSubnet.gatewayAddr); executeTopDownMsgs(msgs, tokenSubnet.id, tokenSubnet.gateway); assertEq(address(recipient).balance, amount); } - function commitParentFinality(address gateway) internal { + function commitTopdownCheckpoint(address gateway) internal { vm.roll(10); - ParentFinality memory finality = ParentFinality({height: block.number, blockHash: bytes32(0)}); + TopdownCheckpoint memory finality = TopdownCheckpoint({height: block.number, blockHash: bytes32(0), effectsCommitment: new bytes(0)}); TopDownFinalityFacet gwTopDownFinalityFacet = TopDownFinalityFacet(address(gateway)); 
vm.prank(FilAddress.SYSTEM_ACTOR); - gwTopDownFinalityFacet.commitParentFinality(finality); + gwTopDownFinalityFacet.commitTopdownCheckpoint(finality); } function executeTopDownMsgs(IpcEnvelope[] memory msgs, SubnetID memory subnet, GatewayDiamond gw) internal { diff --git a/fendermint/app/settings/src/lib.rs b/fendermint/app/settings/src/lib.rs index 1230b3b81..26d814c11 100644 --- a/fendermint/app/settings/src/lib.rs +++ b/fendermint/app/settings/src/lib.rs @@ -164,22 +164,13 @@ pub struct TopDownSettings { /// conservative and avoid other from rejecting the proposal because they don't see the /// height as final yet. pub chain_head_delay: BlockHeight, - /// The number of blocks on top of `chain_head_delay` to wait before proposing a height - /// as final on the parent chain, to avoid slight disagreements between validators whether - /// a block is final, or not just yet. - pub proposal_delay: BlockHeight, /// The max number of blocks one should make the topdown proposal pub max_proposal_range: BlockHeight, - /// The max number of blocks to hold in memory for parent syncer - pub max_cache_blocks: Option, + /// The max number of blocks to hold in the parent view store for topdown syncer + pub parent_view_store_max_blocks: Option, /// Parent syncing cron period, in seconds #[serde_as(as = "DurationSeconds")] pub polling_interval: Duration, - /// Top down exponential back off retry base - #[serde_as(as = "DurationSeconds")] - pub exponential_back_off: Duration, - /// The max number of retries for exponential backoff before giving up - pub exponential_retry_limit: usize, /// The parent rpc http endpoint pub parent_http_endpoint: Url, /// Timeout for calls to the parent Ethereum API. 
diff --git a/fendermint/app/src/app.rs b/fendermint/app/src/app.rs index c44b9addd..eed23e78b 100644 --- a/fendermint/app/src/app.rs +++ b/fendermint/app/src/app.rs @@ -30,6 +30,8 @@ use fendermint_vm_interpreter::{ }; use fendermint_vm_message::query::FvmQueryHeight; use fendermint_vm_snapshot::{SnapshotClient, SnapshotError}; +use fendermint_vm_topdown::launch::Toggle; +use fendermint_vm_topdown::TopdownClient; use fvm::engine::MultiEngine; use fvm_ipld_blockstore::Blockstore; use fvm_shared::chainid::ChainID; @@ -312,6 +314,10 @@ where Ok(ret) } + pub async fn enable_topdown(&mut self, topdown: TopdownClient) { + self.chain_env.topdown_client = Toggle::enable(topdown); + } + /// Get a read only fvm execution state. This is useful to perform query commands targeting /// the latest state. pub fn new_read_only_exec_state( diff --git a/fendermint/app/src/cmd/debug.rs b/fendermint/app/src/cmd/debug.rs index 391f767b5..1c8431b7c 100644 --- a/fendermint/app/src/cmd/debug.rs +++ b/fendermint/app/src/cmd/debug.rs @@ -32,39 +32,6 @@ cmd! { } } -async fn export_topdown_events(args: &DebugExportTopDownEventsArgs) -> anyhow::Result<()> { - // Configuration for the child subnet on the parent network, - // based on how it's done in `run.rs` and the `genesis ipc from-parent` command. 
- let parent_provider = IpcProvider::new_with_subnet( - None, - ipc_provider::config::Subnet { - id: args - .subnet_id - .parent() - .ok_or_else(|| anyhow!("subnet is not a child"))?, - config: SubnetConfig::Fevm(EVMSubnet { - provider_http: args.parent_endpoint.clone(), - provider_timeout: None, - auth_token: args.parent_auth_token.clone(), - registry_addr: args.parent_registry, - gateway_addr: args.parent_gateway, - }), - }, - )?; - - let parent_proxy = IPCProviderProxy::new(parent_provider, args.subnet_id.clone()) - .context("failed to create provider proxy")?; - - let events = fendermint_vm_topdown::sync::fetch_topdown_events( - &parent_proxy, - args.start_block_height, - args.end_block_height, - ) - .await - .context("failed to fetch topdown events")?; - - let json = serde_json::to_string_pretty(&events)?; - std::fs::write(&args.events_file, json)?; - - Ok(()) +async fn export_topdown_events(_args: &DebugExportTopDownEventsArgs) -> anyhow::Result<()> { + todo!("integrate new RPC endpoints") } diff --git a/fendermint/app/src/cmd/run.rs b/fendermint/app/src/cmd/run.rs index dca099b1f..722d35b6d 100644 --- a/fendermint/app/src/cmd/run.rs +++ b/fendermint/app/src/cmd/run.rs @@ -1,10 +1,14 @@ // Copyright 2022-2024 Protocol Labs // SPDX-License-Identifier: Apache-2.0, MIT +use crate::cmd::key::read_secret_key; +use crate::{cmd, options::run::RunArgs, settings::Settings}; use anyhow::{anyhow, bail, Context}; use async_stm::atomically_or_err; +use async_trait::async_trait; use fendermint_abci::ApplicationService; -use fendermint_app::ipc::{AppParentFinalityQuery, AppVote}; +use fendermint_app::ipc::AppParentFinalityQuery; +use fendermint_app::observe::register_metrics as register_consensus_metrics; use fendermint_app::{App, AppConfig, AppStore, BitswapBlockstore}; use fendermint_app_settings::AccountKind; use fendermint_crypto::SecretKey; @@ -21,27 +25,36 @@ use fendermint_vm_interpreter::{ }; use fendermint_vm_resolver::ipld::IpldResolver; use 
fendermint_vm_snapshot::{SnapshotManager, SnapshotParams}; +use fendermint_vm_topdown::launch::{run_topdown, Toggle}; +use fendermint_vm_topdown::observation::ObservationConfig; use fendermint_vm_topdown::observe::register_metrics as register_topdown_metrics; -use fendermint_vm_topdown::proxy::{IPCProviderProxy, IPCProviderProxyWithLatency}; -use fendermint_vm_topdown::sync::launch_polling_syncer; -use fendermint_vm_topdown::voting::{publish_vote_loop, Error as VoteError, VoteTally}; -use fendermint_vm_topdown::{CachedFinalityProvider, IPCParentFinality, Toggle}; +use fendermint_vm_topdown::proxy::{ + IPCProviderProxy, IPCProviderProxyWithLatency, ParentQueryProxy, +}; +use fendermint_vm_topdown::syncer::payload::ParentBlockView; +use fendermint_vm_topdown::syncer::poll::ParentPoll; +use fendermint_vm_topdown::syncer::store::{InMemoryParentViewStore, ParentViewStore}; +use fendermint_vm_topdown::syncer::{start_parent_syncer, ParentPoller, ParentSyncerConfig}; +use fendermint_vm_topdown::vote::error::Error; +use fendermint_vm_topdown::vote::gossip::GossipClient; +use fendermint_vm_topdown::vote::payload::Vote; +use fendermint_vm_topdown::vote::VoteConfig; +use fendermint_vm_topdown::{BlockHeight, Checkpoint, TopdownClient}; use fvm_shared::address::{current_network, Address, Network}; -use ipc_ipld_resolver::{Event as ResolverEvent, VoteRecord}; +use ipc_api::subnet_id::SubnetID; +use ipc_ipld_resolver::{Event as ResolverEvent, Event, SubnetVoteRecord}; use ipc_observability::observe::register_metrics as register_default_metrics; use ipc_provider::config::subnet::{EVMSubnet, SubnetConfig}; use ipc_provider::IpcProvider; use libp2p::identity::secp256k1; use libp2p::identity::Keypair; use std::sync::Arc; -use tokio::sync::broadcast::error::RecvError; +use tendermint_rpc::Client; +use tokio::sync::broadcast; +use tokio::sync::broadcast::error::{RecvError, TryRecvError}; use tower::ServiceBuilder; use tracing::info; -use crate::cmd::key::read_secret_key; -use 
crate::{cmd, options::run::RunArgs, settings::Settings}; -use fendermint_app::observe::register_metrics as register_consensus_metrics; - cmd! { RunArgs(self, settings) { run(settings).await @@ -102,15 +115,7 @@ async fn run(settings: Settings) -> anyhow::Result<()> { } }; - let validator_keypair = validator.as_ref().map(|(sk, _)| { - let mut bz = sk.serialize(); - let sk = libp2p::identity::secp256k1::SecretKey::try_from_bytes(&mut bz) - .expect("secp256k1 secret key"); - let kp = libp2p::identity::secp256k1::Keypair::from(sk); - libp2p::identity::Keypair::from(kp) - }); - - let validator_ctx = validator.map(|(sk, addr)| { + let validator_ctx = validator.clone().map(|(sk, addr)| { // For now we are using the validator key for submitting transactions. // This allows us to identify transactions coming from empowered validators, to give priority to protocol related transactions. let broadcaster = Broadcaster::new( @@ -161,12 +166,9 @@ async fn run(settings: Settings) -> anyhow::Result<()> { NamespaceBlockstore::new(db.clone(), ns.state_store).context("error creating state DB")?; let checkpoint_pool = CheckpointPool::new(); - let parent_finality_votes = VoteTally::empty(); - - let topdown_enabled = settings.topdown_enabled(); // If enabled, start a resolver that communicates with the application through the resolve pool. 
- if settings.resolver_enabled() { + let ipld_gossip_client = if settings.resolver_enabled() { let mut service = make_resolver_service(&settings, db.clone(), state_store.clone(), ns.bit_store)?; @@ -192,36 +194,13 @@ async fn run(settings: Settings) -> anyhow::Result<()> { own_subnet_id.clone(), ); - if topdown_enabled { - if let Some(key) = validator_keypair { - let parent_finality_votes = parent_finality_votes.clone(); - - tracing::info!("starting the parent finality vote gossip loop..."); - tokio::spawn(async move { - publish_vote_loop( - parent_finality_votes, - settings.ipc.vote_interval, - settings.ipc.vote_timeout, - key, - own_subnet_id, - client, - |height, block_hash| { - AppVote::ParentFinality(IPCParentFinality { height, block_hash }) - }, - ) - .await - }); - } - } else { - tracing::info!("parent finality vote gossip disabled"); - } - - tracing::info!("subscribing to gossip..."); + info!("subscribing to gossip..."); let rx = service.subscribe(); - let parent_finality_votes = parent_finality_votes.clone(); - tokio::spawn(async move { - dispatch_resolver_events(rx, parent_finality_votes, topdown_enabled).await; - }); + let gossip_client = IPLDGossip { + rx, + client, + subnet: own_subnet_id, + }; tracing::info!("starting the IPLD Resolver Service..."); tokio::spawn(async move { @@ -232,40 +211,11 @@ async fn run(settings: Settings) -> anyhow::Result<()> { tracing::info!("starting the IPLD Resolver..."); tokio::spawn(async move { resolver.run().await }); - } else { - tracing::info!("IPLD Resolver disabled.") - } - let (parent_finality_provider, ipc_tuple) = if topdown_enabled { - info!("topdown finality enabled"); - let topdown_config = settings.ipc.topdown_config()?; - let mut config = fendermint_vm_topdown::Config::new( - topdown_config.chain_head_delay, - topdown_config.polling_interval, - topdown_config.exponential_back_off, - topdown_config.exponential_retry_limit, - ) - .with_proposal_delay(topdown_config.proposal_delay) - 
.with_max_proposal_range(topdown_config.max_proposal_range); - - if let Some(v) = topdown_config.max_cache_blocks { - info!(value = v, "setting max cache blocks"); - config = config.with_max_cache_blocks(v); - } - - let ipc_provider = { - let p = make_ipc_provider_proxy(&settings)?; - Arc::new(IPCProviderProxyWithLatency::new(p)) - }; - - let finality_provider = - CachedFinalityProvider::uninitialized(config.clone(), ipc_provider.clone()).await?; - - let p = Arc::new(Toggle::enabled(finality_provider)); - (p, Some((ipc_provider, config))) + Some(gossip_client) } else { - info!("topdown finality disabled"); - (Arc::new(Toggle::disabled()), None) + tracing::info!("IPLD Resolver disabled."); + None }; // Start a snapshot manager in the background. @@ -294,7 +244,7 @@ async fn run(settings: Settings) -> anyhow::Result<()> { None }; - let app: App<_, _, AppStore, _> = App::new( + let mut app: App<_, _, AppStore, _> = App::new( AppConfig { app_namespace: ns.app, state_hist_namespace: ns.state_hist, @@ -306,29 +256,66 @@ async fn run(settings: Settings) -> anyhow::Result<()> { interpreter, ChainEnv { checkpoint_pool, - parent_finality_provider: parent_finality_provider.clone(), - parent_finality_votes: parent_finality_votes.clone(), + topdown_client: Toggle::::disable(), }, snapshots, )?; - if let Some((agent_proxy, config)) = ipc_tuple { + if settings.topdown_enabled() { + info!("topdown finality enabled"); + let app_parent_finality_query = AppParentFinalityQuery::new(app.clone()); - tokio::spawn(async move { - match launch_polling_syncer( - app_parent_finality_query, - config, - parent_finality_provider, - parent_finality_votes, - agent_proxy, - tendermint_client, - ) - .await - { - Ok(_) => {} - Err(e) => tracing::error!("cannot launch polling syncer: {e}"), - } - }); + + let topdown_config = settings.ipc.topdown_config()?; + let mut config = fendermint_vm_topdown::Config { + syncer: ParentSyncerConfig { + request_channel_size: 1024, + broadcast_channel_size: 1024, + 
chain_head_delay: topdown_config.chain_head_delay, + polling_interval: topdown_config.polling_interval, + max_store_blocks: topdown_config.parent_view_store_max_blocks.unwrap_or(2000), + sync_many: true, + observation: ObservationConfig { + max_observation_range: Some(topdown_config.max_proposal_range), + }, + }, + voting: VoteConfig { + req_channel_buffer_size: 1024, + req_batch_processing_size: 10, + gossip_req_processing_size: 256, + voting_sleep_interval_sec: 10, + }, + }; + + let parent_proxy = Arc::new(IPCProviderProxyWithLatency::new(make_ipc_provider_proxy( + &settings, + )?)); + let parent_view_store = InMemoryParentViewStore::new(); + + let gossip_client = ipld_gossip_client + .ok_or_else(|| anyhow!("topdown enabled but ipld is not, enable ipld first"))?; + + let client = run_topdown( + app_parent_finality_query, + config, + validator + .clone() + .ok_or_else(|| anyhow!("need validator key to run topdown"))? + .0, + gossip_client, + parent_proxy, + move |checkpoint, proxy, config, rx| { + let poller_inner = + ParentPoll::new(config, proxy, parent_view_store, checkpoint.clone()); + TendermintAwareParentPoller { + client: tendermint_client.clone(), + inner: poller_inner, + } + }, + ) + .await?; + + app.enable_topdown(client); } // Start the metrics on a background thread. @@ -402,7 +389,7 @@ fn make_resolver_service( db: RocksDb, state_store: NamespaceBlockstore, bit_store_ns: String, -) -> anyhow::Result> { +) -> anyhow::Result> { // Blockstore for Bitswap. let bit_store = NamespaceBlockstore::new(db, bit_store_ns).context("error creating bit DB")?; @@ -505,65 +492,85 @@ fn to_address(sk: &SecretKey, kind: &AccountKind) -> anyhow::Result
{ } } -async fn dispatch_resolver_events( - mut rx: tokio::sync::broadcast::Receiver>, - parent_finality_votes: VoteTally, - topdown_enabled: bool, -) { - loop { - match rx.recv().await { - Ok(event) => match event { - ResolverEvent::ReceivedPreemptive(_, _) => {} - ResolverEvent::ReceivedVote(vote) => { - dispatch_vote(*vote, &parent_finality_votes, topdown_enabled).await; - } +struct IPLDGossip { + rx: broadcast::Receiver>, + client: ipc_ipld_resolver::Client, + subnet: SubnetID, +} + +#[async_trait] +impl GossipClient for IPLDGossip { + fn try_poll_vote(&mut self) -> Result, Error> { + Ok(match self.rx.try_recv() { + Ok(v) => match v { + ResolverEvent::ReceivedVote(v) => Some(*v), + _ => None, }, - Err(RecvError::Lagged(n)) => { - tracing::warn!("the resolver service skipped {n} gossip events") + Err(TryRecvError::Lagged(n)) => { + tracing::warn!("the resolver service skipped {n} gossip events"); + None } - Err(RecvError::Closed) => { + Err(TryRecvError::Closed) => { tracing::error!("the resolver service stopped receiving gossip"); - return; + None } - } + Err(TryRecvError::Empty) => None, + }) + } + + async fn publish_vote(&self, vote: Vote) -> Result<(), Error> { + let v = SubnetVoteRecord { + subnet: self.subnet.clone(), + vote, + }; + self.client + .publish_vote(v) + .map_err(|e| Error::CannotPublishVote(e.to_string())) } } -async fn dispatch_vote( - vote: VoteRecord, - parent_finality_votes: &VoteTally, - topdown_enabled: bool, -) { - match vote.content { - AppVote::ParentFinality(f) => { - if !topdown_enabled { - tracing::debug!("ignoring vote; topdown disabled"); - return; - } - let res = atomically_or_err(|| { - parent_finality_votes.add_vote( - vote.public_key.clone(), - f.height, - f.block_hash.clone(), - ) - }) - .await; - - match res { - Err(e @ VoteError::Equivocation(_, _, _, _)) => { - tracing::warn!(error = e.to_string(), "failed to handle vote"); - } - Err(e @ ( - VoteError::Uninitialized // early vote, we're not ready yet - | 
VoteError::UnpoweredValidator(_) // maybe arrived too early or too late, or spam - | VoteError::UnexpectedBlock(_, _) // won't happen here - )) => { - tracing::debug!(error = e.to_string(), "failed to handle vote"); - } - _ => { - tracing::debug!("vote handled"); - } - }; +struct TendermintAwareParentPoller { + client: tendermint_rpc::HttpClient, + inner: ParentPoll, +} + +#[async_trait] +impl ParentPoller for TendermintAwareParentPoller +where + S: ParentViewStore + Send + Sync + 'static, + P: Send + Sync + 'static + ParentQueryProxy, +{ + fn last_checkpoint(&self) -> &Checkpoint { + self.inner.last_checkpoint() + } + + fn finalize(&mut self, checkpoint: Checkpoint) -> anyhow::Result<()> { + self.inner.finalize(checkpoint) + } + + async fn try_poll(&mut self) -> anyhow::Result<()> { + if self.is_syncing_peer().await? { + tracing::debug!("syncing with peer, skip parent finality syncing this round"); + return Ok(()); } + self.inner.try_poll().await + } + + fn dump_parent_block_views( + &self, + to: BlockHeight, + ) -> anyhow::Result>> { + self.inner.dump_parent_block_views(to) + } +} + +impl TendermintAwareParentPoller { + async fn is_syncing_peer(&self) -> anyhow::Result { + let status: tendermint_rpc::endpoint::status::Response = self + .client + .status() + .await + .context("failed to get Tendermint status")?; + Ok(status.sync_info.catching_up) } } diff --git a/fendermint/app/src/ipc.rs b/fendermint/app/src/ipc.rs index 6a03bc4b2..f4d17bb28 100644 --- a/fendermint/app/src/ipc.rs +++ b/fendermint/app/src/ipc.rs @@ -9,20 +9,13 @@ use fendermint_vm_genesis::{Power, Validator}; use fendermint_vm_interpreter::fvm::state::ipc::GatewayCaller; use fendermint_vm_interpreter::fvm::state::{FvmExecState, FvmStateParams}; use fendermint_vm_interpreter::fvm::store::ReadOnlyBlockstore; -use fendermint_vm_topdown::sync::ParentFinalityStateQuery; -use fendermint_vm_topdown::IPCParentFinality; +use fendermint_vm_topdown::launch::LaunchQuery; use fvm_ipld_blockstore::Blockstore; 
use std::sync::Arc; +use fendermint_vm_topdown::Checkpoint; use serde::{Deserialize, Serialize}; -/// All the things that can be voted on in a subnet. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub enum AppVote { - /// The validator considers a certain block final on the parent chain. - ParentFinality(IPCParentFinality), -} - /// Queries the LATEST COMMITTED parent finality from the storage pub struct AppParentFinalityQuery where @@ -62,7 +55,7 @@ where } } -impl ParentFinalityStateQuery for AppParentFinalityQuery +impl LaunchQuery for AppParentFinalityQuery where S: KVStore + Codec @@ -72,10 +65,10 @@ where DB: KVWritable + KVReadable + 'static + Clone, SS: Blockstore + 'static + Clone, { - fn get_latest_committed_finality(&self) -> anyhow::Result> { + fn get_latest_checkpoint(&self) -> anyhow::Result> { self.with_exec_state(|mut exec_state| { self.gateway_caller - .get_latest_parent_finality(&mut exec_state) + .get_latest_topdown_checkpoint(&mut exec_state) }) } @@ -86,4 +79,9 @@ where .map(|(_, pt)| pt) }) } + + fn latest_chain_block(&self) -> anyhow::Result { + self.with_exec_state(|s| Ok(s.block_height() as fendermint_vm_topdown::BlockHeight)) + .map(|v| v.unwrap_or(1)) + } } diff --git a/fendermint/vm/interpreter/src/chain.rs b/fendermint/vm/interpreter/src/chain.rs index 4284c34ec..931169eed 100644 --- a/fendermint/vm/interpreter/src/chain.rs +++ b/fendermint/vm/interpreter/src/chain.rs @@ -14,27 +14,23 @@ use async_trait::async_trait; use fendermint_tracing::emit; use fendermint_vm_actor_interface::ipc; use fendermint_vm_event::ParentFinalityMissingQuorum; -use fendermint_vm_message::ipc::ParentFinality; +use fendermint_vm_genesis::ValidatorKey; use fendermint_vm_message::{ chain::ChainMessage, ipc::{BottomUpCheckpoint, CertifiedMessage, IpcMessage, SignedRelayedMessage}, }; use fendermint_vm_resolver::pool::{ResolveKey, ResolvePool}; +use fendermint_vm_topdown::launch::Toggle; use 
fendermint_vm_topdown::proxy::IPCProviderProxyWithLatency; -use fendermint_vm_topdown::voting::{ValidatorKey, VoteTally}; -use fendermint_vm_topdown::{ - CachedFinalityProvider, IPCParentFinality, ParentFinalityProvider, ParentViewProvider, Toggle, -}; +use fendermint_vm_topdown::{Checkpoint, TopdownClient}; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; -use fvm_shared::clock::ChainEpoch; use fvm_shared::econ::TokenAmount; use num_traits::Zero; use std::sync::Arc; /// A resolution pool for bottom-up and top-down checkpoints. pub type CheckpointPool = ResolvePool; -pub type TopDownFinalityProvider = Arc>>; /// These are the extra state items that the chain interpreter needs, /// a sort of "environment" supporting IPC. @@ -42,9 +38,8 @@ pub type TopDownFinalityProvider = Arc, } #[derive(Clone, Hash, PartialEq, Eq)] @@ -122,51 +117,12 @@ where CheckpointPoolItem::BottomUp(ckpt) => ChainMessage::Ipc(IpcMessage::BottomUpExec(ckpt)), }); - // Prepare top down proposals. - // Before we try to find a quorum, pause incoming votes. This is optional but if there are lots of votes coming in it might hold up proposals. - atomically(|| state.parent_finality_votes.pause_votes_until_find_quorum()).await; - - // The pre-requisite for proposal is that there is a quorum of gossiped votes at that height. - // The final proposal can be at most as high as the quorum, but can be less if we have already, - // hit some limits such as how many blocks we can propose in a single step. - let finalities = atomically(|| { - let parent = state.parent_finality_provider.next_proposal()?; - let quorum = state - .parent_finality_votes - .find_quorum()? 
- .map(|(height, block_hash)| IPCParentFinality { height, block_hash }); - - Ok((parent, quorum)) - }) - .await; - - let maybe_finality = match finalities { - (Some(parent), Some(quorum)) => Some(if parent.height <= quorum.height { - parent - } else { - quorum - }), - (Some(parent), None) => { - emit!( - DEBUG, - ParentFinalityMissingQuorum { - block_height: parent.height, - block_hash: &hex::encode(&parent.block_hash), - } - ); - None + match state.topdown_client.find_topdown_proposal().await { + Ok(Some(p)) => msgs.push(ChainMessage::Ipc(IpcMessage::TopDownExec(p))), + Ok(None) => {} + Err(e) => { + tracing::error!(err = e.to_string(), "cannot find topdown proposal"); } - (None, _) => { - // This is normal, the parent probably hasn't produced a block yet. - None - } - }; - - if let Some(finality) = maybe_finality { - msgs.push(ChainMessage::Ipc(IpcMessage::TopDownExec(ParentFinality { - height: finality.height as ChainEpoch, - block_hash: finality.block_hash, - }))) } // Append at the end - if we run out of block space, these are going to be reproposed in the next block. 
@@ -197,18 +153,17 @@ where return Ok(false); } } - ChainMessage::Ipc(IpcMessage::TopDownExec(ParentFinality { - height, - block_hash, - })) => { - let prop = IPCParentFinality { - height: height as u64, - block_hash, - }; - let is_final = - atomically(|| env.parent_finality_provider.check_proposal(&prop)).await; - if !is_final { - return Ok(false); + ChainMessage::Ipc(IpcMessage::TopDownExec(p)) => { + let proposal_height = p.cert.payload().parent_height(); + + match env.topdown_client.validate_quorum_proposal(p).await { + Ok(_) => { + tracing::info!(proposal_height, "validated quorum proposal"); + } + Err(e) => { + tracing::error!(err = e.to_string(), "cannot validate quorum proposal"); + return Ok(false); + } } } _ => {} @@ -288,57 +243,32 @@ where todo!("#197: implement BottomUp checkpoint execution") } IpcMessage::TopDownExec(p) => { - if !env.parent_finality_provider.is_enabled() { - bail!("cannot execute IPC top-down message: parent provider disabled"); - } - - // commit parent finality first - let finality = IPCParentFinality::new(p.height, p.block_hash); + let checkpoint = Checkpoint::from(p.cert.payload()); tracing::debug!( - finality = finality.to_string(), + checkpoint = checkpoint.to_string(), "chain interpreter received topdown exec proposal", ); - let (prev_height, prev_finality) = topdown::commit_finality( + let prev_checkpoint = topdown::commit_checkpoint( &self.gateway_caller, &mut state, - finality.clone(), - &env.parent_finality_provider, + checkpoint.clone(), ) .await .context("failed to commit finality")?; tracing::debug!( - previous_committed_height = prev_height, - previous_committed_finality = prev_finality + previous_committed_finality = prev_checkpoint .as_ref() .map(|f| format!("{f}")) .unwrap_or_else(|| String::from("None")), "chain interpreter committed topdown finality", ); - // The height range we pull top-down effects from. 
This _includes_ the proposed - // finality, as we assume that the interface we query publishes only fully - // executed blocks as the head of the chain. This is certainly the case for - // Ethereum-compatible JSON-RPC APIs, like Filecoin's. It should be the case - // too for future Filecoin light clients. - // - // Another factor to take into account is the chain_head_delay, which must be - // non-zero. So even in the case where deferred execution leaks through our - // query mechanism, it should not be problematic because we're guaranteed to - // be _at least_ 1 height behind. - let (execution_fr, execution_to) = (prev_height + 1, finality.height); - // error happens if we cannot get the validator set from ipc agent after retries - let validator_changes = env - .parent_finality_provider - .validator_changes_from(execution_fr, execution_to) - .await - .context("failed to fetch validator changes")?; + let validator_changes = p.effects.1; tracing::debug!( - from = execution_fr, - to = execution_to, msgs = validator_changes.len(), "chain interpreter received total validator changes" ); @@ -348,16 +278,10 @@ where .context("failed to store validator changes")?; // error happens if we cannot get the cross messages from ipc agent after retries - let msgs = env - .parent_finality_provider - .top_down_msgs_from(execution_fr, execution_to) - .await - .context("failed to fetch top down messages")?; + let msgs = p.effects.0; tracing::debug!( number_of_messages = msgs.len(), - start = execution_fr, - end = execution_to, "chain interpreter received topdown msgs", ); @@ -367,28 +291,13 @@ where tracing::debug!("chain interpreter applied topdown msgs"); - let local_block_height = state.block_height() as u64; - let proposer = state.validator_id().map(|id| id.to_string()); - let proposer_ref = proposer.as_deref(); - - atomically(|| { - env.parent_finality_provider - .set_new_finality(finality.clone(), prev_finality.clone())?; - - env.parent_finality_votes.set_finalized( - 
finality.height, - finality.block_hash.clone(), - proposer_ref, - Some(local_block_height), - )?; - - Ok(()) - }) - .await; + env.topdown_client + .parent_finalized(checkpoint.clone()) + .await?; tracing::debug!( - finality = finality.to_string(), - "chain interpreter has set new" + checkpoint = checkpoint.to_string(), + "chain interpreter has set new topdown checkpoint" ); Ok(((env, state), ChainMessageApplyRet::Ipc(ret))) @@ -417,17 +326,13 @@ where .0 .iter() .map(|v| { - let vk = ValidatorKey::from(v.public_key.0); + let vk = ValidatorKey::new(v.public_key.0); let w = v.power.0; (vk, w) }) .collect::>(); - atomically(|| { - env.parent_finality_votes - .update_power_table(power_updates.clone()) - }) - .await; + env.topdown_client.update_power_table(power_updates).await?; } Ok(((env, state), out)) diff --git a/fendermint/vm/interpreter/src/fvm/state/ipc.rs b/fendermint/vm/interpreter/src/fvm/state/ipc.rs index 12caa26c6..32f81dca4 100644 --- a/fendermint/vm/interpreter/src/fvm/state/ipc.rs +++ b/fendermint/vm/interpreter/src/fvm/state/ipc.rs @@ -18,7 +18,7 @@ use fendermint_vm_actor_interface::{ use fendermint_vm_genesis::{Collateral, Power, PowerScale, Validator, ValidatorKey}; use fendermint_vm_message::conv::{from_eth, from_fvm}; use fendermint_vm_message::signed::sign_secp256k1; -use fendermint_vm_topdown::IPCParentFinality; +use fendermint_vm_topdown::Checkpoint; use ipc_actors_abis::checkpointing_facet::CheckpointingFacet; use ipc_actors_abis::gateway_getter_facet::GatewayGetterFacet; @@ -230,23 +230,23 @@ impl GatewayCaller { Ok(calldata) } - /// Commit the parent finality to the gateway and returns the previously committed finality. - /// None implies there is no previously committed finality. - pub fn commit_parent_finality( + /// Commit the parent checkpoint to the gateway and returns the previously committed checkpoint. + /// None implies there is no previously committed checkpoint. 
+ pub fn commit_topdown_checkpoint( &self, state: &mut FvmExecState, - finality: IPCParentFinality, - ) -> anyhow::Result> { - let evm_finality = top_down_finality_facet::ParentFinality::try_from(finality)?; + checkpoint: Checkpoint, + ) -> anyhow::Result> { + let evm_finality = top_down_finality_facet::TopdownCheckpoint::try_from(checkpoint)?; - let (has_committed, prev_finality) = self + let (has_committed, prev_checkpoint) = self .topdown - .call(state, |c| c.commit_parent_finality(evm_finality))?; + .call(state, |c| c.commit_topdown_checkpoint(evm_finality))?; Ok(if !has_committed { None } else { - Some(IPCParentFinality::from(prev_finality)) + Some(Checkpoint::from(prev_checkpoint)) }) } @@ -298,14 +298,14 @@ impl GatewayCaller { Ok(r.into_return()) } - pub fn get_latest_parent_finality( + pub fn get_latest_topdown_checkpoint( &self, state: &mut FvmExecState, - ) -> anyhow::Result { + ) -> anyhow::Result { let r = self .getter - .call(state, |c| c.get_latest_parent_finality())?; - Ok(IPCParentFinality::from(r)) + .call(state, |c| c.get_latest_topdown_checkpoint())?; + Ok(Checkpoint::from(r)) } /// Get the Ethereum adresses of validators who signed a checkpoint. diff --git a/fendermint/vm/interpreter/src/fvm/topdown.rs b/fendermint/vm/interpreter/src/fvm/topdown.rs index 8c9e77b3b..3d4060ee8 100644 --- a/fendermint/vm/interpreter/src/fvm/topdown.rs +++ b/fendermint/vm/interpreter/src/fvm/topdown.rs @@ -1,41 +1,39 @@ // Copyright 2022-2024 Protocol Labs // SPDX-License-Identifier: Apache-2.0, MIT + //! 
Topdown finality related util functions
-use crate::chain::TopDownFinalityProvider;
 use crate::fvm::state::ipc::GatewayCaller;
 use crate::fvm::state::FvmExecState;
 use crate::fvm::FvmApplyRet;
 use anyhow::Context;
-use fendermint_vm_topdown::{BlockHeight, IPCParentFinality, ParentViewProvider};
+use fendermint_vm_topdown::{BlockHeight, Checkpoint};
 use fvm_ipld_blockstore::Blockstore;
 use ipc_api::cross::IpcEnvelope;
 
 use super::state::ipc::tokens_to_mint;
 
-/// Commit the parent finality. Returns the height that the previous parent finality is committed and
-/// the committed finality itself. If there is no parent finality committed, genesis epoch is returned.
-pub async fn commit_finality<DB>(
+/// Commit the topdown checkpoint to the gateway. Returns the previously committed checkpoint.
+/// None implies there is no previously committed checkpoint.
+pub async fn commit_checkpoint<DB>(
     gateway_caller: &GatewayCaller<DB>,
     state: &mut FvmExecState<DB>,
-    finality: IPCParentFinality,
-    provider: &TopDownFinalityProvider,
-) -> anyhow::Result<(BlockHeight, Option<IPCParentFinality>)>
+    checkpoint: Checkpoint,
+) -> anyhow::Result<Option<Checkpoint>>
 where
     DB: Blockstore + Sync + Send + Clone + 'static,
 {
-    let (prev_height, prev_finality) =
-        if let Some(prev_finality) = gateway_caller.commit_parent_finality(state, finality)? {
-            (prev_finality.height, Some(prev_finality))
-        } else {
-            (provider.genesis_epoch()?, None)
-        };
+    let prev_checkpoint = if let Some(prev_checkpoint) =
+        gateway_caller.commit_topdown_checkpoint(state, checkpoint)?
+    {
+        Some(prev_checkpoint)
+    } else {
+        None
+    };
 
-    tracing::debug!(
-        "commit finality parsed: prev_height {prev_height}, prev_finality: {prev_finality:?}"
-    );
+    tracing::debug!("commit checkpoint parsed, prev_checkpoint: {prev_checkpoint:?}");
 
-    Ok((prev_height, prev_finality))
+    Ok(prev_checkpoint)
 }
 
 /// Execute the top down messages implicitly.
Before the execution, mint to the gateway of the funds diff --git a/fendermint/vm/message/Cargo.toml b/fendermint/vm/message/Cargo.toml index 3816b9170..d98b76bb2 100644 --- a/fendermint/vm/message/Cargo.toml +++ b/fendermint/vm/message/Cargo.toml @@ -27,6 +27,7 @@ fvm_ipld_encoding = { workspace = true } ipc-api = { workspace = true } fendermint_crypto = { path = "../../crypto" } +fendermint_vm_topdown = { path = "../../vm/topdown" } fendermint_vm_encoding = { path = "../encoding" } fendermint_vm_actor_interface = { path = "../actor_interface" } fendermint_testing = { path = "../../testing", optional = true } diff --git a/fendermint/vm/message/src/ipc.rs b/fendermint/vm/message/src/ipc.rs index 2e77d84c6..eecc27864 100644 --- a/fendermint/vm/message/src/ipc.rs +++ b/fendermint/vm/message/src/ipc.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0, MIT use cid::Cid; +use fendermint_vm_topdown::TopdownProposal; use fvm_shared::{ address::Address, clock::ChainEpoch, crypto::signature::Signature, econ::TokenAmount, }; @@ -27,7 +28,7 @@ pub enum IpcMessage { /// A top-down checkpoint parent finality proposal. This proposal should contain the latest parent /// state that to be checked and voted by validators. - TopDownExec(ParentFinality), + TopDownExec(TopdownProposal), } /// A message relayed by a user on the current subnet. diff --git a/fendermint/vm/topdown/src/convert.rs b/fendermint/vm/topdown/src/convert.rs index 09ec76958..e1ec6d802 100644 --- a/fendermint/vm/topdown/src/convert.rs +++ b/fendermint/vm/topdown/src/convert.rs @@ -2,43 +2,46 @@ // SPDX-License-Identifier: Apache-2.0, MIT //! 
Handles the type conversion to ethers contract types -use crate::IPCParentFinality; +use crate::Checkpoint; use anyhow::anyhow; -use ethers::types::U256; +use ethers::types::{Bytes, U256}; use ipc_actors_abis::{gateway_getter_facet, top_down_finality_facet}; -impl TryFrom for top_down_finality_facet::ParentFinality { +impl TryFrom for top_down_finality_facet::TopdownCheckpoint { type Error = anyhow::Error; - fn try_from(value: IPCParentFinality) -> Result { - if value.block_hash.len() != 32 { + fn try_from(value: Checkpoint) -> Result { + if value.target_hash().len() != 32 { return Err(anyhow!("invalid block hash length, expecting 32")); } let mut block_hash = [0u8; 32]; - block_hash.copy_from_slice(&value.block_hash[0..32]); + block_hash.copy_from_slice(&value.target_hash()[0..32]); Ok(Self { - height: U256::from(value.height), + height: U256::from(value.target_height()), block_hash, + effects_commitment: Bytes::from(value.cumulative_effects_comm().clone()), }) } } -impl From for IPCParentFinality { - fn from(value: gateway_getter_facet::ParentFinality) -> Self { - IPCParentFinality { - height: value.height.as_u64(), - block_hash: value.block_hash.to_vec(), - } +impl From for Checkpoint { + fn from(value: gateway_getter_facet::TopdownCheckpoint) -> Self { + Checkpoint::v1( + value.height.as_u64(), + value.block_hash.to_vec(), + value.effects_commitment.to_vec(), + ) } } -impl From for IPCParentFinality { - fn from(value: top_down_finality_facet::ParentFinality) -> Self { - IPCParentFinality { - height: value.height.as_u64(), - block_hash: value.block_hash.to_vec(), - } +impl From for Checkpoint { + fn from(value: top_down_finality_facet::TopdownCheckpoint) -> Self { + Checkpoint::v1( + value.height.as_u64(), + value.block_hash.to_vec(), + value.effects_commitment.to_vec(), + ) } } diff --git a/fendermint/vm/topdown/src/finality/fetch.rs b/fendermint/vm/topdown/src/finality/fetch.rs deleted file mode 100644 index fb4203045..000000000 --- 
a/fendermint/vm/topdown/src/finality/fetch.rs +++ /dev/null @@ -1,450 +0,0 @@ -// Copyright 2022-2024 Protocol Labs -// SPDX-License-Identifier: Apache-2.0, MIT - -use crate::finality::null::FinalityWithNull; -use crate::finality::ParentViewPayload; -use crate::proxy::ParentQueryProxy; -use crate::{ - handle_null_round, BlockHash, BlockHeight, Config, Error, IPCParentFinality, - ParentFinalityProvider, ParentViewProvider, -}; -use async_stm::{Stm, StmResult}; -use ipc_api::cross::IpcEnvelope; -use ipc_api::staking::StakingChangeRequest; -use std::sync::Arc; - -/// The finality provider that performs io to the parent if not found in cache -#[derive(Clone)] -pub struct CachedFinalityProvider { - inner: FinalityWithNull, - config: Config, - /// The ipc client proxy that works as a back up if cache miss - parent_client: Arc, -} - -/// Exponential backoff for futures -macro_rules! retry { - ($wait:expr, $retires:expr, $f:expr) => {{ - let mut retries = $retires; - let mut wait = $wait; - - loop { - let res = $f; - if let Err(e) = &res { - // there is no point in retrying if the current block is null round - if crate::is_null_round_str(&e.to_string()) { - tracing::warn!( - "cannot query ipc parent_client due to null round, skip retry" - ); - break res; - } - - tracing::warn!( - error = e.to_string(), - retries, - wait = ?wait, - "cannot query ipc parent_client" - ); - - if retries > 0 { - retries -= 1; - - tokio::time::sleep(wait).await; - - wait *= 2; - continue; - } - } - - break res; - } - }}; -} - -#[async_trait::async_trait] -impl ParentViewProvider for CachedFinalityProvider { - fn genesis_epoch(&self) -> anyhow::Result { - self.inner.genesis_epoch() - } - - async fn validator_changes_from( - &self, - from: BlockHeight, - to: BlockHeight, - ) -> anyhow::Result> { - let mut v = vec![]; - for h in from..=to { - let mut r = self.validator_changes(h).await?; - tracing::debug!( - number_of_messages = r.len(), - height = h, - "fetched validator change set", - ); - 
v.append(&mut r); - } - - Ok(v) - } - - /// Get top down message in the range `from` to `to`, both inclusive. For the check to be valid, one - /// should not pass a height `to` that is a null block, otherwise the check is useless. In debug - /// mode, it will throw an error. - async fn top_down_msgs_from( - &self, - from: BlockHeight, - to: BlockHeight, - ) -> anyhow::Result> { - let mut v = vec![]; - for h in from..=to { - let mut r = self.top_down_msgs(h).await?; - tracing::debug!( - number_of_top_down_messages = r.len(), - height = h, - "obtained topdown messages", - ); - v.append(&mut r); - } - Ok(v) - } -} - -impl ParentFinalityProvider - for CachedFinalityProvider -{ - fn next_proposal(&self) -> Stm> { - self.inner.next_proposal() - } - - fn check_proposal(&self, proposal: &IPCParentFinality) -> Stm { - self.inner.check_proposal(proposal) - } - - fn set_new_finality( - &self, - finality: IPCParentFinality, - previous_finality: Option, - ) -> Stm<()> { - self.inner.set_new_finality(finality, previous_finality) - } -} - -impl CachedFinalityProvider { - /// Creates an uninitialized provider - /// We need this because `fendermint` has yet to be initialized and might - /// not be able to provide an existing finality from the storage. This provider requires an - /// existing committed finality. Providing the finality will enable other functionalities. 
- pub async fn uninitialized(config: Config, parent_client: Arc) -> anyhow::Result { - let genesis = parent_client.get_genesis_epoch().await?; - Ok(Self::new(config, genesis, None, parent_client)) - } - - /// Should always return the top down messages, only when ipc parent_client is down after exponential - /// retries - async fn validator_changes( - &self, - height: BlockHeight, - ) -> anyhow::Result> { - let r = self.inner.validator_changes(height).await?; - - if let Some(v) = r { - return Ok(v); - } - - let r = retry!( - self.config.exponential_back_off, - self.config.exponential_retry_limit, - self.parent_client - .get_validator_changes(height) - .await - .map(|r| r.value) - ); - - handle_null_round(r, Vec::new) - } - - /// Should always return the top down messages, only when ipc parent_client is down after exponential - /// retries - async fn top_down_msgs(&self, height: BlockHeight) -> anyhow::Result> { - let r = self.inner.top_down_msgs(height).await?; - - if let Some(v) = r { - return Ok(v); - } - - let r = retry!( - self.config.exponential_back_off, - self.config.exponential_retry_limit, - self.parent_client - .get_top_down_msgs(height) - .await - .map(|r| r.value) - ); - - handle_null_round(r, Vec::new) - } -} - -impl CachedFinalityProvider { - pub(crate) fn new( - config: Config, - genesis_epoch: BlockHeight, - committed_finality: Option, - parent_client: Arc, - ) -> Self { - let inner = FinalityWithNull::new(config.clone(), genesis_epoch, committed_finality); - Self { - inner, - config, - parent_client, - } - } - - pub fn block_hash(&self, height: BlockHeight) -> Stm> { - self.inner.block_hash_at_height(height) - } - - pub fn latest_height_in_cache(&self) -> Stm> { - self.inner.latest_height_in_cache() - } - - /// Get the latest height tracked in the provider, includes both cache and last committed finality - pub fn latest_height(&self) -> Stm> { - self.inner.latest_height() - } - - pub fn last_committed_finality(&self) -> Stm> { - 
self.inner.last_committed_finality() - } - - /// Clear the cache and set the committed finality to the provided value - pub fn reset(&self, finality: IPCParentFinality) -> Stm<()> { - self.inner.reset(finality) - } - - pub fn new_parent_view( - &self, - height: BlockHeight, - maybe_payload: Option, - ) -> StmResult<(), Error> { - self.inner.new_parent_view(height, maybe_payload) - } - - /// Returns the number of blocks cached. - pub fn cached_blocks(&self) -> Stm { - self.inner.cached_blocks() - } - - pub fn first_non_null_block(&self, height: BlockHeight) -> Stm> { - self.inner.first_non_null_block(height) - } -} - -#[cfg(test)] -mod tests { - use crate::finality::ParentViewPayload; - use crate::proxy::ParentQueryProxy; - use crate::{ - BlockHeight, CachedFinalityProvider, Config, IPCParentFinality, ParentViewProvider, - SequentialKeyCache, NULL_ROUND_ERR_MSG, - }; - use anyhow::anyhow; - use async_trait::async_trait; - use fvm_shared::address::Address; - use fvm_shared::econ::TokenAmount; - use ipc_api::cross::IpcEnvelope; - use ipc_api::staking::{StakingChange, StakingChangeRequest, StakingOperation}; - use ipc_api::subnet_id::SubnetID; - use ipc_provider::manager::{GetBlockHashResult, TopDownQueryPayload}; - use std::sync::atomic::{AtomicUsize, Ordering}; - use std::sync::Arc; - use std::time::Duration; - - /// Creates a mock of a new parent blockchain view. The key is the height and the value is the - /// block hash. If block hash is None, it means the current height is a null block. - macro_rules! 
new_parent_blocks { - ($($key:expr => $val:expr),* ,) => ( - hash_map!($($key => $val),*) - ); - ($($key:expr => $val:expr),*) => ({ - let mut map = SequentialKeyCache::sequential(); - $( map.append($key, $val).unwrap(); )* - map - }); - } - - struct TestParentProxy { - blocks: SequentialKeyCache>, - } - - #[async_trait] - impl ParentQueryProxy for TestParentProxy { - async fn get_chain_head_height(&self) -> anyhow::Result { - Ok(self.blocks.upper_bound().unwrap()) - } - - async fn get_genesis_epoch(&self) -> anyhow::Result { - Ok(self.blocks.lower_bound().unwrap() - 1) - } - - async fn get_block_hash(&self, height: BlockHeight) -> anyhow::Result { - let r = self.blocks.get_value(height).unwrap(); - if r.is_none() { - return Err(anyhow!(NULL_ROUND_ERR_MSG)); - } - - for h in (self.blocks.lower_bound().unwrap()..height).rev() { - let v = self.blocks.get_value(h).unwrap(); - if v.is_none() { - continue; - } - return Ok(GetBlockHashResult { - parent_block_hash: v.clone().unwrap().0, - block_hash: r.clone().unwrap().0, - }); - } - panic!("invalid testing data") - } - - async fn get_top_down_msgs( - &self, - height: BlockHeight, - ) -> anyhow::Result>> { - let r = self.blocks.get_value(height).cloned().unwrap(); - if r.is_none() { - return Err(anyhow!(NULL_ROUND_ERR_MSG)); - } - let r = r.unwrap(); - Ok(TopDownQueryPayload { - value: r.2, - block_hash: r.0, - }) - } - - async fn get_validator_changes( - &self, - height: BlockHeight, - ) -> anyhow::Result>> { - let r = self.blocks.get_value(height).cloned().unwrap(); - if r.is_none() { - return Err(anyhow!(NULL_ROUND_ERR_MSG)); - } - let r = r.unwrap(); - Ok(TopDownQueryPayload { - value: r.1, - block_hash: r.0, - }) - } - } - - fn new_provider( - blocks: SequentialKeyCache>, - ) -> CachedFinalityProvider { - let config = Config { - chain_head_delay: 2, - polling_interval: Default::default(), - exponential_back_off: Default::default(), - exponential_retry_limit: 0, - max_proposal_range: Some(1), - max_cache_blocks: None, 
- proposal_delay: None, - }; - let genesis_epoch = blocks.lower_bound().unwrap(); - let proxy = Arc::new(TestParentProxy { blocks }); - let committed_finality = IPCParentFinality { - height: genesis_epoch, - block_hash: vec![0; 32], - }; - - CachedFinalityProvider::new(config, genesis_epoch, Some(committed_finality), proxy) - } - - fn new_cross_msg(nonce: u64) -> IpcEnvelope { - let subnet_id = SubnetID::new(10, vec![Address::new_id(1000)]); - let mut msg = IpcEnvelope::new_fund_msg( - &subnet_id, - &Address::new_id(1), - &Address::new_id(2), - TokenAmount::from_atto(100), - ) - .unwrap(); - msg.nonce = nonce; - msg - } - - fn new_validator_changes(configuration_number: u64) -> StakingChangeRequest { - StakingChangeRequest { - configuration_number, - change: StakingChange { - op: StakingOperation::Deposit, - payload: vec![], - validator: Address::new_id(1), - }, - } - } - - #[tokio::test] - async fn test_retry() { - struct Test { - nums_run: AtomicUsize, - } - - impl Test { - async fn run(&self) -> Result<(), &'static str> { - self.nums_run.fetch_add(1, Ordering::SeqCst); - Err("mocked error") - } - } - - let t = Test { - nums_run: AtomicUsize::new(0), - }; - - let res = retry!(Duration::from_secs(1), 2, t.run().await); - assert!(res.is_err()); - // execute the first time, retries twice - assert_eq!(t.nums_run.load(Ordering::SeqCst), 3); - } - - #[tokio::test] - async fn test_query_topdown_msgs() { - let parent_blocks = new_parent_blocks!( - 100 => Some((vec![0; 32], vec![], vec![new_cross_msg(0)])), // genesis block - 101 => Some((vec![1; 32], vec![], vec![new_cross_msg(1)])), - 102 => Some((vec![2; 32], vec![], vec![new_cross_msg(2)])), - 103 => Some((vec![3; 32], vec![], vec![new_cross_msg(3)])), - 104 => None, - 105 => None, - 106 => Some((vec![6; 32], vec![], vec![new_cross_msg(6)])) - ); - let provider = new_provider(parent_blocks); - let messages = provider.top_down_msgs_from(100, 106).await.unwrap(); - - assert_eq!( - messages, - vec![ - new_cross_msg(0), - 
new_cross_msg(1), - new_cross_msg(2), - new_cross_msg(3), - new_cross_msg(6), - ] - ) - } - - #[tokio::test] - async fn test_query_validator_changes() { - let parent_blocks = new_parent_blocks!( - 100 => Some((vec![0; 32], vec![new_validator_changes(0)], vec![])), // genesis block - 101 => Some((vec![1; 32], vec![new_validator_changes(1)], vec![])), - 102 => Some((vec![2; 32], vec![], vec![])), - 103 => Some((vec![3; 32], vec![new_validator_changes(3)], vec![])), - 104 => None, - 105 => None, - 106 => Some((vec![6; 32], vec![new_validator_changes(6)], vec![])) - ); - let provider = new_provider(parent_blocks); - let messages = provider.validator_changes_from(100, 106).await.unwrap(); - - assert_eq!(messages.len(), 4) - } -} diff --git a/fendermint/vm/topdown/src/finality/mod.rs b/fendermint/vm/topdown/src/finality/mod.rs deleted file mode 100644 index c6cd2dc3d..000000000 --- a/fendermint/vm/topdown/src/finality/mod.rs +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright 2022-2024 Protocol Labs -// SPDX-License-Identifier: Apache-2.0, MIT - -mod fetch; -mod null; - -use crate::error::Error; -use crate::BlockHash; -use async_stm::{abort, StmResult}; -use ipc_api::cross::IpcEnvelope; -use ipc_api::staking::StakingChangeRequest; - -pub use fetch::CachedFinalityProvider; - -pub(crate) type ParentViewPayload = (BlockHash, Vec, Vec); - -fn ensure_sequential u64>(msgs: &[T], f: F) -> StmResult<(), Error> { - if msgs.is_empty() { - return Ok(()); - } - - let first = msgs.first().unwrap(); - let mut nonce = f(first); - for msg in msgs.iter().skip(1) { - if nonce + 1 != f(msg) { - return abort(Error::NotSequential); - } - nonce += 1; - } - - Ok(()) -} - -pub(crate) fn validator_changes(p: &ParentViewPayload) -> Vec { - p.1.clone() -} - -pub(crate) fn topdown_cross_msgs(p: &ParentViewPayload) -> Vec { - p.2.clone() -} - -#[cfg(test)] -mod tests { - use crate::proxy::ParentQueryProxy; - use crate::{ - BlockHeight, CachedFinalityProvider, Config, IPCParentFinality, 
ParentFinalityProvider, - }; - use async_stm::atomically_or_err; - use async_trait::async_trait; - use ipc_api::cross::IpcEnvelope; - use ipc_api::staking::StakingChangeRequest; - use ipc_provider::manager::{GetBlockHashResult, TopDownQueryPayload}; - use std::sync::Arc; - use tokio::time::Duration; - - struct MockedParentQuery; - - #[async_trait] - impl ParentQueryProxy for MockedParentQuery { - async fn get_chain_head_height(&self) -> anyhow::Result { - Ok(1) - } - - async fn get_genesis_epoch(&self) -> anyhow::Result { - Ok(10) - } - - async fn get_block_hash(&self, _height: BlockHeight) -> anyhow::Result { - Ok(GetBlockHashResult::default()) - } - - async fn get_top_down_msgs( - &self, - _height: BlockHeight, - ) -> anyhow::Result>> { - Ok(TopDownQueryPayload { - value: vec![], - block_hash: vec![], - }) - } - - async fn get_validator_changes( - &self, - _height: BlockHeight, - ) -> anyhow::Result>> { - Ok(TopDownQueryPayload { - value: vec![], - block_hash: vec![], - }) - } - } - - fn mocked_agent_proxy() -> Arc { - Arc::new(MockedParentQuery) - } - - fn genesis_finality() -> IPCParentFinality { - IPCParentFinality { - height: 0, - block_hash: vec![0; 32], - } - } - - fn new_provider() -> CachedFinalityProvider { - let config = Config { - chain_head_delay: 20, - polling_interval: Duration::from_secs(10), - exponential_back_off: Duration::from_secs(10), - exponential_retry_limit: 10, - max_proposal_range: None, - max_cache_blocks: None, - proposal_delay: None, - }; - - CachedFinalityProvider::new(config, 10, Some(genesis_finality()), mocked_agent_proxy()) - } - - #[tokio::test] - async fn test_finality_works() { - let provider = new_provider(); - - atomically_or_err(|| { - // inject data - for i in 10..=100 { - provider.new_parent_view(i, Some((vec![1u8; 32], vec![], vec![])))?; - } - - let target_block = 120; - let finality = IPCParentFinality { - height: target_block, - block_hash: vec![1u8; 32], - }; - provider.set_new_finality(finality.clone(), 
Some(genesis_finality()))?; - - // all cache should be cleared - let r = provider.next_proposal()?; - assert!(r.is_none()); - - let f = provider.last_committed_finality()?; - assert_eq!(f, Some(finality)); - - Ok(()) - }) - .await - .unwrap(); - } - - #[tokio::test] - async fn test_check_proposal_works() { - let provider = new_provider(); - - atomically_or_err(|| { - let target_block = 100; - - // inject data - provider.new_parent_view(target_block, Some((vec![1u8; 32], vec![], vec![])))?; - provider.set_new_finality( - IPCParentFinality { - height: target_block - 1, - block_hash: vec![1u8; 32], - }, - Some(genesis_finality()), - )?; - - let finality = IPCParentFinality { - height: target_block, - block_hash: vec![1u8; 32], - }; - - assert!(provider.check_proposal(&finality).is_ok()); - - Ok(()) - }) - .await - .unwrap(); - } -} diff --git a/fendermint/vm/topdown/src/finality/null.rs b/fendermint/vm/topdown/src/finality/null.rs deleted file mode 100644 index 9a4a7beea..000000000 --- a/fendermint/vm/topdown/src/finality/null.rs +++ /dev/null @@ -1,566 +0,0 @@ -// Copyright 2022-2024 Protocol Labs -// SPDX-License-Identifier: Apache-2.0, MIT - -use crate::finality::{ - ensure_sequential, topdown_cross_msgs, validator_changes, ParentViewPayload, -}; -use crate::{BlockHash, BlockHeight, Config, Error, IPCParentFinality, SequentialKeyCache}; -use async_stm::{abort, atomically, Stm, StmResult, TVar}; -use ipc_api::cross::IpcEnvelope; -use ipc_api::staking::StakingChangeRequest; -use std::cmp::min; - -use fendermint_tracing::emit; -use fendermint_vm_event::ParentFinalityCommitted; - -/// Finality provider that can handle null blocks -#[derive(Clone)] -pub struct FinalityWithNull { - config: Config, - genesis_epoch: BlockHeight, - /// Cached data that always syncs with the latest parent chain proactively - cached_data: TVar>>, - /// This is a in memory view of the committed parent finality. 
We need this as a starting point - /// for populating the cache - last_committed_finality: TVar>, -} - -impl FinalityWithNull { - pub fn new( - config: Config, - genesis_epoch: BlockHeight, - committed_finality: Option, - ) -> Self { - Self { - config, - genesis_epoch, - cached_data: TVar::new(SequentialKeyCache::sequential()), - last_committed_finality: TVar::new(committed_finality), - } - } - - pub fn genesis_epoch(&self) -> anyhow::Result { - Ok(self.genesis_epoch) - } - - pub async fn validator_changes( - &self, - height: BlockHeight, - ) -> anyhow::Result>> { - let r = atomically(|| self.handle_null_block(height, validator_changes, Vec::new)).await; - Ok(r) - } - - pub async fn top_down_msgs( - &self, - height: BlockHeight, - ) -> anyhow::Result>> { - let r = atomically(|| self.handle_null_block(height, topdown_cross_msgs, Vec::new)).await; - Ok(r) - } - - pub fn last_committed_finality(&self) -> Stm> { - self.last_committed_finality.read_clone() - } - - /// Clear the cache and set the committed finality to the provided value - pub fn reset(&self, finality: IPCParentFinality) -> Stm<()> { - self.cached_data.write(SequentialKeyCache::sequential())?; - self.last_committed_finality.write(Some(finality)) - } - - pub fn new_parent_view( - &self, - height: BlockHeight, - maybe_payload: Option, - ) -> StmResult<(), Error> { - if let Some((block_hash, validator_changes, top_down_msgs)) = maybe_payload { - self.parent_block_filled(height, block_hash, validator_changes, top_down_msgs) - } else { - self.parent_null_round(height) - } - } - - pub fn next_proposal(&self) -> Stm> { - let height = if let Some(h) = self.propose_next_height()? 
{ - h - } else { - return Ok(None); - }; - - // safe to unwrap as we make sure null height will not be proposed - let block_hash = self.block_hash_at_height(height)?.unwrap(); - - let proposal = IPCParentFinality { height, block_hash }; - tracing::debug!(proposal = proposal.to_string(), "new proposal"); - Ok(Some(proposal)) - } - - pub fn check_proposal(&self, proposal: &IPCParentFinality) -> Stm { - if !self.check_height(proposal)? { - return Ok(false); - } - self.check_block_hash(proposal) - } - - pub fn set_new_finality( - &self, - finality: IPCParentFinality, - previous_finality: Option, - ) -> Stm<()> { - debug_assert!(previous_finality == self.last_committed_finality.read_clone()?); - - // the height to clear - let height = finality.height; - - self.cached_data.update(|mut cache| { - // only remove cache below height, but not at height, as we have delayed execution - cache.remove_key_below(height); - cache - })?; - - let hash = hex::encode(&finality.block_hash); - - self.last_committed_finality.write(Some(finality))?; - - // emit event only after successful write - emit!(ParentFinalityCommitted { - block_height: height, - block_hash: &hash - }); - - Ok(()) - } -} - -impl FinalityWithNull { - /// Returns the number of blocks cached. - pub(crate) fn cached_blocks(&self) -> Stm { - let cache = self.cached_data.read()?; - Ok(cache.size() as BlockHeight) - } - - pub(crate) fn block_hash_at_height(&self, height: BlockHeight) -> Stm> { - if let Some(f) = self.last_committed_finality.read()?.as_ref() { - if f.height == height { - return Ok(Some(f.block_hash.clone())); - } - } - - self.get_at_height(height, |i| i.0.clone()) - } - - pub(crate) fn latest_height_in_cache(&self) -> Stm> { - let cache = self.cached_data.read()?; - Ok(cache.upper_bound()) - } - - /// Get the latest height tracked in the provider, includes both cache and last committed finality - pub(crate) fn latest_height(&self) -> Stm> { - let h = if let Some(h) = self.latest_height_in_cache()? 
{ - h - } else if let Some(p) = self.last_committed_finality()? { - p.height - } else { - return Ok(None); - }; - Ok(Some(h)) - } - - /// Get the first non-null block in the range of earliest cache block till the height specified, inclusive. - pub(crate) fn first_non_null_block(&self, height: BlockHeight) -> Stm> { - let cache = self.cached_data.read()?; - Ok(cache.lower_bound().and_then(|lower_bound| { - for h in (lower_bound..=height).rev() { - if let Some(Some(_)) = cache.get_value(h) { - return Some(h); - } - } - None - })) - } -} - -/// All the private functions -impl FinalityWithNull { - fn propose_next_height(&self) -> Stm> { - let latest_height = if let Some(h) = self.latest_height_in_cache()? { - h - } else { - tracing::debug!("no proposal yet as height not available"); - return Ok(None); - }; - - let last_committed_height = if let Some(h) = self.last_committed_finality.read_clone()? { - h.height - } else { - unreachable!("last committed finality will be available at this point"); - }; - - let max_proposal_height = last_committed_height + self.config.max_proposal_range(); - let candidate_height = min(max_proposal_height, latest_height); - tracing::debug!(max_proposal_height, candidate_height, "propose heights"); - - let first_non_null_height = if let Some(h) = self.first_non_null_block(candidate_height)? { - h - } else { - tracing::debug!(height = candidate_height, "no non-null block found before"); - return Ok(None); - }; - - tracing::debug!(first_non_null_height, candidate_height); - // an extra layer of delay - let maybe_proposal_height = - self.first_non_null_block(first_non_null_height - self.config.proposal_delay())?; - tracing::debug!( - delayed_height = maybe_proposal_height, - delay = self.config.proposal_delay() - ); - if let Some(proposal_height) = maybe_proposal_height { - // this is possible due to delayed execution as the proposed height's data cannot be - // executed because they have yet to be executed. 
- return if last_committed_height == proposal_height { - tracing::debug!( - last_committed_height, - proposal_height, - "no new blocks from cache, not proposing" - ); - Ok(None) - } else { - tracing::debug!(proposal_height, "new proposal height"); - Ok(Some(proposal_height)) - }; - } - - tracing::debug!(last_committed_height, "no non-null block after delay"); - Ok(None) - } - - fn handle_null_block T, D: Fn() -> T>( - &self, - height: BlockHeight, - f: F, - d: D, - ) -> Stm> { - let cache = self.cached_data.read()?; - Ok(cache.get_value(height).map(|v| { - if let Some(i) = v.as_ref() { - f(i) - } else { - tracing::debug!(height, "a null round detected, return default"); - d() - } - })) - } - - fn get_at_height T>( - &self, - height: BlockHeight, - f: F, - ) -> Stm> { - let cache = self.cached_data.read()?; - Ok(if let Some(Some(v)) = cache.get_value(height) { - Some(f(v)) - } else { - None - }) - } - - fn parent_block_filled( - &self, - height: BlockHeight, - block_hash: BlockHash, - validator_changes: Vec, - top_down_msgs: Vec, - ) -> StmResult<(), Error> { - if !top_down_msgs.is_empty() { - // make sure incoming top down messages are ordered by nonce sequentially - tracing::debug!(?top_down_msgs); - ensure_sequential(&top_down_msgs, |msg| msg.nonce)?; - }; - if !validator_changes.is_empty() { - tracing::debug!(?validator_changes, "validator changes"); - ensure_sequential(&validator_changes, |change| change.configuration_number)?; - } - - let r = self.cached_data.modify(|mut cache| { - let r = cache - .append(height, Some((block_hash, validator_changes, top_down_msgs))) - .map_err(Error::NonSequentialParentViewInsert); - (cache, r) - })?; - - if let Err(e) = r { - return abort(e); - } - - Ok(()) - } - - /// When there is a new parent view, but it is actually a null round, call this function. 
- fn parent_null_round(&self, height: BlockHeight) -> StmResult<(), Error> { - let r = self.cached_data.modify(|mut cache| { - let r = cache - .append(height, None) - .map_err(Error::NonSequentialParentViewInsert); - (cache, r) - })?; - - if let Err(e) = r { - return abort(e); - } - - Ok(()) - } - - fn check_height(&self, proposal: &IPCParentFinality) -> Stm { - let binding = self.last_committed_finality.read()?; - // last committed finality is not ready yet, we don't vote, just reject - let last_committed_finality = if let Some(f) = binding.as_ref() { - f - } else { - return Ok(false); - }; - - // the incoming proposal has height already committed, reject - if last_committed_finality.height >= proposal.height { - tracing::debug!( - last_committed = last_committed_finality.height, - proposed = proposal.height, - "proposed height already committed", - ); - return Ok(false); - } - - if let Some(latest_height) = self.latest_height_in_cache()? { - let r = latest_height >= proposal.height; - tracing::debug!( - is_true = r, - latest_height, - proposal = proposal.height.to_string(), - "incoming proposal height seen?" - ); - // requires the incoming height cannot be more advanced than our trusted parent node - Ok(r) - } else { - // latest height is not found, meaning we dont have any prefetched cache, we just be - // strict and vote no simply because we don't know. - tracing::debug!( - proposal = proposal.height.to_string(), - "reject proposal, no data in cache" - ); - Ok(false) - } - } - - fn check_block_hash(&self, proposal: &IPCParentFinality) -> Stm { - Ok( - if let Some(block_hash) = self.block_hash_at_height(proposal.height)? 
{ - let r = block_hash == proposal.block_hash; - tracing::debug!(proposal = proposal.to_string(), is_same = r, "same hash?"); - r - } else { - tracing::debug!(proposal = proposal.to_string(), "reject, hash not found"); - false - }, - ) - } -} - -#[cfg(test)] -mod tests { - use super::FinalityWithNull; - use crate::finality::ParentViewPayload; - use crate::{BlockHeight, Config, IPCParentFinality}; - use async_stm::{atomically, atomically_or_err}; - - async fn new_provider( - mut blocks: Vec<(BlockHeight, Option)>, - ) -> FinalityWithNull { - let config = Config { - chain_head_delay: 2, - polling_interval: Default::default(), - exponential_back_off: Default::default(), - exponential_retry_limit: 0, - max_proposal_range: Some(6), - max_cache_blocks: None, - proposal_delay: Some(2), - }; - let committed_finality = IPCParentFinality { - height: blocks[0].0, - block_hash: vec![0; 32], - }; - - blocks.remove(0); - - let f = FinalityWithNull::new(config, 1, Some(committed_finality)); - for (h, p) in blocks { - atomically_or_err(|| f.new_parent_view(h, p.clone())) - .await - .unwrap(); - } - f - } - - #[tokio::test] - async fn test_happy_path() { - // max_proposal_range is 6. 
proposal_delay is 2 - let parent_blocks = vec![ - (100, Some((vec![0; 32], vec![], vec![]))), // last committed block - (101, Some((vec![1; 32], vec![], vec![]))), // cache start - (102, Some((vec![2; 32], vec![], vec![]))), - (103, Some((vec![3; 32], vec![], vec![]))), - (104, Some((vec![4; 32], vec![], vec![]))), // final delayed height + proposal height - (105, Some((vec![5; 32], vec![], vec![]))), - (106, Some((vec![6; 32], vec![], vec![]))), // max proposal height (last committed + 6), first non null block - (107, Some((vec![7; 32], vec![], vec![]))), // cache latest height - ]; - let provider = new_provider(parent_blocks).await; - - let f = IPCParentFinality { - height: 104, - block_hash: vec![4; 32], - }; - assert_eq!( - atomically(|| provider.next_proposal()).await, - Some(f.clone()) - ); - - // Test set new finality - atomically(|| { - let last = provider.last_committed_finality.read_clone()?; - provider.set_new_finality(f.clone(), last) - }) - .await; - - assert_eq!( - atomically(|| provider.last_committed_finality()).await, - Some(f.clone()) - ); - - // this ensures sequential insertion is still valid - atomically_or_err(|| provider.new_parent_view(108, None)) - .await - .unwrap(); - } - - #[tokio::test] - async fn test_not_enough_view() { - // max_proposal_range is 6. 
proposal_delay is 2 - let parent_blocks = vec![ - (100, Some((vec![0; 32], vec![], vec![]))), // last committed block - (101, Some((vec![1; 32], vec![], vec![]))), - (102, Some((vec![2; 32], vec![], vec![]))), - (103, Some((vec![3; 32], vec![], vec![]))), // delayed height + final height - (104, Some((vec![4; 32], vec![], vec![]))), - (105, Some((vec![4; 32], vec![], vec![]))), // cache latest height, first non null block - // max proposal height is 106 - ]; - let provider = new_provider(parent_blocks).await; - - assert_eq!( - atomically(|| provider.next_proposal()).await, - Some(IPCParentFinality { - height: 103, - block_hash: vec![3; 32] - }) - ); - } - - #[tokio::test] - async fn test_with_all_null_blocks() { - // max_proposal_range is 10. proposal_delay is 2 - let parent_blocks = vec![ - (102, Some((vec![2; 32], vec![], vec![]))), // last committed block - (103, None), - (104, None), - (105, None), - (106, None), - (107, None), - (108, None), - (109, None), - (110, Some((vec![4; 32], vec![], vec![]))), // cache latest height - // max proposal height is 112 - ]; - let mut provider = new_provider(parent_blocks).await; - provider.config.max_proposal_range = Some(8); - - assert_eq!(atomically(|| provider.next_proposal()).await, None); - } - - #[tokio::test] - async fn test_with_partially_null_blocks_i() { - // max_proposal_range is 10. 
proposal_delay is 2 - let parent_blocks = vec![ - (102, Some((vec![2; 32], vec![], vec![]))), // last committed block - (103, None), - (104, None), // we wont have a proposal because after delay, there is no more non-null proposal - (105, None), - (106, None), - (107, None), - (108, None), // delayed block - (109, Some((vec![8; 32], vec![], vec![]))), - (110, Some((vec![10; 32], vec![], vec![]))), // cache latest height, first non null block - // max proposal height is 112 - ]; - let mut provider = new_provider(parent_blocks).await; - provider.config.max_proposal_range = Some(10); - - assert_eq!(atomically(|| provider.next_proposal()).await, None); - } - - #[tokio::test] - async fn test_with_partially_null_blocks_ii() { - // max_proposal_range is 10. proposal_delay is 2 - let parent_blocks = vec![ - (102, Some((vec![2; 32], vec![], vec![]))), // last committed block - (103, Some((vec![3; 32], vec![], vec![]))), - (104, None), - (105, None), - (106, None), - (107, Some((vec![7; 32], vec![], vec![]))), // first non null after delay - (108, None), // delayed block - (109, None), - (110, Some((vec![10; 32], vec![], vec![]))), // cache latest height, first non null block - // max proposal height is 112 - ]; - let mut provider = new_provider(parent_blocks).await; - provider.config.max_proposal_range = Some(10); - - assert_eq!( - atomically(|| provider.next_proposal()).await, - Some(IPCParentFinality { - height: 107, - block_hash: vec![7; 32] - }) - ); - } - - #[tokio::test] - async fn test_with_partially_null_blocks_iii() { - let parent_blocks = vec![ - (102, Some((vec![2; 32], vec![], vec![]))), // last committed block - (103, Some((vec![3; 32], vec![], vec![]))), - (104, None), - (105, None), - (106, None), - (107, Some((vec![7; 32], vec![], vec![]))), // first non null delayed block, final - (108, None), // delayed block - (109, None), - (110, Some((vec![10; 32], vec![], vec![]))), // first non null block - (111, None), - (112, None), - // max proposal height is 122 - 
]; - let mut provider = new_provider(parent_blocks).await; - provider.config.max_proposal_range = Some(20); - - assert_eq!( - atomically(|| provider.next_proposal()).await, - Some(IPCParentFinality { - height: 107, - block_hash: vec![7; 32] - }) - ); - } -} diff --git a/fendermint/vm/topdown/src/launch.rs b/fendermint/vm/topdown/src/launch.rs new file mode 100644 index 000000000..09dfe2b5f --- /dev/null +++ b/fendermint/vm/topdown/src/launch.rs @@ -0,0 +1,237 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use crate::proxy::ParentQueryProxy; +use crate::syncer::{start_parent_syncer, ParentPoller, ParentSyncerConfig, TopDownSyncEvent}; +use crate::vote::gossip::GossipClient; +use crate::vote::payload::PowerUpdates; +use crate::vote::store::InMemoryVoteStore; +use crate::vote::{start_vote_reactor, StartVoteReactorParams}; +use crate::{BlockHeight, Checkpoint, Config, TopdownClient, TopdownProposal}; +use anyhow::anyhow; +use cid::Cid; +use fendermint_crypto::SecretKey; +use fendermint_vm_genesis::{Power, Validator, ValidatorKey}; +use std::future::Future; +use std::sync::Arc; +use std::time::Duration; +use tokio::sync::broadcast; + +/// Run the topdown checkpointing in the background. 
This consists of two processes: +/// - syncer: +/// - syncs with the parent through RPC endpoint to obtain: +/// - parent block hash/height +/// - topdown messages +/// - validator changes +/// - prepares for topdown observation to be braodcasted +/// - voting: +/// - signs/certifies and broadcast topdown observation to p2p peers +/// - listens to certified topdown observation from p2p +/// - aggregate peer certified observations into a quorum certificate for commitment in fendermint +pub async fn run_topdown( + query: CheckpointQuery, + config: Config, + validator_key: SecretKey, + gossip_client: Gossip, + parent_client: ParentClient, + poller_fn: impl FnOnce( + &Checkpoint, + ParentClient, + ParentSyncerConfig, + broadcast::Sender, + ) -> Poller, +) -> anyhow::Result +where + CheckpointQuery: LaunchQuery + Send + Sync + 'static, + Gossip: GossipClient + Send + Sync + 'static, + Poller: ParentPoller + Send + Sync + 'static, + ParentClient: ParentQueryProxy + Send + Sync + 'static, +{ + let query = Arc::new(query); + let checkpoint = query_starting_checkpoint(&query, &parent_client).await?; + + let power_table = query_starting_committee(&query).await?; + let power_table = power_table + .into_iter() + .map(|v| { + let vk = ValidatorKey::new(v.public_key.0); + let w = v.power.0; + (vk, w) + }) + .collect::>(); + + let (internal_event_tx, internal_event_rx) = + broadcast::channel(config.syncer.broadcast_channel_size); + + let poller = poller_fn( + &checkpoint, + parent_client, + config.syncer.clone(), + internal_event_tx, + ); + let syncer_client = start_parent_syncer(config.syncer, poller)?; + + let voting_client = start_vote_reactor(StartVoteReactorParams { + config: config.voting, + validator_key, + power_table, + last_finalized_height: checkpoint.target_height(), + latest_child_block: query.latest_chain_block()?, + gossip: gossip_client, + vote_store: InMemoryVoteStore::default(), + internal_event_listener: internal_event_rx, + })?; + + tracing::info!( + finality 
= checkpoint.to_string(), + "launching parent syncer with last committed checkpoint" + ); + + Ok(TopdownClient { + syncer: syncer_client, + voting: voting_client, + }) +} + +/// Queries the starting finality for polling. First checks the committed finality, if none, that +/// means the chain has just started, then query from the parent to get the genesis epoch. +pub async fn query_starting_checkpoint( + query: &Arc, + parent_client: &P, +) -> anyhow::Result +where + T: LaunchQuery + Send + Sync + 'static, + P: ParentQueryProxy + Send + Sync + 'static, +{ + loop { + let mut checkpoint = match query.get_latest_checkpoint() { + Ok(Some(finality)) => finality, + Ok(None) => { + tracing::debug!("app not ready for query yet"); + tokio::time::sleep(Duration::from_secs(5)).await; + continue; + } + Err(e) => { + tracing::warn!(error = e.to_string(), "cannot get committed finality"); + tokio::time::sleep(Duration::from_secs(5)).await; + continue; + } + }; + tracing::info!( + checkpoint = checkpoint.to_string(), + "latest checkpoint committed" + ); + + // this means there are no previous committed finality yet, we fetch from parent to get + // the genesis epoch of the current subnet and its corresponding block hash. + if checkpoint.target_height() == 0 { + let genesis_epoch = parent_client.get_genesis_epoch().await?; + tracing::debug!(genesis_epoch = genesis_epoch, "obtained genesis epoch"); + let r = parent_client.get_block_hash(genesis_epoch).await?; + tracing::debug!( + block_hash = hex::encode(&r.block_hash), + "obtained genesis block hash", + ); + + checkpoint = Checkpoint::v1(genesis_epoch, r.block_hash, Cid::default().to_bytes()); + tracing::info!( + genesis_checkpoint = checkpoint.to_string(), + "no previous checkpoint committed, fetched from genesis epoch" + ); + } + + return Ok(checkpoint); + } +} + +/// Queries the starting finality for polling. 
First checks the committed finality, if none, that +/// means the chain has just started, then query from the parent to get the genesis epoch. +pub async fn query_starting_committee(query: &Arc) -> anyhow::Result>> +where + T: LaunchQuery + Send + Sync + 'static, +{ + loop { + match query.get_power_table() { + Ok(Some(power_table)) => return Ok(power_table), + Ok(None) => { + tracing::debug!("app not ready for query yet"); + tokio::time::sleep(Duration::from_secs(5)).await; + continue; + } + Err(e) => { + tracing::warn!(error = e.to_string(), "cannot get comittee"); + tokio::time::sleep(Duration::from_secs(5)).await; + continue; + } + } + } +} + +/// Query the chain for bootstrapping topdown +/// +/// It returns `None` from queries until the ledger has been initialized. +pub trait LaunchQuery { + /// Get the latest committed checkpoint from the state + fn get_latest_checkpoint(&self) -> anyhow::Result>; + /// Get the current committee voting powers. + fn get_power_table(&self) -> anyhow::Result>>>; + /// Get the latest blockchain height, the local/child subnet chain + fn latest_chain_block(&self) -> anyhow::Result; +} + +/// Toggle is needed for initialization because cyclic dependencies in fendermint bootstrap process. +/// Fendermint's App owns TopdownClient, but TopdownClient needs App for chain state. +/// Also Toggle is needed to handle non ipc enabled setups. 
+#[derive(Clone)] +pub struct Toggle { + inner: Option, +} + +impl Toggle { + pub fn disable() -> Self { + Self { inner: None } + } + + pub fn enable(t: T) -> Self { + Self { inner: Some(t) } + } + + pub fn is_enabled(&self) -> bool { + self.inner.is_some() + } + + async fn perform_or_err< + 'a, + R, + F: Future>, + Fn: FnOnce(&'a T) -> F, + >( + &'a self, + f: Fn, + ) -> anyhow::Result { + let Some(ref inner) = self.inner else { + return Err(anyhow!("topdown not enabled")); + }; + f(inner).await + } +} + +impl Toggle { + pub async fn validate_quorum_proposal(&self, proposal: TopdownProposal) -> anyhow::Result<()> { + self.perform_or_err(|p| p.validate_quorum_proposal(proposal)) + .await + } + + pub async fn find_topdown_proposal(&self) -> anyhow::Result> { + self.perform_or_err(|p| p.find_topdown_proposal()).await + } + + pub async fn parent_finalized(&self, checkpoint: Checkpoint) -> anyhow::Result<()> { + self.perform_or_err(|p| p.parent_finalized(checkpoint)) + .await + } + + pub async fn update_power_table(&self, updates: PowerUpdates) -> anyhow::Result<()> { + self.perform_or_err(|p| p.update_power_table(updates)).await + } +} diff --git a/fendermint/vm/topdown/src/lib.rs b/fendermint/vm/topdown/src/lib.rs index 148e5a958..915a262dc 100644 --- a/fendermint/vm/topdown/src/lib.rs +++ b/fendermint/vm/topdown/src/lib.rs @@ -3,14 +3,11 @@ mod cache; mod error; -mod finality; -pub mod sync; pub mod convert; pub mod proxy; -mod toggle; -pub mod voting; +pub mod launch; pub mod observation; pub mod observe; pub mod syncer; @@ -25,15 +22,15 @@ use ipc_api::cross::IpcEnvelope; use ipc_api::staking::StakingChangeRequest; use serde::{Deserialize, Serialize}; use std::fmt::{Display, Formatter}; +use std::sync::Arc; use std::time::Duration; pub use crate::cache::{SequentialAppendError, SequentialKeyCache, ValueIter}; pub use crate::error::Error; -pub use crate::finality::CachedFinalityProvider; use crate::observation::{LinearizedParentBlockView, Observation}; -use 
crate::syncer::ParentSyncerReactorClient; -pub use crate::toggle::Toggle; -use crate::vote::VoteReactorClient; +use crate::syncer::{ParentSyncerConfig, ParentSyncerReactorClient}; +use crate::vote::payload::{PowerTable, PowerUpdates}; +use crate::vote::{VoteConfig, VoteReactorClient}; pub type BlockHeight = u64; pub type Bytes = Vec; @@ -48,69 +45,8 @@ pub(crate) const DEFAULT_PROPOSAL_DELAY: BlockHeight = 2; #[derive(Debug, Clone, Deserialize)] pub struct Config { - /// The number of blocks to delay before reporting a height as final on the parent chain. - /// To propose a certain number of epochs delayed from the latest height, we see to be - /// conservative and avoid other from rejecting the proposal because they don't see the - /// height as final yet. - pub chain_head_delay: BlockHeight, - /// Parent syncing cron period, in seconds - pub polling_interval: Duration, - /// Top down exponential back off retry base - pub exponential_back_off: Duration, - /// The max number of retries for exponential backoff before giving up - pub exponential_retry_limit: usize, - /// The max number of blocks one should make the topdown proposal - pub max_proposal_range: Option, - /// Max number of blocks that should be stored in cache - pub max_cache_blocks: Option, - pub proposal_delay: Option, -} - -impl Config { - pub fn new( - chain_head_delay: BlockHeight, - polling_interval: Duration, - exponential_back_off: Duration, - exponential_retry_limit: usize, - ) -> Self { - Self { - chain_head_delay, - polling_interval, - exponential_back_off, - exponential_retry_limit, - max_proposal_range: None, - max_cache_blocks: None, - proposal_delay: None, - } - } - - pub fn with_max_proposal_range(mut self, max_proposal_range: BlockHeight) -> Self { - self.max_proposal_range = Some(max_proposal_range); - self - } - - pub fn with_proposal_delay(mut self, proposal_delay: BlockHeight) -> Self { - self.proposal_delay = Some(proposal_delay); - self - } - - pub fn with_max_cache_blocks(mut self, 
max_cache_blocks: BlockHeight) -> Self { - self.max_cache_blocks = Some(max_cache_blocks); - self - } - - pub fn max_proposal_range(&self) -> BlockHeight { - self.max_proposal_range - .unwrap_or(DEFAULT_MAX_PROPOSAL_RANGE) - } - - pub fn proposal_delay(&self) -> BlockHeight { - self.proposal_delay.unwrap_or(DEFAULT_PROPOSAL_DELAY) - } - - pub fn max_cache_blocks(&self) -> BlockHeight { - self.max_cache_blocks.unwrap_or(DEFAULT_MAX_CACHE_BLOCK) - } + pub syncer: ParentSyncerConfig, + pub voting: VoteConfig, } /// On-chain data structure representing a topdown checkpoint agreed to by a @@ -121,18 +57,23 @@ pub enum Checkpoint { } /// Topdown proposal as part of fendermint proposal execution -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct TopdownProposal { - cert: ECDSACertificate, - effects: (Vec, Vec), + pub cert: ECDSACertificate, + pub effects: (Vec, Vec), } +#[derive(Clone)] pub struct TopdownClient { syncer: ParentSyncerReactorClient, voting: VoteReactorClient, } impl TopdownClient { + pub async fn validate_quorum_proposal(&self, proposal: TopdownProposal) -> anyhow::Result<()> { + self.voting.check_quorum_cert(Box::new(proposal.cert)).await + } + pub async fn find_topdown_proposal(&self) -> anyhow::Result> { let Some(quorum_cert) = self.voting.find_quorum().await? else { return Ok(None); @@ -206,69 +147,12 @@ impl TopdownClient { self.syncer.finalize_parent_height(checkpoint).await?; Ok(()) } -} - -/// The finality view for IPC parent at certain height. -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] -pub struct IPCParentFinality { - /// The latest chain height - pub height: BlockHeight, - /// The block hash. For FVM, it is a Cid. For Evm, it is bytes32 as one can now potentially - /// deploy a subnet on EVM. 
- pub block_hash: BlockHash, -} - -impl IPCParentFinality { - pub fn new(height: ChainEpoch, hash: BlockHash) -> Self { - Self { - height: height as BlockHeight, - block_hash: hash, - } - } -} -impl Display for IPCParentFinality { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!( - f, - "IPCParentFinality(height: {}, block_hash: {})", - self.height, - hex::encode(&self.block_hash) - ) + pub async fn update_power_table(&self, updates: PowerUpdates) -> anyhow::Result<()> { + self.voting.update_power_table(updates).await } } -#[async_trait] -pub trait ParentViewProvider { - /// Obtain the genesis epoch of the current subnet in the parent - fn genesis_epoch(&self) -> anyhow::Result; - /// Get the validator changes from and to height. - async fn validator_changes_from( - &self, - from: BlockHeight, - to: BlockHeight, - ) -> anyhow::Result>; - /// Get the top down messages from and to height. - async fn top_down_msgs_from( - &self, - from: BlockHeight, - to: BlockHeight, - ) -> anyhow::Result>; -} - -pub trait ParentFinalityProvider: ParentViewProvider { - /// Latest proposal for parent finality - fn next_proposal(&self) -> Stm>; - /// Check if the target proposal is valid - fn check_proposal(&self, proposal: &IPCParentFinality) -> Stm; - /// Called when finality is committed - fn set_new_finality( - &self, - finality: IPCParentFinality, - previous_finality: Option, - ) -> Stm<()>; -} - /// If res is null round error, returns the default value from f() pub(crate) fn handle_null_round T>( res: anyhow::Result, @@ -294,7 +178,33 @@ pub(crate) fn is_null_round_str(s: &str) -> bool { s.contains(NULL_ROUND_ERR_MSG) } +impl From<&Observation> for Checkpoint { + fn from(value: &Observation) -> Self { + Self::V1(value.clone()) + } +} + +impl Display for Checkpoint { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + Checkpoint::V1(v) => { + write!( + f, + "Checkpoint(version = 1, height = {}, block_hash = {}, effects = {})", + 
v.parent_height, + hex::encode(&v.parent_hash), + hex::encode(&v.cumulative_effects_comm) + ) + } + } + } +} + impl Checkpoint { + pub fn v1(height: BlockHeight, hash: BlockHash, effects: Bytes) -> Self { + Self::V1(Observation::new(height, hash, effects)) + } + pub fn target_height(&self) -> BlockHeight { match self { Checkpoint::V1(b) => b.parent_height, diff --git a/fendermint/vm/topdown/src/sync/mod.rs b/fendermint/vm/topdown/src/sync/mod.rs deleted file mode 100644 index 0092e8414..000000000 --- a/fendermint/vm/topdown/src/sync/mod.rs +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright 2022-2024 Protocol Labs -// SPDX-License-Identifier: Apache-2.0, MIT -//! A constant running process that fetch or listener to parent state - -mod syncer; -mod tendermint; - -use crate::proxy::ParentQueryProxy; -use crate::sync::syncer::LotusParentSyncer; -use crate::sync::tendermint::TendermintAwareSyncer; -use crate::voting::VoteTally; -use crate::{CachedFinalityProvider, Config, IPCParentFinality, ParentFinalityProvider, Toggle}; -use anyhow::anyhow; -use async_stm::atomically; -use ethers::utils::hex; -use ipc_ipld_resolver::ValidatorKey; -use std::sync::Arc; -use std::time::Duration; - -use fendermint_vm_genesis::{Power, Validator}; - -use crate::observation::Observation; -pub use syncer::fetch_topdown_events; - -#[derive(Clone, Debug)] -pub enum TopDownSyncEvent { - NodeSyncing, - NewProposal(Box), -} - -/// Query the parent finality from the block chain state. -/// -/// It returns `None` from queries until the ledger has been initialized. -pub trait ParentFinalityStateQuery { - /// Get the latest committed finality from the state - fn get_latest_committed_finality(&self) -> anyhow::Result>; - /// Get the current committee voting powers. - fn get_power_table(&self) -> anyhow::Result>>>; -} - -/// Queries the starting finality for polling. First checks the committed finality, if none, that -/// means the chain has just started, then query from the parent to get the genesis epoch. 
-async fn query_starting_finality( - query: &Arc, - parent_client: &Arc

, -) -> anyhow::Result -where - T: ParentFinalityStateQuery + Send + Sync + 'static, - P: ParentQueryProxy + Send + Sync + 'static, -{ - loop { - let mut finality = match query.get_latest_committed_finality() { - Ok(Some(finality)) => finality, - Ok(None) => { - tracing::debug!("app not ready for query yet"); - tokio::time::sleep(Duration::from_secs(5)).await; - continue; - } - Err(e) => { - tracing::warn!(error = e.to_string(), "cannot get committed finality"); - tokio::time::sleep(Duration::from_secs(5)).await; - continue; - } - }; - tracing::info!(finality = finality.to_string(), "latest finality committed"); - - // this means there are no previous committed finality yet, we fetch from parent to get - // the genesis epoch of the current subnet and its corresponding block hash. - if finality.height == 0 { - let genesis_epoch = parent_client.get_genesis_epoch().await?; - tracing::debug!(genesis_epoch = genesis_epoch, "obtained genesis epoch"); - let r = parent_client.get_block_hash(genesis_epoch).await?; - tracing::debug!( - block_hash = hex::encode(&r.block_hash), - "obtained genesis block hash", - ); - - finality = IPCParentFinality { - height: genesis_epoch, - block_hash: r.block_hash, - }; - tracing::info!( - genesis_finality = finality.to_string(), - "no previous finality committed, fetched from genesis epoch" - ); - } - - return Ok(finality); - } -} - -/// Queries the starting finality for polling. First checks the committed finality, if none, that -/// means the chain has just started, then query from the parent to get the genesis epoch. 
-async fn query_starting_comittee(query: &Arc) -> anyhow::Result>> -where - T: ParentFinalityStateQuery + Send + Sync + 'static, -{ - loop { - match query.get_power_table() { - Ok(Some(power_table)) => return Ok(power_table), - Ok(None) => { - tracing::debug!("app not ready for query yet"); - tokio::time::sleep(Duration::from_secs(5)).await; - continue; - } - Err(e) => { - tracing::warn!(error = e.to_string(), "cannot get comittee"); - tokio::time::sleep(Duration::from_secs(5)).await; - continue; - } - } - } -} - -/// Start the polling parent syncer in the background -pub async fn launch_polling_syncer( - query: T, - config: Config, - view_provider: Arc>>, - vote_tally: VoteTally, - parent_client: Arc

, - tendermint_client: C, -) -> anyhow::Result<()> -where - T: ParentFinalityStateQuery + Send + Sync + 'static, - C: tendermint_rpc::Client + Send + Sync + 'static, - P: ParentQueryProxy + Send + Sync + 'static, -{ - if !view_provider.is_enabled() { - return Err(anyhow!("provider not enabled, enable to run syncer")); - } - - let query = Arc::new(query); - let finality = query_starting_finality(&query, &parent_client).await?; - - let power_table = query_starting_comittee(&query).await?; - let power_table = power_table - .into_iter() - .map(|v| { - let vk = ValidatorKey::from(v.public_key.0); - let w = v.power.0; - (vk, w) - }) - .collect::>(); - - atomically(|| { - view_provider.set_new_finality(finality.clone(), None)?; - - vote_tally.set_finalized(finality.height, finality.block_hash.clone(), None, None)?; - vote_tally.set_power_table(power_table.clone())?; - Ok(()) - }) - .await; - - tracing::info!( - finality = finality.to_string(), - "launching parent syncer with last committed finality" - ); - - start_syncing( - config, - view_provider, - vote_tally, - parent_client, - query, - tendermint_client, - ); - - Ok(()) -} - -/// Start the parent finality listener in the background -fn start_syncing( - config: Config, - view_provider: Arc>>, - vote_tally: VoteTally, - parent_proxy: Arc

, - query: Arc, - tendermint_client: C, -) where - T: ParentFinalityStateQuery + Send + Sync + 'static, - C: tendermint_rpc::Client + Send + Sync + 'static, - P: ParentQueryProxy + Send + Sync + 'static, -{ - let mut interval = tokio::time::interval(config.polling_interval); - interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); - - tokio::spawn(async move { - let lotus_syncer = - LotusParentSyncer::new(config, parent_proxy, view_provider, vote_tally, query) - .expect(""); - - let mut tendermint_syncer = TendermintAwareSyncer::new(lotus_syncer, tendermint_client); - - loop { - interval.tick().await; - - if let Err(e) = tendermint_syncer.sync().await { - tracing::error!(error = e.to_string(), "sync with parent encountered error"); - } - } - }); -} diff --git a/fendermint/vm/topdown/src/sync/syncer.rs b/fendermint/vm/topdown/src/sync/syncer.rs deleted file mode 100644 index ee4748058..000000000 --- a/fendermint/vm/topdown/src/sync/syncer.rs +++ /dev/null @@ -1,596 +0,0 @@ -// Copyright 2022-2024 Protocol Labs -// SPDX-License-Identifier: Apache-2.0, MIT -//! The inner type of parent syncer - -use crate::finality::ParentViewPayload; -use crate::proxy::ParentQueryProxy; -use crate::sync::{query_starting_finality, ParentFinalityStateQuery}; -use crate::voting::{self, VoteTally}; -use crate::{ - is_null_round_str, BlockHash, BlockHeight, CachedFinalityProvider, Config, Error, Toggle, -}; -use anyhow::anyhow; -use async_stm::{atomically, atomically_or_err, StmError}; -use ethers::utils::hex; -use libp2p::futures::TryFutureExt; -use std::sync::Arc; -use tracing::instrument; - -use crate::observe::ParentFinalityAcquired; -use ipc_observability::{emit, serde::HexEncodableBlockHash}; - -/// Parent syncer that constantly poll parent. This struct handles lotus null blocks and deferred -/// execution. For ETH based parent, it should work out of the box as well. -pub(crate) struct LotusParentSyncer { - config: Config, - parent_proxy: Arc

, - provider: Arc>>, - vote_tally: VoteTally, - query: Arc, - - /// For testing purposes, we can sync one block at a time. - /// Not part of `Config` as it's a very niche setting; - /// if enabled it would slow down catching up with parent - /// history to a crawl, or one would have to increase - /// the polling frequence to where it's impractical after - /// we have caught up. - sync_many: bool, -} - -impl LotusParentSyncer -where - T: ParentFinalityStateQuery + Send + Sync + 'static, - P: ParentQueryProxy + Send + Sync + 'static, -{ - pub fn new( - config: Config, - parent_proxy: Arc

, - provider: Arc>>, - vote_tally: VoteTally, - query: Arc, - ) -> anyhow::Result { - Ok(Self { - config, - parent_proxy, - provider, - vote_tally, - query, - sync_many: true, - }) - } - - /// Insert the height into cache when we see a new non null block - pub async fn sync(&mut self) -> anyhow::Result<()> { - let chain_head = if let Some(h) = self.finalized_chain_head().await? { - h - } else { - return Ok(()); - }; - - let (mut latest_height_fetched, mut first_non_null_parent_hash) = - self.latest_cached_data().await; - tracing::debug!(chain_head, latest_height_fetched, "syncing heights"); - - if latest_height_fetched > chain_head { - tracing::warn!( - chain_head, - latest_height_fetched, - "chain head went backwards, potential reorg detected from height" - ); - return self.reset().await; - } - - if latest_height_fetched == chain_head { - tracing::debug!( - chain_head, - latest_height_fetched, - "the parent has yet to produce a new block" - ); - return Ok(()); - } - - loop { - if self.exceed_cache_size_limit().await { - tracing::debug!("exceeded cache size limit"); - break; - } - - first_non_null_parent_hash = match self - .poll_next(latest_height_fetched + 1, first_non_null_parent_hash) - .await - { - Ok(h) => h, - Err(Error::ParentChainReorgDetected) => { - tracing::warn!("potential reorg detected, clear cache and retry"); - self.reset().await?; - break; - } - Err(e) => return Err(anyhow!(e)), - }; - - latest_height_fetched += 1; - - if latest_height_fetched == chain_head { - tracing::debug!("reached the tip of the chain"); - break; - } else if !self.sync_many { - break; - } - } - - Ok(()) - } -} - -impl LotusParentSyncer -where - T: ParentFinalityStateQuery + Send + Sync + 'static, - P: ParentQueryProxy + Send + Sync + 'static, -{ - async fn exceed_cache_size_limit(&self) -> bool { - let max_cache_blocks = self.config.max_cache_blocks(); - atomically(|| self.provider.cached_blocks()).await > max_cache_blocks - } - - /// Get the latest data stored in the cache 
to pull the next block - async fn latest_cached_data(&self) -> (BlockHeight, BlockHash) { - // we are getting the latest height fetched in cache along with the first non null block - // that is stored in cache. - // we are doing two fetches in one `atomically` as if we get the data in two `atomically`, - // the cache might be updated in between the two calls. `atomically` should guarantee atomicity. - atomically(|| { - let latest_height = if let Some(h) = self.provider.latest_height()? { - h - } else { - unreachable!("guaranteed to have latest height, report bug please") - }; - - // first try to get the first non null block before latest_height + 1, i.e. from cache - let prev_non_null_height = - if let Some(height) = self.provider.first_non_null_block(latest_height)? { - tracing::debug!(height, "first non null block in cache"); - height - } else if let Some(p) = self.provider.last_committed_finality()? { - tracing::debug!( - height = p.height, - "first non null block not in cache, use latest finality" - ); - p.height - } else { - unreachable!("guaranteed to have last committed finality, report bug please") - }; - - let hash = if let Some(h) = self.provider.block_hash(prev_non_null_height)? { - h - } else { - unreachable!( - "guaranteed to have hash as the height {} is found", - prev_non_null_height - ) - }; - - Ok((latest_height, hash)) - }) - .await - } - - /// Poll the next block height. Returns finalized and executed block data. 
- async fn poll_next( - &mut self, - height: BlockHeight, - parent_block_hash: BlockHash, - ) -> Result { - tracing::debug!( - height, - parent_block_hash = hex::encode(&parent_block_hash), - "polling height with parent hash" - ); - - let block_hash_res = match self.parent_proxy.get_block_hash(height).await { - Ok(res) => res, - Err(e) => { - let err = e.to_string(); - if is_null_round_str(&err) { - tracing::debug!( - height, - "detected null round at height, inserted None to cache" - ); - - atomically_or_err::<_, Error, _>(|| { - self.provider.new_parent_view(height, None)?; - self.vote_tally - .add_block(height, None) - .map_err(map_voting_err)?; - Ok(()) - }) - .await?; - - emit(ParentFinalityAcquired { - source: "Parent syncer", - is_null: true, - block_height: height, - block_hash: None, - commitment_hash: None, - num_msgs: 0, - num_validator_changes: 0, - }); - - // Null block received, no block hash for the current height being polled. - // Return the previous parent hash as the non-null block hash. - return Ok(parent_block_hash); - } - return Err(Error::CannotQueryParent( - format!("get_block_hash: {e}"), - height, - )); - } - }; - - if block_hash_res.parent_block_hash != parent_block_hash { - tracing::warn!( - height, - parent_hash = hex::encode(&block_hash_res.parent_block_hash), - previous_hash = hex::encode(&parent_block_hash), - "parent block hash diff than previous hash", - ); - return Err(Error::ParentChainReorgDetected); - } - - let data = self.fetch_data(height, block_hash_res.block_hash).await?; - - tracing::debug!( - height, - staking_requests = data.1.len(), - cross_messages = data.2.len(), - "fetched data" - ); - - atomically_or_err::<_, Error, _>(|| { - // This is here so we see if there is abnormal amount of retries for some reason. 
- tracing::debug!(height, "adding data to the cache"); - - self.provider.new_parent_view(height, Some(data.clone()))?; - self.vote_tally - .add_block(height, Some(data.0.clone())) - .map_err(map_voting_err)?; - tracing::debug!(height, "non-null block pushed to cache"); - Ok(()) - }) - .await?; - - emit(ParentFinalityAcquired { - source: "Parent syncer", - is_null: false, - block_height: height, - block_hash: Some(HexEncodableBlockHash(data.0.clone())), - // TODO Karel, Willes - when we introduce commitment hash, we should add it here - commitment_hash: None, - num_msgs: data.2.len(), - num_validator_changes: data.1.len(), - }); - - Ok(data.0) - } - - async fn fetch_data( - &self, - height: BlockHeight, - block_hash: BlockHash, - ) -> Result { - fetch_data(self.parent_proxy.as_ref(), height, block_hash).await - } - - async fn finalized_chain_head(&self) -> anyhow::Result> { - let parent_chain_head_height = self.parent_proxy.get_chain_head_height().await?; - // sanity check - if parent_chain_head_height < self.config.chain_head_delay { - tracing::debug!("latest height not more than the chain head delay"); - return Ok(None); - } - - // we consider the chain head finalized only after the `chain_head_delay` - Ok(Some( - parent_chain_head_height - self.config.chain_head_delay, - )) - } - - /// Reset the cache in the face of a reorg - async fn reset(&self) -> anyhow::Result<()> { - let finality = query_starting_finality(&self.query, &self.parent_proxy).await?; - atomically(|| self.provider.reset(finality.clone())).await; - Ok(()) - } -} - -fn map_voting_err(e: StmError) -> StmError { - match e { - StmError::Abort(e) => { - tracing::error!( - error = e.to_string(), - "failed to append block to voting tally" - ); - StmError::Abort(Error::NotSequential) - } - StmError::Control(c) => StmError::Control(c), - } -} - -#[instrument(skip(parent_proxy))] -async fn fetch_data

( - parent_proxy: &P, - height: BlockHeight, - block_hash: BlockHash, -) -> Result -where - P: ParentQueryProxy + Send + Sync + 'static, -{ - let changes_res = parent_proxy - .get_validator_changes(height) - .map_err(|e| Error::CannotQueryParent(format!("get_validator_changes: {e}"), height)); - - let topdown_msgs_res = parent_proxy - .get_top_down_msgs(height) - .map_err(|e| Error::CannotQueryParent(format!("get_top_down_msgs: {e}"), height)); - - let (changes_res, topdown_msgs_res) = tokio::join!(changes_res, topdown_msgs_res); - let (changes_res, topdown_msgs_res) = (changes_res?, topdown_msgs_res?); - - if changes_res.block_hash != block_hash { - tracing::warn!( - height, - change_set_hash = hex::encode(&changes_res.block_hash), - block_hash = hex::encode(&block_hash), - "change set block hash does not equal block hash", - ); - return Err(Error::ParentChainReorgDetected); - } - - if topdown_msgs_res.block_hash != block_hash { - tracing::warn!( - height, - topdown_msgs_hash = hex::encode(&topdown_msgs_res.block_hash), - block_hash = hex::encode(&block_hash), - "topdown messages block hash does not equal block hash", - ); - return Err(Error::ParentChainReorgDetected); - } - - Ok((block_hash, changes_res.value, topdown_msgs_res.value)) -} - -pub async fn fetch_topdown_events

( - parent_proxy: &P, - start_height: BlockHeight, - end_height: BlockHeight, -) -> Result, Error> -where - P: ParentQueryProxy + Send + Sync + 'static, -{ - let mut events = Vec::new(); - for height in start_height..=end_height { - match parent_proxy.get_block_hash(height).await { - Ok(res) => { - let (block_hash, changes, msgs) = - fetch_data(parent_proxy, height, res.block_hash).await?; - - if !(changes.is_empty() && msgs.is_empty()) { - events.push((height, (block_hash, changes, msgs))); - } - } - Err(e) => { - if is_null_round_str(&e.to_string()) { - continue; - } else { - return Err(Error::CannotQueryParent( - format!("get_block_hash: {e}"), - height, - )); - } - } - } - } - Ok(events) -} - -#[cfg(test)] -mod tests { - use crate::proxy::ParentQueryProxy; - use crate::sync::syncer::LotusParentSyncer; - use crate::sync::ParentFinalityStateQuery; - use crate::voting::VoteTally; - use crate::{ - BlockHash, BlockHeight, CachedFinalityProvider, Config, IPCParentFinality, - SequentialKeyCache, Toggle, NULL_ROUND_ERR_MSG, - }; - use anyhow::anyhow; - use async_stm::atomically; - use async_trait::async_trait; - use fendermint_vm_genesis::{Power, Validator}; - use ipc_api::cross::IpcEnvelope; - use ipc_api::staking::StakingChangeRequest; - use ipc_provider::manager::{GetBlockHashResult, TopDownQueryPayload}; - use std::sync::Arc; - - /// How far behind the tip of the chain do we consider blocks final in the tests. 
- const FINALITY_DELAY: u64 = 2; - - struct TestParentFinalityStateQuery { - latest_finality: IPCParentFinality, - } - - impl ParentFinalityStateQuery for TestParentFinalityStateQuery { - fn get_latest_committed_finality(&self) -> anyhow::Result> { - Ok(Some(self.latest_finality.clone())) - } - fn get_power_table(&self) -> anyhow::Result>>> { - Ok(Some(vec![])) - } - } - - struct TestParentProxy { - blocks: SequentialKeyCache>, - } - - #[async_trait] - impl ParentQueryProxy for TestParentProxy { - async fn get_chain_head_height(&self) -> anyhow::Result { - Ok(self.blocks.upper_bound().unwrap()) - } - - async fn get_genesis_epoch(&self) -> anyhow::Result { - Ok(self.blocks.lower_bound().unwrap() - 1) - } - - async fn get_block_hash(&self, height: BlockHeight) -> anyhow::Result { - let r = self.blocks.get_value(height).unwrap(); - if r.is_none() { - return Err(anyhow!(NULL_ROUND_ERR_MSG)); - } - - for h in (self.blocks.lower_bound().unwrap()..height).rev() { - let v = self.blocks.get_value(h).unwrap(); - if v.is_none() { - continue; - } - return Ok(GetBlockHashResult { - parent_block_hash: v.clone().unwrap(), - block_hash: r.clone().unwrap(), - }); - } - panic!("invalid testing data") - } - - async fn get_top_down_msgs( - &self, - height: BlockHeight, - ) -> anyhow::Result>> { - Ok(TopDownQueryPayload { - value: vec![], - block_hash: self.blocks.get_value(height).cloned().unwrap().unwrap(), - }) - } - - async fn get_validator_changes( - &self, - height: BlockHeight, - ) -> anyhow::Result>> { - Ok(TopDownQueryPayload { - value: vec![], - block_hash: self.blocks.get_value(height).cloned().unwrap().unwrap(), - }) - } - } - - async fn new_syncer( - blocks: SequentialKeyCache>, - sync_many: bool, - ) -> LotusParentSyncer { - let config = Config { - chain_head_delay: FINALITY_DELAY, - polling_interval: Default::default(), - exponential_back_off: Default::default(), - exponential_retry_limit: 0, - max_proposal_range: Some(1), - max_cache_blocks: None, - proposal_delay: 
None, - }; - let genesis_epoch = blocks.lower_bound().unwrap(); - let proxy = Arc::new(TestParentProxy { blocks }); - let committed_finality = IPCParentFinality { - height: genesis_epoch, - block_hash: vec![0; 32], - }; - - let vote_tally = VoteTally::new( - vec![], - ( - committed_finality.height, - committed_finality.block_hash.clone(), - ), - ); - - let provider = CachedFinalityProvider::new( - config.clone(), - genesis_epoch, - Some(committed_finality.clone()), - proxy.clone(), - ); - let mut syncer = LotusParentSyncer::new( - config, - proxy, - Arc::new(Toggle::enabled(provider)), - vote_tally, - Arc::new(TestParentFinalityStateQuery { - latest_finality: committed_finality, - }), - ) - .unwrap(); - - // Some tests expect to sync one block at a time. - syncer.sync_many = sync_many; - - syncer - } - - /// Creates a mock of a new parent blockchain view. The key is the height and the value is the - /// block hash. If block hash is None, it means the current height is a null block. - macro_rules! 
new_parent_blocks { - ($($key:expr => $val:expr),* ,) => ( - hash_map!($($key => $val),*) - ); - ($($key:expr => $val:expr),*) => ({ - let mut map = SequentialKeyCache::sequential(); - $( map.append($key, $val).unwrap(); )* - map - }); - } - - #[tokio::test] - async fn happy_path() { - let parent_blocks = new_parent_blocks!( - 100 => Some(vec![0; 32]), // genesis block - 101 => Some(vec![1; 32]), - 102 => Some(vec![2; 32]), - 103 => Some(vec![3; 32]), - 104 => Some(vec![4; 32]), // after chain head delay, we fetch only to here - 105 => Some(vec![5; 32]), - 106 => Some(vec![6; 32]) // chain head - ); - - let mut syncer = new_syncer(parent_blocks, false).await; - - for h in 101..=104 { - syncer.sync().await.unwrap(); - let p = atomically(|| syncer.provider.latest_height()).await; - assert_eq!(p, Some(h)); - } - } - - #[tokio::test] - async fn with_non_null_block() { - let parent_blocks = new_parent_blocks!( - 100 => Some(vec![0; 32]), // genesis block - 101 => None, - 102 => None, - 103 => None, - 104 => Some(vec![4; 32]), - 105 => None, - 106 => None, - 107 => None, - 108 => Some(vec![5; 32]), - 109 => None, - 110 => None, - 111 => None - ); - - let mut syncer = new_syncer(parent_blocks, false).await; - - for h in 101..=109 { - syncer.sync().await.unwrap(); - assert_eq!( - atomically(|| syncer.provider.latest_height()).await, - Some(h) - ); - } - } -} diff --git a/fendermint/vm/topdown/src/sync/tendermint.rs b/fendermint/vm/topdown/src/sync/tendermint.rs deleted file mode 100644 index 22eb47e82..000000000 --- a/fendermint/vm/topdown/src/sync/tendermint.rs +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2022-2024 Protocol Labs -// SPDX-License-Identifier: Apache-2.0, MIT -//! 
The tendermint aware syncer - -use crate::proxy::ParentQueryProxy; -use crate::sync::syncer::LotusParentSyncer; -use crate::sync::ParentFinalityStateQuery; -use anyhow::Context; - -/// Tendermint aware syncer -pub(crate) struct TendermintAwareSyncer { - inner: LotusParentSyncer, - tendermint_client: C, -} - -impl TendermintAwareSyncer -where - T: ParentFinalityStateQuery + Send + Sync + 'static, - C: tendermint_rpc::Client + Send + Sync + 'static, - P: ParentQueryProxy + Send + Sync + 'static, -{ - pub fn new(inner: LotusParentSyncer, tendermint_client: C) -> Self { - Self { - inner, - tendermint_client, - } - } - - /// Sync with the parent, unless CometBFT is still catching up with the network, - /// in which case we'll get the changes from the subnet peers in the blocks. - pub async fn sync(&mut self) -> anyhow::Result<()> { - if self.is_syncing_peer().await? { - tracing::debug!("syncing with peer, skip parent finality syncing this round"); - return Ok(()); - } - self.inner.sync().await - } - - async fn is_syncing_peer(&self) -> anyhow::Result { - let status: tendermint_rpc::endpoint::status::Response = self - .tendermint_client - .status() - .await - .context("failed to get Tendermint status")?; - Ok(status.sync_info.catching_up) - } -} diff --git a/fendermint/vm/topdown/src/syncer/mod.rs b/fendermint/vm/topdown/src/syncer/mod.rs index 14a325d4f..dbfea2cfe 100644 --- a/fendermint/vm/topdown/src/syncer/mod.rs +++ b/fendermint/vm/topdown/src/syncer/mod.rs @@ -8,6 +8,9 @@ use crate::syncer::payload::ParentBlockView; use crate::syncer::poll::ParentPoll; use crate::syncer::store::ParentViewStore; use crate::{BlockHeight, Checkpoint}; +use anyhow::anyhow; +use async_trait::async_trait; +use serde::Deserialize; use std::time::Duration; use tokio::select; use tokio::sync::{mpsc, oneshot}; @@ -24,6 +27,7 @@ pub enum TopDownSyncEvent { NewProposal(Box), } +#[derive(Debug, Clone, Deserialize)] pub struct ParentSyncerConfig { pub request_channel_size: usize, /// The event 
broadcast channel buffer size @@ -35,10 +39,6 @@ pub struct ParentSyncerConfig { pub chain_head_delay: BlockHeight, /// Parent syncing cron period, in seconds pub polling_interval: Duration, - /// Top down exponential back off retry base - pub exponential_back_off: Duration, - /// The max number of retries for exponential backoff before giving up - pub exponential_retry_limit: usize, /// Max number of un-finalized parent blocks that should be stored in the store pub max_store_blocks: BlockHeight, /// Attempts to sync as many block as possible till the finalized chain head @@ -52,21 +52,33 @@ pub struct ParentSyncerReactorClient { tx: mpsc::Sender, } -pub fn start_parent_syncer( +/// Polls the parent block view +#[async_trait] +pub trait ParentPoller { + /// The previous checkpoint committed + fn last_checkpoint(&self) -> &Checkpoint; + + /// The target block height is finalized, purge all the parent view before the target height + fn finalize(&mut self, checkpoint: Checkpoint) -> anyhow::Result<()>; + + /// Try to poll the next parent height + async fn try_poll(&mut self) -> anyhow::Result<()>; + + /// Dump the parent block view from the height after the last committed checkpoint to the `to` height + fn dump_parent_block_views( + &self, + to: BlockHeight, + ) -> anyhow::Result>>; +} + +pub fn start_parent_syncer( config: ParentSyncerConfig, - proxy: P, - store: S, - last_finalized: Checkpoint, -) -> anyhow::Result -where - S: ParentViewStore + Send + Sync + 'static, - P: Send + Sync + 'static + ParentQueryProxy, -{ + mut poller: P, +) -> anyhow::Result { let (tx, mut rx) = mpsc::channel(config.request_channel_size); tokio::spawn(async move { let polling_interval = config.polling_interval; - let mut poller = ParentPoll::new(config, proxy, store, last_finalized); loop { select! 
{ @@ -98,7 +110,7 @@ impl ParentSyncerReactorClient { pub async fn query_parent_block_view( &self, to: BlockHeight, - ) -> anyhow::Result>, Error>> { + ) -> anyhow::Result>>> { let (tx, rx) = oneshot::channel(); self.tx .send(ParentSyncerRequest::QueryParentBlockViews { to, tx }) @@ -112,15 +124,14 @@ enum ParentSyncerRequest { Finalized(Checkpoint), QueryParentBlockViews { to: BlockHeight, - tx: oneshot::Sender>, Error>>, + tx: oneshot::Sender>>>, }, } -fn handle_request(req: ParentSyncerRequest, poller: &mut ParentPoll) -where - S: ParentViewStore + Send + Sync + 'static, - P: Send + Sync + 'static + ParentQueryProxy, -{ +fn handle_request( + req: ParentSyncerRequest, + poller: &mut P, +) { match req { ParentSyncerRequest::Finalized(c) => { let height = c.target_height(); @@ -129,27 +140,15 @@ where } } ParentSyncerRequest::QueryParentBlockViews { to, tx } => { - let store = poller.store(); - - let mut r = vec![]; - - let start = poller.last_checkpoint().target_height() + 1; - for h in start..=to { - match store.get(h) { - Ok(v) => r.push(v), - Err(e) => { - tracing::error!( - height = h, - err = e.to_string(), - "cannot query parent block view" - ); - let _ = tx.send(Err(e)); - return; - } - } - } - - let _ = tx.send(Ok(r)); + let r = poller.dump_parent_block_views(to).map_err(|e| { + tracing::error!( + height = to, + err = e.to_string(), + "cannot query parent block view" + ); + anyhow!("cannot read parent block view: {}", e) + }); + let _ = tx.send(r); } } } diff --git a/fendermint/vm/topdown/src/syncer/poll.rs b/fendermint/vm/topdown/src/syncer/poll.rs index f5ce93e4b..8695d61f8 100644 --- a/fendermint/vm/topdown/src/syncer/poll.rs +++ b/fendermint/vm/topdown/src/syncer/poll.rs @@ -7,16 +7,17 @@ use crate::proxy::ParentQueryProxy; use crate::syncer::error::Error; use crate::syncer::payload::ParentBlockView; use crate::syncer::store::ParentViewStore; -use crate::syncer::{ParentSyncerConfig, TopDownSyncEvent}; +use crate::syncer::{ParentPoller, 
ParentSyncerConfig, TopDownSyncEvent}; use crate::{is_null_round_str, BlockHash, BlockHeight, Checkpoint}; use anyhow::anyhow; +use async_trait::async_trait; use ipc_observability::emit; use ipc_observability::serde::HexEncodableBlockHash; use libp2p::futures::TryFutureExt; use tokio::sync::broadcast; use tracing::instrument; -pub(crate) struct ParentPoll { +pub struct ParentPoll { config: ParentSyncerConfig, parent_proxy: P, store: S, @@ -24,32 +25,18 @@ pub(crate) struct ParentPoll { last_finalized: Checkpoint, } -impl ParentPoll +#[async_trait] +impl ParentPoller for ParentPoll where S: ParentViewStore + Send + Sync + 'static, P: Send + Sync + 'static + ParentQueryProxy, { - pub fn new(config: ParentSyncerConfig, proxy: P, store: S, last_finalized: Checkpoint) -> Self { - let (tx, _) = broadcast::channel(config.broadcast_channel_size); - Self { - config, - parent_proxy: proxy, - store, - event_broadcast: tx, - last_finalized, - } - } - - pub fn store(&self) -> &S { - &self.store - } - - pub fn last_checkpoint(&self) -> &Checkpoint { + fn last_checkpoint(&self) -> &Checkpoint { &self.last_finalized } /// The target block height is finalized, purge all the parent view before the target height - pub fn finalize(&mut self, checkpoint: Checkpoint) -> Result<(), Error> { + fn finalize(&mut self, checkpoint: Checkpoint) -> anyhow::Result<()> { let Some(min_height) = self.store.min_parent_view_height()? else { return Ok(()); }; @@ -59,38 +46,8 @@ where Ok(()) } - /// Get the latest non null block data stored - async fn latest_nonnull_data(&self) -> anyhow::Result<(BlockHeight, BlockHash)> { - let Some(latest_height) = self.store.max_parent_view_height()? else { - return Ok(( - self.last_finalized.target_height(), - self.last_finalized.target_hash().clone(), - )); - }; - - let start = self.last_finalized.target_height() + 1; - for h in (start..=latest_height).rev() { - let Some(p) = self.store.get(h)? 
else { - continue; - }; - - // if parent hash of the proposal is null, it means the - let Some(p) = p.payload else { - continue; - }; - - return Ok((h, p.parent_hash)); - } - - // this means the votes stored are all null blocks, return last committed finality - Ok(( - self.last_finalized.target_height(), - self.last_finalized.target_hash().clone(), - )) - } - /// Insert the height into cache when we see a new non null block - pub async fn try_poll(&mut self) -> anyhow::Result<()> { + async fn try_poll(&mut self) -> anyhow::Result<()> { let Some(chain_head) = self.finalized_chain_head().await? else { return Ok(()); }; @@ -149,6 +106,73 @@ where Ok(()) } + fn dump_parent_block_views( + &self, + to: BlockHeight, + ) -> anyhow::Result>> { + let store = self.store(); + + let mut r = vec![]; + + let start = self.last_checkpoint().target_height() + 1; + for h in start..=to { + r.push(store.get(h)?) + } + + Ok(r) + } +} + +impl ParentPoll +where + S: ParentViewStore + Send + Sync + 'static, + P: Send + Sync + 'static + ParentQueryProxy, +{ + pub fn new(config: ParentSyncerConfig, proxy: P, store: S, last_finalized: Checkpoint) -> Self { + let (tx, _) = broadcast::channel(config.broadcast_channel_size); + Self { + config, + parent_proxy: proxy, + store, + event_broadcast: tx, + last_finalized, + } + } + + pub fn store(&self) -> &S { + &self.store + } + + /// Get the latest non null block data stored + async fn latest_nonnull_data(&self) -> anyhow::Result<(BlockHeight, BlockHash)> { + let Some(latest_height) = self.store.max_parent_view_height()? else { + return Ok(( + self.last_finalized.target_height(), + self.last_finalized.target_hash().clone(), + )); + }; + + let start = self.last_finalized.target_height() + 1; + for h in (start..=latest_height).rev() { + let Some(p) = self.store.get(h)? 
else { + continue; + }; + + // if parent hash of the proposal is null, it means the + let Some(p) = p.payload else { + continue; + }; + + return Ok((h, p.parent_hash)); + } + + // this means the votes stored are all null blocks, return last committed finality + Ok(( + self.last_finalized.target_height(), + self.last_finalized.target_hash().clone(), + )) + } + fn store_full(&self) -> anyhow::Result { let Some(h) = self.store.max_parent_view_height()? else { return Ok(false); diff --git a/fendermint/vm/topdown/src/syncer/store.rs b/fendermint/vm/topdown/src/syncer/store.rs index f38b4dccb..ac6eed00c 100644 --- a/fendermint/vm/topdown/src/syncer/store.rs +++ b/fendermint/vm/topdown/src/syncer/store.rs @@ -25,6 +25,14 @@ pub struct InMemoryParentViewStore { inner: SequentialKeyCache, } +impl InMemoryParentViewStore { + pub fn new() -> Self { + Self { + inner: SequentialKeyCache::sequential(), + } + } +} + impl ParentViewStore for InMemoryParentViewStore { fn store(&mut self, view: ParentBlockView) -> Result<(), Error> { self.inner diff --git a/fendermint/vm/topdown/src/toggle.rs b/fendermint/vm/topdown/src/toggle.rs deleted file mode 100644 index c7dd10065..000000000 --- a/fendermint/vm/topdown/src/toggle.rs +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2022-2024 Protocol Labs -// SPDX-License-Identifier: Apache-2.0, MIT - -use crate::finality::ParentViewPayload; -use crate::{ - BlockHash, BlockHeight, CachedFinalityProvider, Error, IPCParentFinality, - ParentFinalityProvider, ParentViewProvider, -}; -use anyhow::anyhow; -use async_stm::{Stm, StmResult}; -use ipc_api::cross::IpcEnvelope; -use ipc_api::staking::StakingChangeRequest; - -/// The parent finality provider could have all functionalities disabled. -#[derive(Clone)] -pub struct Toggle

{ - inner: Option

, -} - -impl

Toggle

{ - pub fn disabled() -> Self { - Self { inner: None } - } - - pub fn enabled(inner: P) -> Self { - Self { inner: Some(inner) } - } - - pub fn is_enabled(&self) -> bool { - self.inner.is_some() - } - - fn perform_or_else(&self, f: F, other: T) -> Result - where - F: FnOnce(&P) -> Result, - { - match &self.inner { - Some(p) => f(p), - None => Ok(other), - } - } -} - -#[async_trait::async_trait] -impl ParentViewProvider for Toggle

{ - fn genesis_epoch(&self) -> anyhow::Result { - match self.inner.as_ref() { - Some(p) => p.genesis_epoch(), - None => Err(anyhow!("provider is toggled off")), - } - } - - async fn validator_changes_from( - &self, - from: BlockHeight, - to: BlockHeight, - ) -> anyhow::Result> { - match self.inner.as_ref() { - Some(p) => p.validator_changes_from(from, to).await, - None => Err(anyhow!("provider is toggled off")), - } - } - - async fn top_down_msgs_from( - &self, - from: BlockHeight, - to: BlockHeight, - ) -> anyhow::Result> { - match self.inner.as_ref() { - Some(p) => p.top_down_msgs_from(from, to).await, - None => Err(anyhow!("provider is toggled off")), - } - } -} - -impl ParentFinalityProvider for Toggle

{ - fn next_proposal(&self) -> Stm> { - self.perform_or_else(|p| p.next_proposal(), None) - } - - fn check_proposal(&self, proposal: &IPCParentFinality) -> Stm { - self.perform_or_else(|p| p.check_proposal(proposal), false) - } - - fn set_new_finality( - &self, - finality: IPCParentFinality, - previous_finality: Option, - ) -> Stm<()> { - self.perform_or_else(|p| p.set_new_finality(finality, previous_finality), ()) - } -} - -impl

Toggle> { - pub fn block_hash(&self, height: BlockHeight) -> Stm> { - self.perform_or_else(|p| p.block_hash(height), None) - } - - pub fn latest_height_in_cache(&self) -> Stm> { - self.perform_or_else(|p| p.latest_height_in_cache(), None) - } - - pub fn latest_height(&self) -> Stm> { - self.perform_or_else(|p| p.latest_height(), None) - } - - pub fn last_committed_finality(&self) -> Stm> { - self.perform_or_else(|p| p.last_committed_finality(), None) - } - - pub fn new_parent_view( - &self, - height: BlockHeight, - maybe_payload: Option, - ) -> StmResult<(), Error> { - self.perform_or_else(|p| p.new_parent_view(height, maybe_payload), ()) - } - - pub fn reset(&self, finality: IPCParentFinality) -> Stm<()> { - self.perform_or_else(|p| p.reset(finality), ()) - } - - pub fn cached_blocks(&self) -> Stm { - self.perform_or_else(|p| p.cached_blocks(), BlockHeight::MAX) - } - - pub fn first_non_null_block(&self, height: BlockHeight) -> Stm> { - self.perform_or_else(|p| p.first_non_null_block(height), None) - } -} diff --git a/fendermint/vm/topdown/src/vote/error.rs b/fendermint/vm/topdown/src/vote/error.rs index aeb22de2f..1b79a6033 100644 --- a/fendermint/vm/topdown/src/vote/error.rs +++ b/fendermint/vm/topdown/src/vote/error.rs @@ -22,4 +22,7 @@ pub enum Error { #[error("validator cannot sign vote")] CannotSignVote, + + #[error("cannot publish vote {0}")] + CannotPublishVote(String), } diff --git a/fendermint/vm/topdown/src/vote/mod.rs b/fendermint/vm/topdown/src/vote/mod.rs index 74885d8d5..7b89a5b6d 100644 --- a/fendermint/vm/topdown/src/vote/mod.rs +++ b/fendermint/vm/topdown/src/vote/mod.rs @@ -9,26 +9,28 @@ pub mod store; mod tally; use crate::observation::{CertifiedObservation, Observation}; -use crate::sync::TopDownSyncEvent; +use crate::syncer::TopDownSyncEvent; use crate::vote::gossip::GossipClient; use crate::vote::operation::{OperationMetrics, OperationStateMachine}; use crate::vote::payload::{PowerUpdates, Vote, VoteTallyState}; use 
crate::vote::store::VoteStore; use crate::vote::tally::VoteTally; use crate::BlockHeight; +use anyhow::anyhow; use error::Error; use fendermint_crypto::quorum::ECDSACertificate; use fendermint_crypto::SecretKey; use fendermint_vm_genesis::ValidatorKey; use serde::{Deserialize, Serialize}; +use std::borrow::Borrow; use std::collections::HashMap; use std::time::Duration; use tokio::sync::{broadcast, mpsc, oneshot}; pub type Weight = u64; -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct Config { +#[derive(Deserialize, Debug, Clone)] +pub struct VoteConfig { /// The reactor request channel buffer size pub req_channel_buffer_size: usize, /// The number of requests the reactor should process per run before handling other tasks @@ -40,12 +42,13 @@ pub struct Config { } /// The client to interact with the vote reactor +#[derive(Clone)] pub struct VoteReactorClient { tx: mpsc::Sender, } pub struct StartVoteReactorParams { - pub config: Config, + pub config: VoteConfig, pub validator_key: SecretKey, pub power_table: PowerUpdates, pub last_finalized_height: BlockHeight, @@ -166,6 +169,19 @@ impl VoteReactorClient { .await?; Ok(()) } + + pub async fn check_quorum_cert( + &self, + cert: Box>, + ) -> anyhow::Result<()> { + let is_reached = self + .request(|tx| VoteReactorRequest::CheckQuorumCert { tx, cert }) + .await?; + if !is_reached { + return Err(anyhow!("quorum not reached")); + } + Ok(()) + } } enum VoteReactorRequest { @@ -183,6 +199,10 @@ enum VoteReactorRequest { DumpAllVotes(oneshot::Sender>, Error>>), /// Get the current vote tally state variables in vote tally QueryState(oneshot::Sender), + CheckQuorumCert { + cert: Box>, + tx: oneshot::Sender, + }, /// Queries the vote tally to see if there are new quorum formed FindQuorum(oneshot::Sender>>), /// Update power of some validators. 
If the weight is zero, the validator is removed @@ -215,7 +235,7 @@ struct VotingHandler { internal_event_listener: broadcast::Receiver, vote_tally: VoteTally, latest_child_block: BlockHeight, - config: Config, + config: VoteConfig, } impl VotingHandler @@ -264,6 +284,15 @@ where VoteReactorRequest::NewLocalBlockMined(n) => { self.latest_child_block = n; } + VoteReactorRequest::CheckQuorumCert { cert, tx } => { + if !self.vote_tally.check_quorum_cert(cert.borrow()) { + let _ = tx.send(false); + } else { + let _ = tx.send( + self.vote_tally.last_finalized_height() + 1 == cert.payload().parent_height, + ); + } + } } } diff --git a/fendermint/vm/topdown/src/vote/operation/paused.rs b/fendermint/vm/topdown/src/vote/operation/paused.rs index 33931e4e8..ee86bbf79 100644 --- a/fendermint/vm/topdown/src/vote/operation/paused.rs +++ b/fendermint/vm/topdown/src/vote/operation/paused.rs @@ -1,7 +1,7 @@ // Copyright 2022-2024 Protocol Labs // SPDX-License-Identifier: Apache-2.0, MIT -use crate::sync::TopDownSyncEvent; +use crate::syncer::TopDownSyncEvent; use crate::vote::gossip::GossipClient; use crate::vote::operation::active::ActiveOperationMode; use crate::vote::operation::{OperationMetrics, OperationStateMachine, ACTIVE, PAUSED}; diff --git a/fendermint/vm/topdown/src/vote/tally.rs b/fendermint/vm/topdown/src/vote/tally.rs index 8b27113e1..540133661 100644 --- a/fendermint/vm/topdown/src/vote/tally.rs +++ b/fendermint/vm/topdown/src/vote/tally.rs @@ -89,6 +89,17 @@ impl VoteTally { Ok(votes.into_owned()) } + pub fn check_quorum_cert(&self, cert: &ECDSACertificate) -> bool { + let power_table = self.power_table.iter().map(|(v, w)| (v.public_key(), *w)); + match cert.quorum_reached(power_table, self.quorum_ratio) { + Ok(v) => v, + Err(e) => { + tracing::error!(err = e.to_string(), "check quorum encountered error"); + false + } + } + } + /// Dump all the votes that is currently stored in the vote tally. 
/// This is generally a very expensive operation, but good for debugging, use with care pub fn dump_votes(&self) -> Result>, Error> { diff --git a/fendermint/vm/topdown/src/voting.rs b/fendermint/vm/topdown/src/voting.rs deleted file mode 100644 index 793c2ab24..000000000 --- a/fendermint/vm/topdown/src/voting.rs +++ /dev/null @@ -1,480 +0,0 @@ -// Copyright 2022-2024 Protocol Labs -// SPDX-License-Identifier: Apache-2.0, MIT - -use async_stm::{abort, atomically_or_err, retry, Stm, StmResult, TVar}; -use serde::{de::DeserializeOwned, Serialize}; -use std::fmt::Display; -use std::hash::Hash; -use std::{fmt::Debug, time::Duration}; - -use crate::observe::{ - ParentFinalityCommitted, ParentFinalityPeerQuorumReached, ParentFinalityPeerVoteReceived, - ParentFinalityPeerVoteSent, -}; -use crate::{BlockHash, BlockHeight}; -use ipc_observability::{emit, serde::HexEncodableBlockHash}; - -// Usign this type because it's `Hash`, unlike the normal `libsecp256k1::PublicKey`. -pub use ipc_ipld_resolver::ValidatorKey; -use ipc_ipld_resolver::VoteRecord; - -pub type Weight = u64; - -#[derive(Debug, thiserror::Error)] -pub enum Error = BlockHash> { - #[error("the last finalized block has not been set")] - Uninitialized, - - #[error("failed to extend chain; expected block height {0}, got {1}")] - UnexpectedBlock(BlockHeight, BlockHeight), - - #[error("validator unknown or has no power: {0:?}")] - UnpoweredValidator(K), - - #[error( - "equivocation by validator {0:?} at height {1}; {} != {}", - hex::encode(.2), - hex::encode(.3) - )] - Equivocation(K, BlockHeight, V, V), -} - -/// Keep track of votes being gossiped about parent chain finality -/// and tally up the weights of the validators on the child subnet, -/// so that we can ask for proposals that are not going to be voted -/// down. -#[derive(Clone)] -pub struct VoteTally { - /// Current validator weights. These are the ones who will vote on the blocks, - /// so these are the weights which need to form a quorum. 
- power_table: TVar>, - - /// The *finalized mainchain* of the parent as observed by this node. - /// - /// These are assumed to be final because IIRC that's how the syncer works, - /// only fetching the info about blocks which are already sufficiently deep. - /// - /// When we want to propose, all we have to do is walk back this chain and - /// tally the votes we collected for the block hashes until we reach a quorum. - /// - /// The block hash is optional to allow for null blocks on Filecoin rootnet. - chain: TVar>>, - - /// Index votes received by height and hash, which makes it easy to look up - /// all the votes for a given block hash and also to verify that a validator - /// isn't equivocating by trying to vote for two different things at the - /// same height. - votes: TVar>>>, - - /// Adding votes can be paused if we observe that looking for a quorum takes too long - /// and is often retried due to votes being added. - pause_votes: TVar, -} - -impl VoteTally -where - K: Clone + Hash + Eq + Sync + Send + 'static + Debug + Display, - V: AsRef<[u8]> + Clone + Hash + Eq + Sync + Send + 'static, -{ - /// Create an uninitialized instance. Before blocks can be added to it - /// we will have to set the last finalized block. - /// - /// The reason this exists is so that we can delay initialization until - /// after the genesis block has been executed. - pub fn empty() -> Self { - Self { - power_table: TVar::default(), - chain: TVar::default(), - votes: TVar::default(), - pause_votes: TVar::new(false), - } - } - - /// Initialize the vote tally from the current power table - /// and the last finalized block from the ledger. 
- pub fn new(power_table: Vec<(K, Weight)>, last_finalized_block: (BlockHeight, V)) -> Self { - let (height, hash) = last_finalized_block; - Self { - power_table: TVar::new(im::HashMap::from_iter(power_table)), - chain: TVar::new(im::OrdMap::from_iter([(height, Some(hash))])), - votes: TVar::default(), - pause_votes: TVar::new(false), - } - } - - /// Check that a validator key is currently part of the power table. - pub fn has_power(&self, validator_key: &K) -> Stm { - let pt = self.power_table.read()?; - // For consistency consider validators without power unknown. - match pt.get(validator_key) { - None => Ok(false), - Some(weight) => Ok(*weight > 0), - } - } - - /// Calculate the minimum weight needed for a proposal to pass with the current membership. - /// - /// This is inclusive, that is, if the sum of weight is greater or equal to this, it should pass. - /// The equivalent formula can be found in CometBFT [here](https://github.com/cometbft/cometbft/blob/a8991d63e5aad8be82b90329b55413e3a4933dc0/types/vote_set.go#L307). - pub fn quorum_threshold(&self) -> Stm { - let total_weight: Weight = self.power_table.read().map(|pt| pt.values().sum())?; - - Ok(total_weight * 2 / 3 + 1) - } - - /// Return the height of the first entry in the chain. - /// - /// This is the block that was finalized *in the ledger*. - pub fn last_finalized_height(&self) -> Stm { - self.chain - .read() - .map(|c| c.get_min().map(|(h, _)| *h).unwrap_or_default()) - } - - /// Return the height of the last entry in the chain. - /// - /// This is the block that we can cast our vote on as final. - pub fn latest_height(&self) -> Stm { - self.chain - .read() - .map(|c| c.get_max().map(|(h, _)| *h).unwrap_or_default()) - } - - /// Get the hash of a block at the given height, if known. - pub fn block_hash(&self, height: BlockHeight) -> Stm> { - self.chain.read().map(|c| c.get(&height).cloned().flatten()) - } - - /// Add the next final block observed on the parent blockchain. 
- /// - /// Returns an error unless it's exactly the next expected height, - /// so the caller has to call this in every epoch. If the parent - /// chain produced no blocks in that epoch then pass `None` to - /// represent that null-round in the tally. - pub fn add_block( - &self, - block_height: BlockHeight, - block_hash: Option, - ) -> StmResult<(), Error> { - let mut chain = self.chain.read_clone()?; - - // Check that we are extending the chain. We could also ignore existing heights. - match chain.get_max() { - None => { - return abort(Error::Uninitialized); - } - Some((parent_height, _)) => { - if block_height != parent_height + 1 { - return abort(Error::UnexpectedBlock(parent_height + 1, block_height)); - } - } - } - - chain.insert(block_height, block_hash); - - self.chain.write(chain)?; - - Ok(()) - } - - /// Add a vote we received. - /// - /// Returns `true` if this vote was added, `false` if it was ignored as a - /// duplicate or a height we already finalized, and an error if it's an - /// equivocation or from a validator we don't know. - pub fn add_vote( - &self, - validator_key: K, - block_height: BlockHeight, - block_hash: V, - ) -> StmResult> { - if *self.pause_votes.read()? { - retry()?; - } - - let min_height = self.last_finalized_height()?; - - if block_height < min_height { - return Ok(false); - } - - if !self.has_power(&validator_key)? 
{ - return abort(Error::UnpoweredValidator(validator_key)); - } - - let mut votes = self.votes.read_clone()?; - let votes_at_height = votes.entry(block_height).or_default(); - - for (bh, vs) in votes_at_height.iter() { - if *bh != block_hash && vs.contains(&validator_key) { - return abort(Error::Equivocation( - validator_key, - block_height, - block_hash, - bh.clone(), - )); - } - } - - let validator_pub_key = validator_key.to_string(); - - let votes_for_block = votes_at_height.entry(block_hash.clone()).or_default(); - - if votes_for_block.insert(validator_key).is_some() { - return Ok(false); - } - - self.votes.write(votes)?; - - emit(ParentFinalityPeerVoteReceived { - block_height, - validator: &validator_pub_key, - block_hash: HexEncodableBlockHash(block_hash.as_ref().to_vec()), - // TODO- this needs to be the commitment hash once implemented - commitment_hash: None, - }); - - Ok(true) - } - - /// Pause adding more votes until we are finished calling `find_quorum` which - /// automatically re-enables them. - pub fn pause_votes_until_find_quorum(&self) -> Stm<()> { - self.pause_votes.write(true) - } - - /// Find a block on the (from our perspective) finalized chain that gathered enough votes from validators. - pub fn find_quorum(&self) -> Stm> { - self.pause_votes.write(false)?; - - let quorum_threshold = self.quorum_threshold()?; - let chain = self.chain.read()?; - - let Some((finalized_height, _)) = chain.get_min() else { - tracing::debug!("finalized height not found"); - return Ok(None); - }; - - let votes = self.votes.read()?; - let power_table = self.power_table.read()?; - - let mut weight = 0; - let mut voters = im::HashSet::new(); - - for (block_height, block_hash) in chain.iter().rev() { - if block_height == finalized_height { - tracing::debug!( - block_height, - finalized_height, - "finalized height and block height equal, no new proposals" - ); - break; // This block is already finalized in the ledger, no need to propose it again. 
- } - let Some(block_hash) = block_hash else { - tracing::debug!(block_height, "null block found in vote proposal"); - continue; // Skip null blocks - }; - let Some(votes_at_height) = votes.get(block_height) else { - tracing::debug!(block_height, "no votes"); - continue; - }; - let Some(votes_for_block) = votes_at_height.get(block_hash) else { - tracing::debug!(block_height, "no votes for block"); - continue; // We could detect equovicating voters here. - }; - - for vk in votes_for_block { - if voters.insert(vk.clone()).is_none() { - // New voter, get their current weight; it might be 0 if they have been removed. - weight += power_table.get(vk).cloned().unwrap_or_default(); - tracing::debug!(weight, key = ?vk, "new voter"); - } - } - - tracing::debug!(weight, quorum_threshold, "showdown"); - - if weight >= quorum_threshold { - emit(ParentFinalityPeerQuorumReached { - block_height: *block_height, - block_hash: HexEncodableBlockHash(block_hash.as_ref().to_vec()), - // TODO - just placeholder - need to use real commitment once implemented - commitment_hash: None, - weight, - }); - - return Ok(Some((*block_height, block_hash.clone()))); - } - } - - Ok(None) - } - - /// Call when a new finalized block is added to the ledger, to clear out all preceding blocks. - /// - /// After this operation the minimum item in the chain will the new finalized block. 
- pub fn set_finalized( - &self, - parent_block_height: BlockHeight, - parent_block_hash: V, - proposer: Option<&str>, - local_block_height: Option, - ) -> Stm<()> { - self.chain.update(|chain| { - let (_, mut chain) = chain.split(&parent_block_height); - chain.insert(parent_block_height, Some(parent_block_hash.clone())); - chain - })?; - - self.votes - .update(|votes| votes.split(&parent_block_height).1)?; - - emit(ParentFinalityCommitted { - local_height: local_block_height, - parent_height: parent_block_height, - block_hash: HexEncodableBlockHash(parent_block_hash.as_ref().to_vec()), - proposer, - }); - - Ok(()) - } - - /// Overwrite the power table after it has changed to a new snapshot. - /// - /// This method expects absolute values, it completely replaces the existing powers. - pub fn set_power_table(&self, power_table: Vec<(K, Weight)>) -> Stm<()> { - let power_table = im::HashMap::from_iter(power_table); - // We don't actually have to remove the votes of anyone who is no longer a validator, - // we just have to make sure to handle the case when they are not in the power table. - self.power_table.write(power_table) - } - - /// Update the power table after it has changed with changes. - /// - /// This method expects only the updated values, leaving everyone who isn't in it untouched - pub fn update_power_table(&self, power_updates: Vec<(K, Weight)>) -> Stm<()> { - if power_updates.is_empty() { - return Ok(()); - } - // We don't actually have to remove the votes of anyone who is no longer a validator, - // we just have to make sure to handle the case when they are not in the power table. - self.power_table.update_mut(|pt| { - for (vk, w) in power_updates { - if w == 0 { - pt.remove(&vk); - } else { - *pt.entry(vk).or_default() = w; - } - } - }) - } -} - -/// Poll the vote tally for new finalized blocks and publish a vote about them if the validator is part of the power table. 
-pub async fn publish_vote_loop( - vote_tally: VoteTally, - // Throttle votes to maximum 1/interval - vote_interval: Duration, - // Publish a vote after a timeout even if it's the same as before. - vote_timeout: Duration, - key: libp2p::identity::Keypair, - subnet_id: ipc_api::subnet_id::SubnetID, - client: ipc_ipld_resolver::Client, - to_vote: F, -) where - F: Fn(BlockHeight, BlockHash) -> V, - V: Serialize + DeserializeOwned, -{ - let validator_key = ValidatorKey::from(key.public()); - - let mut vote_interval = tokio::time::interval(vote_interval); - vote_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); - - let mut prev = None; - - loop { - let prev_height = prev - .as_ref() - .map(|(height, _, _)| *height) - .unwrap_or_default(); - - let result = tokio::time::timeout( - vote_timeout, - atomically_or_err(|| { - let next_height = vote_tally.latest_height()?; - - if next_height == prev_height { - retry()?; - } - - let next_hash = match vote_tally.block_hash(next_height)? { - Some(next_hash) => next_hash, - None => retry()?, - }; - - let has_power = vote_tally.has_power(&validator_key)?; - - if has_power { - // Add our own vote to the tally directly rather than expecting a message from the gossip channel. - // TODO (ENG-622): I'm not sure gossip messages published by this node would be delivered to it, so this might be the only way. - // NOTE: We should not see any other error from this as we just checked that the validator had power, - // but for piece of mind let's return and log any potential errors, rather than ignore them. 
- vote_tally.add_vote(validator_key.clone(), next_height, next_hash.clone())?; - } - - Ok((next_height, next_hash, has_power)) - }), - ) - .await; - - let (next_height, next_hash, has_power) = match result { - Ok(Ok(vs)) => vs, - Err(_) => { - if let Some(ref vs) = prev { - tracing::debug!("vote timeout; re-publishing previous vote"); - vs.clone() - } else { - tracing::debug!("vote timeout, but no previous vote to re-publish"); - continue; - } - } - Ok(Err(e)) => { - tracing::error!( - error = e.to_string(), - "failed to get next height to vote on" - ); - continue; - } - }; - - if has_power && prev_height > 0 { - tracing::debug!(block_height = next_height, "publishing finality vote"); - - let vote = to_vote(next_height, next_hash.clone()); - - match VoteRecord::signed(&key, subnet_id.clone(), vote) { - Ok(vote) => { - if let Err(e) = client.publish_vote(vote) { - tracing::error!(error = e.to_string(), "failed to publish vote"); - } - - emit(ParentFinalityPeerVoteSent { - block_height: next_height, - block_hash: HexEncodableBlockHash(next_hash.clone()), - commitment_hash: None, - }); - } - Err(e) => { - tracing::error!(error = e.to_string(), "failed to sign vote"); - } - } - - // Throttle vote gossiping at periods of fast syncing. For example if we create a subnet contract on Friday - // and bring up a local testnet on Monday, all nodes would be ~7000 blocks behind a Lotus parent. CometBFT - // would be in-sync, and they could rapidly try to gossip votes on previous heights. GossipSub might not like - // that, and we can just cast our votes every now and then to finalize multiple blocks. 
- vote_interval.tick().await; - } - - prev = Some((next_height, next_hash, has_power)); - } -} diff --git a/fendermint/vm/topdown/tests/vote_reactor.rs b/fendermint/vm/topdown/tests/vote_reactor.rs index 84ddfad8b..324030638 100644 --- a/fendermint/vm/topdown/tests/vote_reactor.rs +++ b/fendermint/vm/topdown/tests/vote_reactor.rs @@ -15,7 +15,7 @@ use fendermint_vm_topdown::vote::gossip::GossipClient; use fendermint_vm_topdown::vote::payload::{PowerUpdates, Vote}; use fendermint_vm_topdown::vote::store::InMemoryVoteStore; use fendermint_vm_topdown::vote::{ - start_vote_reactor, Config, StartVoteReactorParams, VoteReactorClient, Weight, + start_vote_reactor, StartVoteReactorParams, VoteConfig, VoteReactorClient, Weight, }; use fendermint_vm_topdown::BlockHeight; use tokio::sync::broadcast; @@ -57,8 +57,8 @@ impl GossipClient for ChannelGossipClient { } } -fn default_config() -> Config { - Config { +fn default_config() -> VoteConfig { + VoteConfig { req_channel_buffer_size: 1024, req_batch_processing_size: 10, gossip_req_processing_size: 10, diff --git a/ipc/api/src/staking.rs b/ipc/api/src/staking.rs index 723a10a76..4f0a93f76 100644 --- a/ipc/api/src/staking.rs +++ b/ipc/api/src/staking.rs @@ -13,7 +13,7 @@ use std::fmt::{Display, Formatter}; pub type ConfigurationNumber = u64; -#[derive(Clone, Debug, num_enum::TryFromPrimitive, Deserialize, Serialize)] +#[derive(Clone, Debug, num_enum::TryFromPrimitive, Deserialize, Serialize, PartialEq, Eq)] #[non_exhaustive] #[repr(u8)] pub enum StakingOperation { @@ -23,14 +23,14 @@ pub enum StakingOperation { SetFederatedPower = 3, } -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] pub struct StakingChangeRequest { pub configuration_number: ConfigurationNumber, pub change: StakingChange, } /// The change request to validator staking -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] pub struct 
StakingChange { pub op: StakingOperation, pub payload: Vec, diff --git a/ipc/cli/src/commands/crossmsg/topdown_cross.rs b/ipc/cli/src/commands/crossmsg/topdown_cross.rs index 3e100bc84..711575a66 100644 --- a/ipc/cli/src/commands/crossmsg/topdown_cross.rs +++ b/ipc/cli/src/commands/crossmsg/topdown_cross.rs @@ -72,7 +72,7 @@ impl CommandLineHandler for LatestParentFinality { let provider = get_ipc_provider(global)?; let subnet = SubnetID::from_str(&arguments.subnet)?; - println!("{}", provider.latest_parent_finality(&subnet).await?); + println!("{}", provider.latest_topdown_checkpoint(&subnet).await?); Ok(()) } } diff --git a/ipc/provider/src/lib.rs b/ipc/provider/src/lib.rs index 5fa469f30..c53f307f7 100644 --- a/ipc/provider/src/lib.rs +++ b/ipc/provider/src/lib.rs @@ -722,11 +722,11 @@ impl IpcProvider { conn.manager().list_bootstrap_nodes(subnet).await } - /// Returns the latest finality from the parent committed in a child subnet. - pub async fn latest_parent_finality(&self, subnet: &SubnetID) -> anyhow::Result { + /// Returns the latest topdown checkpoint from the parent committed in a child subnet. 
+ pub async fn latest_topdown_checkpoint(&self, subnet: &SubnetID) -> anyhow::Result { let conn = self.get_connection(subnet)?; - conn.manager().latest_parent_finality().await + conn.manager().latest_topdown_checkpoint().await } pub async fn set_federated_power( diff --git a/ipc/provider/src/manager/evm/manager.rs b/ipc/provider/src/manager/evm/manager.rs index 21fc3416f..27c4a5312 100644 --- a/ipc/provider/src/manager/evm/manager.rs +++ b/ipc/provider/src/manager/evm/manager.rs @@ -233,14 +233,14 @@ impl TopDownFinalityQuery for EthSubnetManager { }) } - async fn latest_parent_finality(&self) -> Result { + async fn latest_topdown_checkpoint(&self) -> Result { tracing::info!("querying latest parent finality "); let contract = gateway_getter_facet::GatewayGetterFacet::new( self.ipc_contract_info.gateway_addr, Arc::new(self.ipc_contract_info.provider.clone()), ); - let finality = contract.get_latest_parent_finality().call().await?; + let finality = contract.get_latest_topdown_checkpoint().call().await?; Ok(finality.height.as_u64() as ChainEpoch) } } diff --git a/ipc/provider/src/manager/subnet.rs b/ipc/provider/src/manager/subnet.rs index cc47ab093..9d89ac46d 100644 --- a/ipc/provider/src/manager/subnet.rs +++ b/ipc/provider/src/manager/subnet.rs @@ -247,8 +247,8 @@ pub trait TopDownFinalityQuery: Send + Sync { subnet_id: &SubnetID, epoch: ChainEpoch, ) -> Result>>; - /// Returns the latest parent finality committed in a child subnet - async fn latest_parent_finality(&self) -> Result; + /// Returns the latest topdown checkpoint committed in a child subnet + async fn latest_topdown_checkpoint(&self) -> Result; } /// The bottom up checkpoint manager that handles the bottom up relaying from child subnet to the parent diff --git a/ipld/resolver/src/behaviour/membership.rs b/ipld/resolver/src/behaviour/membership.rs index ac1ebd249..975e47205 100644 --- a/ipld/resolver/src/behaviour/membership.rs +++ b/ipld/resolver/src/behaviour/membership.rs @@ -27,7 +27,7 @@ use 
tokio::time::{Instant, Interval}; use crate::hash::blake2b_256; use crate::provider_cache::{ProviderDelta, SubnetProviderCache}; use crate::provider_record::{ProviderRecord, SignedProviderRecord}; -use crate::vote_record::{SignedVoteRecord, VoteRecord}; +use crate::vote_record::{SignedVoteRecord, SubnetVoteRecord, VoteRecord}; use crate::{stats, Timestamp}; use super::NetworkConfig; @@ -53,8 +53,8 @@ pub enum Event { /// to trigger a lookup by the discovery module to learn the address. Skipped(PeerId), - /// We received a [`VoteRecord`] in one of the subnets we are providing data for. - ReceivedVote(Box>), + /// We received a vote in one of the subnets we are providing data for. + ReceivedVote(Box), /// We received preemptive data published in a subnet we were interested in. ReceivedPreemptive(SubnetID, Vec), @@ -341,9 +341,9 @@ where } /// Publish the vote of the validator running the agent about a CID to a subnet. - pub fn publish_vote(&mut self, vote: SignedVoteRecord) -> anyhow::Result<()> { - let topic = self.voting_topic(&vote.record().subnet_id); - let data = vote.into_envelope().into_protobuf_encoding(); + pub fn publish_vote(&mut self, vote: SubnetVoteRecord) -> anyhow::Result<()> { + let topic = self.voting_topic(&vote.subnet); + let data = fvm_ipld_encoding::to_vec(&vote.vote)?; match self.inner.publish(topic, data) { Err(e) => { stats::MEMBERSHIP_PUBLISH_FAILURE.inc(); @@ -415,7 +415,7 @@ where } } } else if self.voting_topics.contains(&msg.topic) { - match SignedVoteRecord::from_bytes(&msg.data).map(|r| r.into_record()) { + match fvm_ipld_encoding::from_slice(&msg.data) { Ok(record) => self.handle_vote_record(record), Err(e) => { stats::MEMBERSHIP_INVALID_MESSAGE.inc(); @@ -465,7 +465,7 @@ where } /// Raise an event to tell we received a new vote. 
- fn handle_vote_record(&mut self, record: VoteRecord) { + fn handle_vote_record(&mut self, record: V) { self.outbox.push_back(Event::ReceivedVote(Box::new(record))) } diff --git a/ipld/resolver/src/client.rs b/ipld/resolver/src/client.rs index 29e9eac55..45f108da1 100644 --- a/ipld/resolver/src/client.rs +++ b/ipld/resolver/src/client.rs @@ -7,6 +7,7 @@ use libipld::Cid; use tokio::sync::mpsc::UnboundedSender; use tokio::sync::oneshot; +use crate::vote_record::SubnetVoteRecord; use crate::{ service::{Request, ResolveResult}, vote_record::SignedVoteRecord, @@ -71,7 +72,7 @@ impl Client { } /// Publish a signed vote into a topic based on its subnet. - pub fn publish_vote(&self, vote: SignedVoteRecord) -> anyhow::Result<()> { + pub fn publish_vote(&self, vote: SubnetVoteRecord) -> anyhow::Result<()> { let req = Request::PublishVote(Box::new(vote)); self.send_request(req) } diff --git a/ipld/resolver/src/lib.rs b/ipld/resolver/src/lib.rs index d2a14cb0b..01db630c7 100644 --- a/ipld/resolver/src/lib.rs +++ b/ipld/resolver/src/lib.rs @@ -23,4 +23,4 @@ pub use behaviour::{ContentConfig, DiscoveryConfig, MembershipConfig, NetworkCon pub use client::{Client, Resolver}; pub use service::{Config, ConnectionConfig, Event, NoKnownPeers, Service}; pub use timestamp::Timestamp; -pub use vote_record::{ValidatorKey, VoteRecord}; +pub use vote_record::{SubnetVoteRecord, ValidatorKey}; diff --git a/ipld/resolver/src/service.rs b/ipld/resolver/src/service.rs index 55ca0af7b..b200fcce7 100644 --- a/ipld/resolver/src/service.rs +++ b/ipld/resolver/src/service.rs @@ -34,7 +34,7 @@ use crate::behaviour::{ }; use crate::client::Client; use crate::stats; -use crate::vote_record::{SignedVoteRecord, VoteRecord}; +use crate::vote_record::{SignedVoteRecord, SubnetVoteRecord, VoteRecord}; /// Result of attempting to resolve a CID. 
pub type ResolveResult = anyhow::Result<()>; @@ -91,7 +91,7 @@ pub(crate) enum Request { SetProvidedSubnets(Vec), AddProvidedSubnet(SubnetID), RemoveProvidedSubnet(SubnetID), - PublishVote(Box>), + PublishVote(Box>), PublishPreemptive(SubnetID, Vec), PinSubnet(SubnetID), UnpinSubnet(SubnetID), @@ -105,7 +105,7 @@ pub(crate) enum Request { #[derive(Clone, Debug)] pub enum Event { /// Received a vote about in a subnet about a CID. - ReceivedVote(Box>), + ReceivedVote(Box), /// Received raw pre-emptive data published to a pinned subnet. ReceivedPreemptive(SubnetID, Vec), } diff --git a/ipld/resolver/src/vote_record.rs b/ipld/resolver/src/vote_record.rs index 3678d5e47..62ed27cc9 100644 --- a/ipld/resolver/src/vote_record.rs +++ b/ipld/resolver/src/vote_record.rs @@ -95,6 +95,11 @@ impl Record for VoteRecord { pub type SignedVoteRecord = SignedRecord>; +pub struct SubnetVoteRecord { + pub subnet: SubnetID, + pub vote: V, +} + impl VoteRecord where C: Serialize + DeserializeOwned, diff --git a/ipld/resolver/tests/smoke.rs b/ipld/resolver/tests/smoke.rs index 632df8dd2..c0ad04da0 100644 --- a/ipld/resolver/tests/smoke.rs +++ b/ipld/resolver/tests/smoke.rs @@ -221,10 +221,8 @@ async fn single_bootstrap_publish_receive_vote() { tokio::time::sleep(Duration::from_secs(2)).await; // Vote on some random CID. 
- let validator_key = Keypair::generate_secp256k1(); let cid = Cid::new_v1(IPLD_RAW, Code::Sha2_256.digest(b"foo")); - let vote = - VoteRecord::signed(&validator_key, subnet_id, TestVote(cid)).expect("failed to sign vote"); + let vote = TestVote(cid); // Pubilish vote cluster.agents[0] @@ -239,7 +237,7 @@ async fn single_bootstrap_publish_receive_vote() { .expect("error receiving vote"); if let Event::ReceivedVote(v) = event { - assert_eq!(&*v, vote.record()); + assert_eq!(&*v, vote); } else { panic!("unexpected {event:?}") } From 9dc15f7c595f43b2e4810ca9e4ee022d65543f23 Mon Sep 17 00:00:00 2001 From: cryptoAtwill Date: Thu, 3 Oct 2024 16:41:57 +0800 Subject: [PATCH 05/22] clippy --- fendermint/vm/topdown/src/lib.rs | 32 +---------------------- fendermint/vm/topdown/src/syncer/mod.rs | 4 --- fendermint/vm/topdown/src/syncer/store.rs | 6 +++++ fendermint/vm/topdown/src/vote/mod.rs | 2 +- ipld/resolver/src/behaviour/membership.rs | 2 +- ipld/resolver/src/client.rs | 5 +--- ipld/resolver/src/service.rs | 2 +- 7 files changed, 11 insertions(+), 42 deletions(-) diff --git a/fendermint/vm/topdown/src/lib.rs b/fendermint/vm/topdown/src/lib.rs index 915a262dc..17027c7f5 100644 --- a/fendermint/vm/topdown/src/lib.rs +++ b/fendermint/vm/topdown/src/lib.rs @@ -13,23 +13,18 @@ pub mod observe; pub mod syncer; pub mod vote; -use async_stm::Stm; -use async_trait::async_trait; use ethers::utils::hex; use fendermint_crypto::quorum::ECDSACertificate; -use fvm_shared::clock::ChainEpoch; use ipc_api::cross::IpcEnvelope; use ipc_api::staking::StakingChangeRequest; use serde::{Deserialize, Serialize}; use std::fmt::{Display, Formatter}; -use std::sync::Arc; -use std::time::Duration; pub use crate::cache::{SequentialAppendError, SequentialKeyCache, ValueIter}; pub use crate::error::Error; use crate::observation::{LinearizedParentBlockView, Observation}; use crate::syncer::{ParentSyncerConfig, ParentSyncerReactorClient}; -use crate::vote::payload::{PowerTable, PowerUpdates}; +use 
crate::vote::payload::PowerUpdates; use crate::vote::{VoteConfig, VoteReactorClient}; pub type BlockHeight = u64; @@ -38,10 +33,6 @@ pub type BlockHash = Bytes; /// The null round error message pub(crate) const NULL_ROUND_ERR_MSG: &str = "requested epoch was a null round"; -/// Default topdown proposal height range -pub(crate) const DEFAULT_MAX_PROPOSAL_RANGE: BlockHeight = 100; -pub(crate) const DEFAULT_MAX_CACHE_BLOCK: BlockHeight = 500; -pub(crate) const DEFAULT_PROPOSAL_DELAY: BlockHeight = 2; #[derive(Debug, Clone, Deserialize)] pub struct Config { @@ -153,27 +144,6 @@ impl TopdownClient { } } -/// If res is null round error, returns the default value from f() -pub(crate) fn handle_null_round T>( - res: anyhow::Result, - f: F, -) -> anyhow::Result { - match res { - Ok(t) => Ok(t), - Err(e) => { - if is_null_round_error(&e) { - Ok(f()) - } else { - Err(e) - } - } - } -} - -pub(crate) fn is_null_round_error(err: &anyhow::Error) -> bool { - is_null_round_str(&err.to_string()) -} - pub(crate) fn is_null_round_str(s: &str) -> bool { s.contains(NULL_ROUND_ERR_MSG) } diff --git a/fendermint/vm/topdown/src/syncer/mod.rs b/fendermint/vm/topdown/src/syncer/mod.rs index dbfea2cfe..671de9034 100644 --- a/fendermint/vm/topdown/src/syncer/mod.rs +++ b/fendermint/vm/topdown/src/syncer/mod.rs @@ -2,11 +2,7 @@ // SPDX-License-Identifier: Apache-2.0, MIT use crate::observation::{Observation, ObservationConfig}; -use crate::proxy::ParentQueryProxy; -use crate::syncer::error::Error; use crate::syncer::payload::ParentBlockView; -use crate::syncer::poll::ParentPoll; -use crate::syncer::store::ParentViewStore; use crate::{BlockHeight, Checkpoint}; use anyhow::anyhow; use async_trait::async_trait; diff --git a/fendermint/vm/topdown/src/syncer/store.rs b/fendermint/vm/topdown/src/syncer/store.rs index ac6eed00c..e952f60b2 100644 --- a/fendermint/vm/topdown/src/syncer/store.rs +++ b/fendermint/vm/topdown/src/syncer/store.rs @@ -25,6 +25,12 @@ pub struct InMemoryParentViewStore { inner: 
SequentialKeyCache, } +impl Default for InMemoryParentViewStore { + fn default() -> Self { + Self::new() + } +} + impl InMemoryParentViewStore { pub fn new() -> Self { Self { diff --git a/fendermint/vm/topdown/src/vote/mod.rs b/fendermint/vm/topdown/src/vote/mod.rs index 7b89a5b6d..64e344fba 100644 --- a/fendermint/vm/topdown/src/vote/mod.rs +++ b/fendermint/vm/topdown/src/vote/mod.rs @@ -21,7 +21,7 @@ use error::Error; use fendermint_crypto::quorum::ECDSACertificate; use fendermint_crypto::SecretKey; use fendermint_vm_genesis::ValidatorKey; -use serde::{Deserialize, Serialize}; +use serde::Deserialize; use std::borrow::Borrow; use std::collections::HashMap; use std::time::Duration; diff --git a/ipld/resolver/src/behaviour/membership.rs b/ipld/resolver/src/behaviour/membership.rs index 975e47205..25f69a0e2 100644 --- a/ipld/resolver/src/behaviour/membership.rs +++ b/ipld/resolver/src/behaviour/membership.rs @@ -27,7 +27,7 @@ use tokio::time::{Instant, Interval}; use crate::hash::blake2b_256; use crate::provider_cache::{ProviderDelta, SubnetProviderCache}; use crate::provider_record::{ProviderRecord, SignedProviderRecord}; -use crate::vote_record::{SignedVoteRecord, SubnetVoteRecord, VoteRecord}; +use crate::vote_record::SubnetVoteRecord; use crate::{stats, Timestamp}; use super::NetworkConfig; diff --git a/ipld/resolver/src/client.rs b/ipld/resolver/src/client.rs index 45f108da1..0e241ea2b 100644 --- a/ipld/resolver/src/client.rs +++ b/ipld/resolver/src/client.rs @@ -8,10 +8,7 @@ use tokio::sync::mpsc::UnboundedSender; use tokio::sync::oneshot; use crate::vote_record::SubnetVoteRecord; -use crate::{ - service::{Request, ResolveResult}, - vote_record::SignedVoteRecord, -}; +use crate::service::{Request, ResolveResult}; /// A facade to the [`Service`] to provide a nicer interface than message passing would allow on its own. 
#[derive(Clone)] diff --git a/ipld/resolver/src/service.rs b/ipld/resolver/src/service.rs index b200fcce7..33e595044 100644 --- a/ipld/resolver/src/service.rs +++ b/ipld/resolver/src/service.rs @@ -34,7 +34,7 @@ use crate::behaviour::{ }; use crate::client::Client; use crate::stats; -use crate::vote_record::{SignedVoteRecord, SubnetVoteRecord, VoteRecord}; +use crate::vote_record::SubnetVoteRecord; /// Result of attempting to resolve a CID. pub type ResolveResult = anyhow::Result<()>; From faf67f3739078b02a2860902b43f718a6c940f7c Mon Sep 17 00:00:00 2001 From: cryptoAtwill Date: Thu, 3 Oct 2024 16:50:31 +0800 Subject: [PATCH 06/22] clippy --- fendermint/app/src/app.rs | 2 +- fendermint/app/src/cmd/run.rs | 11 ++++++++--- fendermint/vm/topdown/src/launch.rs | 7 ++----- fendermint/vm/topdown/src/syncer/mod.rs | 4 +++- fendermint/vm/topdown/src/syncer/poll.rs | 5 +++++ ipld/resolver/src/signed_record.rs | 8 -------- ipld/resolver/tests/smoke.rs | 9 +++------ 7 files changed, 22 insertions(+), 24 deletions(-) diff --git a/fendermint/app/src/app.rs b/fendermint/app/src/app.rs index eed23e78b..3f9cd7691 100644 --- a/fendermint/app/src/app.rs +++ b/fendermint/app/src/app.rs @@ -314,7 +314,7 @@ where Ok(ret) } - pub async fn enable_topdown(&mut self, topdown: TopdownClient) { + pub fn enable_topdown(&mut self, topdown: TopdownClient) { self.chain_env.topdown_client = Toggle::enable(topdown); } diff --git a/fendermint/app/src/cmd/run.rs b/fendermint/app/src/cmd/run.rs index 722d35b6d..541b563da 100644 --- a/fendermint/app/src/cmd/run.rs +++ b/fendermint/app/src/cmd/run.rs @@ -34,7 +34,7 @@ use fendermint_vm_topdown::proxy::{ use fendermint_vm_topdown::syncer::payload::ParentBlockView; use fendermint_vm_topdown::syncer::poll::ParentPoll; use fendermint_vm_topdown::syncer::store::{InMemoryParentViewStore, ParentViewStore}; -use fendermint_vm_topdown::syncer::{start_parent_syncer, ParentPoller, ParentSyncerConfig}; +use fendermint_vm_topdown::syncer::{start_parent_syncer, 
ParentPoller, ParentSyncerConfig, TopDownSyncEvent}; use fendermint_vm_topdown::vote::error::Error; use fendermint_vm_topdown::vote::gossip::GossipClient; use fendermint_vm_topdown::vote::payload::Vote; @@ -52,6 +52,7 @@ use std::sync::Arc; use tendermint_rpc::Client; use tokio::sync::broadcast; use tokio::sync::broadcast::error::{RecvError, TryRecvError}; +use tokio::sync::broadcast::Receiver; use tower::ServiceBuilder; use tracing::info; @@ -267,7 +268,7 @@ async fn run(settings: Settings) -> anyhow::Result<()> { let app_parent_finality_query = AppParentFinalityQuery::new(app.clone()); let topdown_config = settings.ipc.topdown_config()?; - let mut config = fendermint_vm_topdown::Config { + let config = fendermint_vm_topdown::Config { syncer: ParentSyncerConfig { request_channel_size: 1024, broadcast_channel_size: 1024, @@ -304,7 +305,7 @@ async fn run(settings: Settings) -> anyhow::Result<()> { .0, gossip_client, parent_proxy, - move |checkpoint, proxy, config, rx| { + move |checkpoint, proxy, config| { let poller_inner = ParentPoll::new(config, proxy, parent_view_store, checkpoint.clone()); TendermintAwareParentPoller { @@ -540,6 +541,10 @@ where S: ParentViewStore + Send + Sync + 'static, P: Send + Sync + 'static + ParentQueryProxy, { + fn subscribe(&self) -> Receiver { + self.inner.subscribe() + } + fn last_checkpoint(&self) -> &Checkpoint { self.inner.last_checkpoint() } diff --git a/fendermint/vm/topdown/src/launch.rs b/fendermint/vm/topdown/src/launch.rs index 09dfe2b5f..71e2475e9 100644 --- a/fendermint/vm/topdown/src/launch.rs +++ b/fendermint/vm/topdown/src/launch.rs @@ -38,7 +38,6 @@ pub async fn run_topdown( &Checkpoint, ParentClient, ParentSyncerConfig, - broadcast::Sender, ) -> Poller, ) -> anyhow::Result where @@ -60,15 +59,13 @@ where }) .collect::>(); - let (internal_event_tx, internal_event_rx) = - broadcast::channel(config.syncer.broadcast_channel_size); - let poller = poller_fn( &checkpoint, parent_client, config.syncer.clone(), - 
internal_event_tx, ); + let internal_event_rx = poller.subscribe(); + let syncer_client = start_parent_syncer(config.syncer, poller)?; let voting_client = start_vote_reactor(StartVoteReactorParams { diff --git a/fendermint/vm/topdown/src/syncer/mod.rs b/fendermint/vm/topdown/src/syncer/mod.rs index 671de9034..4683c9e14 100644 --- a/fendermint/vm/topdown/src/syncer/mod.rs +++ b/fendermint/vm/topdown/src/syncer/mod.rs @@ -9,7 +9,7 @@ use async_trait::async_trait; use serde::Deserialize; use std::time::Duration; use tokio::select; -use tokio::sync::{mpsc, oneshot}; +use tokio::sync::{broadcast, mpsc, oneshot}; pub mod error; pub mod payload; @@ -51,6 +51,8 @@ pub struct ParentSyncerReactorClient { /// Polls the parent block view #[async_trait] pub trait ParentPoller { + fn subscribe(&self) -> broadcast::Receiver; + /// The previous checkpoint committed fn last_checkpoint(&self) -> &Checkpoint; diff --git a/fendermint/vm/topdown/src/syncer/poll.rs b/fendermint/vm/topdown/src/syncer/poll.rs index 8695d61f8..38915d21d 100644 --- a/fendermint/vm/topdown/src/syncer/poll.rs +++ b/fendermint/vm/topdown/src/syncer/poll.rs @@ -15,6 +15,7 @@ use ipc_observability::emit; use ipc_observability::serde::HexEncodableBlockHash; use libp2p::futures::TryFutureExt; use tokio::sync::broadcast; +use tokio::sync::broadcast::Receiver; use tracing::instrument; pub struct ParentPoll { @@ -31,6 +32,10 @@ where S: ParentViewStore + Send + Sync + 'static, P: Send + Sync + 'static + ParentQueryProxy, { + fn subscribe(&self) -> Receiver { + self.event_broadcast.subscribe() + } + fn last_checkpoint(&self) -> &Checkpoint { &self.last_finalized } diff --git a/ipld/resolver/src/signed_record.rs b/ipld/resolver/src/signed_record.rs index 82e31d352..7a4087f52 100644 --- a/ipld/resolver/src/signed_record.rs +++ b/ipld/resolver/src/signed_record.rs @@ -65,14 +65,6 @@ where Ok(signed_record) } - pub fn record(&self) -> &R { - &self.record - } - - pub fn envelope(&self) -> &SignedEnvelope { - &self.envelope 
- } - pub fn into_record(self) -> R { self.record } diff --git a/ipld/resolver/tests/smoke.rs b/ipld/resolver/tests/smoke.rs index c0ad04da0..966029333 100644 --- a/ipld/resolver/tests/smoke.rs +++ b/ipld/resolver/tests/smoke.rs @@ -28,10 +28,7 @@ use fvm_ipld_encoding::IPLD_RAW; use fvm_ipld_hamt::Hamt; use fvm_shared::{address::Address, ActorID}; use ipc_api::subnet_id::SubnetID; -use ipc_ipld_resolver::{ - Client, Config, ConnectionConfig, ContentConfig, DiscoveryConfig, Event, MembershipConfig, - NetworkConfig, Resolver, Service, VoteRecord, -}; +use ipc_ipld_resolver::{Client, Config, ConnectionConfig, ContentConfig, DiscoveryConfig, Event, MembershipConfig, NetworkConfig, Resolver, Service, SubnetVoteRecord, VoteRecord}; use libp2p::{ core::{ muxing::StreamMuxerBox, @@ -227,7 +224,7 @@ async fn single_bootstrap_publish_receive_vote() { // Pubilish vote cluster.agents[0] .client - .publish_vote(vote.clone()) + .publish_vote(SubnetVoteRecord {vote, subnet: subnet_id.clone() }) .expect("failed to send vote"); // Receive vote. 
@@ -237,7 +234,7 @@ async fn single_bootstrap_publish_receive_vote() { .expect("error receiving vote"); if let Event::ReceivedVote(v) = event { - assert_eq!(&*v, vote); + assert_eq!(&*v, vote.record()); } else { panic!("unexpected {event:?}") } From b8784f9a3de5853e7482fd437ebb7b0b7a275215 Mon Sep 17 00:00:00 2001 From: cryptoAtwill Date: Thu, 3 Oct 2024 16:52:05 +0800 Subject: [PATCH 07/22] clippy --- fendermint/vm/topdown/src/launch.rs | 3 +-- ipld/resolver/src/signed_record.rs | 8 ++++++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/fendermint/vm/topdown/src/launch.rs b/fendermint/vm/topdown/src/launch.rs index 71e2475e9..6ca243205 100644 --- a/fendermint/vm/topdown/src/launch.rs +++ b/fendermint/vm/topdown/src/launch.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0, MIT use crate::proxy::ParentQueryProxy; -use crate::syncer::{start_parent_syncer, ParentPoller, ParentSyncerConfig, TopDownSyncEvent}; +use crate::syncer::{start_parent_syncer, ParentPoller, ParentSyncerConfig}; use crate::vote::gossip::GossipClient; use crate::vote::payload::PowerUpdates; use crate::vote::store::InMemoryVoteStore; @@ -15,7 +15,6 @@ use fendermint_vm_genesis::{Power, Validator, ValidatorKey}; use std::future::Future; use std::sync::Arc; use std::time::Duration; -use tokio::sync::broadcast; /// Run the topdown checkpointing in the background. 
This consists of two processes: /// - syncer: diff --git a/ipld/resolver/src/signed_record.rs b/ipld/resolver/src/signed_record.rs index 7a4087f52..82e31d352 100644 --- a/ipld/resolver/src/signed_record.rs +++ b/ipld/resolver/src/signed_record.rs @@ -65,6 +65,14 @@ where Ok(signed_record) } + pub fn record(&self) -> &R { + &self.record + } + + pub fn envelope(&self) -> &SignedEnvelope { + &self.envelope + } + pub fn into_record(self) -> R { self.record } From 38fb524d0255d603a945dfebc8052a50a997965a Mon Sep 17 00:00:00 2001 From: cryptoAtwill Date: Thu, 3 Oct 2024 17:16:47 +0800 Subject: [PATCH 08/22] fix tests --- Cargo.lock | 1 + .../materializer/tests/docker_tests/layer2.rs | 6 +- .../vm/message/golden/chain/ipc_top_down.cbor | 1 - .../vm/message/golden/chain/ipc_top_down.txt | 1 - fendermint/vm/message/src/ipc.rs | 20 - fendermint/vm/topdown/Cargo.toml | 2 +- fendermint/vm/topdown/src/lib.rs | 16 + fendermint/vm/topdown/src/vote/store.rs | 2 +- fendermint/vm/topdown/src/vote/tally.rs | 6 +- fendermint/vm/topdown/tests/smt_voting.rs | 508 ------------------ fendermint/vm/topdown/tests/vote_reactor.rs | 12 +- ipld/resolver/src/signed_record.rs | 2 + ipld/resolver/tests/smoke.rs | 6 +- 13 files changed, 36 insertions(+), 547 deletions(-) delete mode 100644 fendermint/vm/message/golden/chain/ipc_top_down.cbor delete mode 100644 fendermint/vm/message/golden/chain/ipc_top_down.txt delete mode 100644 fendermint/vm/topdown/tests/smt_voting.rs diff --git a/Cargo.lock b/Cargo.lock index 78d45926b..eb79cbb77 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3470,6 +3470,7 @@ dependencies = [ "num-rational", "num-traits", "prometheus", + "quickcheck", "rand", "serde", "serde_json", diff --git a/fendermint/testing/materializer/tests/docker_tests/layer2.rs b/fendermint/testing/materializer/tests/docker_tests/layer2.rs index acfd0f8f4..526068e51 100644 --- a/fendermint/testing/materializer/tests/docker_tests/layer2.rs +++ 
b/fendermint/testing/materializer/tests/docker_tests/layer2.rs @@ -11,7 +11,7 @@ use fendermint_materializer::{HasEthApi, ResourceId}; use fendermint_vm_actor_interface::init::builtin_actor_eth_addr; use fendermint_vm_actor_interface::ipc; use fendermint_vm_message::conv::from_fvm::to_eth_address; -use ipc_actors_abis::gateway_getter_facet::{GatewayGetterFacet, ParentFinality}; +use ipc_actors_abis::gateway_getter_facet::{GatewayGetterFacet, TopdownCheckpoint}; use ipc_actors_abis::subnet_actor_getter_facet::SubnetActorGetterFacet; use crate::with_testnet; @@ -71,8 +71,8 @@ async fn test_topdown_and_bottomup() { { let mut retry = 0; loop { - let finality: ParentFinality = england_gateway - .get_latest_parent_finality() + let finality: TopdownCheckpoint = england_gateway + .get_latest_topdown_checkpoint() .call() .await .context("failed to get parent finality")?; diff --git a/fendermint/vm/message/golden/chain/ipc_top_down.cbor b/fendermint/vm/message/golden/chain/ipc_top_down.cbor deleted file mode 100644 index 212c6396c..000000000 --- a/fendermint/vm/message/golden/chain/ipc_top_down.cbor +++ /dev/null @@ -1 +0,0 @@ -a163497063a16b546f70446f776e45786563a2666865696768741ac0c004dd6a626c6f636b5f6861736889189600186418d418d10118b418a50c \ No newline at end of file diff --git a/fendermint/vm/message/golden/chain/ipc_top_down.txt b/fendermint/vm/message/golden/chain/ipc_top_down.txt deleted file mode 100644 index 8ebe9328a..000000000 --- a/fendermint/vm/message/golden/chain/ipc_top_down.txt +++ /dev/null @@ -1 +0,0 @@ -Ipc(TopDownExec(ParentFinality { height: 3233809629, block_hash: [150, 0, 100, 212, 209, 1, 180, 165, 12] })) \ No newline at end of file diff --git a/fendermint/vm/message/src/ipc.rs b/fendermint/vm/message/src/ipc.rs index eecc27864..2156cd510 100644 --- a/fendermint/vm/message/src/ipc.rs +++ b/fendermint/vm/message/src/ipc.rs @@ -95,19 +95,8 @@ pub struct BottomUpCheckpoint { pub bottom_up_messages: Cid, // TODO: Use TCid } -/// A proposal of the parent 
view that validators will be voting on. -#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] -pub struct ParentFinality { - /// Block height of this proposal. - pub height: ChainEpoch, - /// The block hash of the parent, expressed as bytes - pub block_hash: Vec, -} - #[cfg(feature = "arb")] mod arb { - - use crate::ipc::ParentFinality; use fendermint_testing::arb::{ArbAddress, ArbCid, ArbSubnetID, ArbTokenAmount}; use fvm_shared::crypto::signature::Signature; use quickcheck::{Arbitrary, Gen}; @@ -187,13 +176,4 @@ mod arb { } } } - - impl Arbitrary for ParentFinality { - fn arbitrary(g: &mut Gen) -> Self { - Self { - height: u32::arbitrary(g).into(), - block_hash: Vec::arbitrary(g), - } - } - } } diff --git a/fendermint/vm/topdown/Cargo.toml b/fendermint/vm/topdown/Cargo.toml index 18850b302..aad255983 100644 --- a/fendermint/vm/topdown/Cargo.toml +++ b/fendermint/vm/topdown/Cargo.toml @@ -31,6 +31,7 @@ tokio = { workspace = true } tracing = { workspace = true } prometheus = { workspace = true } arbitrary = { workspace = true } +quickcheck = { workspace = true } num-rational = { workspace = true } @@ -44,7 +45,6 @@ fendermint_crypto = { path = "../../crypto" } ipc-observability = { workspace = true } [dev-dependencies] - clap = { workspace = true } rand = { workspace = true } tracing-subscriber = { workspace = true } diff --git a/fendermint/vm/topdown/src/lib.rs b/fendermint/vm/topdown/src/lib.rs index 17027c7f5..004cf5c1b 100644 --- a/fendermint/vm/topdown/src/lib.rs +++ b/fendermint/vm/topdown/src/lib.rs @@ -193,3 +193,19 @@ impl Checkpoint { } } } + +impl quickcheck::Arbitrary for TopdownProposal { + fn arbitrary(g: &mut quickcheck::Gen) -> Self { + let observation = Observation::new( + u64::arbitrary(g), + Vec::arbitrary(g), + Vec::arbitrary(g) + ); + let cert = ECDSACertificate::new_of_size(observation, 1); + + Self { + cert, + effects: (vec![], vec![]), + } + } +} \ No newline at end of file diff --git a/fendermint/vm/topdown/src/vote/store.rs 
b/fendermint/vm/topdown/src/vote/store.rs index cc10f359f..576751da6 100644 --- a/fendermint/vm/topdown/src/vote/store.rs +++ b/fendermint/vm/topdown/src/vote/store.rs @@ -212,7 +212,7 @@ mod tests { .unwrap(), ); - let agg = VoteAgg(votes.iter().collect()); + let agg = VoteAgg(HashMap::from_iter(votes.iter().map(|v| (v.voter(), v)))); let weights = agg.observation_weights(&HashMap::from_iter(powers)); assert_eq!(weights, vec![(&observation1, 1), (&observation2, 2),]) } diff --git a/fendermint/vm/topdown/src/vote/tally.rs b/fendermint/vm/topdown/src/vote/tally.rs index 540133661..03e3d3183 100644 --- a/fendermint/vm/topdown/src/vote/tally.rs +++ b/fendermint/vm/topdown/src/vote/tally.rs @@ -326,7 +326,7 @@ mod tests { } let ob = vote_tally.find_quorum().unwrap().unwrap(); - assert_eq!(ob, observation); + assert_eq!(*ob.payload(), observation); } #[test] @@ -401,7 +401,7 @@ mod tests { } let ob = vote_tally.find_quorum().unwrap().unwrap(); - assert_eq!(ob.payload(), observation); + assert_eq!(*ob.payload(), observation); let new_powers = (0..3) .map(|_| (random_validator_key().1.clone(), 1)) @@ -447,6 +447,6 @@ mod tests { ]); let ob = vote_tally.find_quorum().unwrap().unwrap(); - assert_eq!(ob.payload(), observation); + assert_eq!(*ob.payload(), observation); } } diff --git a/fendermint/vm/topdown/tests/smt_voting.rs b/fendermint/vm/topdown/tests/smt_voting.rs deleted file mode 100644 index b605a79a1..000000000 --- a/fendermint/vm/topdown/tests/smt_voting.rs +++ /dev/null @@ -1,508 +0,0 @@ -// Copyright 2022-2024 Protocol Labs -// SPDX-License-Identifier: Apache-2.0, MIT - -//! State Machine Test for the finality voting tally component. -//! -//! The test simulates random events that the tally can receive, such as votes received -//! over gossip, power table updates, block being executed, and tests that the tally -//! correctly identifies the blocks which are agreeable to the majority of validator. -//! -//! It can be executed the following way: -//! -//! 
```text -//! cargo test --release -p fendermint_vm_topdown --test smt_voting -//! ``` - -use core::fmt; -use std::{ - cmp::{max, min}, - collections::BTreeMap, - fmt::Debug, -}; - -use arbitrary::Unstructured; -use async_stm::{atomically, atomically_or_err, Stm, StmResult}; -use fendermint_testing::{smt, state_machine_test}; -use fendermint_vm_topdown::{ - voting::{self, VoteTally, Weight}, - BlockHash, BlockHeight, -}; -use im::HashSet; -//use rand::{rngs::StdRng, SeedableRng}; - -/// Size of window of voting relative to the last cast vote. -const MAX_VOTE_DELTA: BlockHeight = 5; -/// Maximum number of blocks to finalize at a time. -const MAX_FINALIZED_DELTA: BlockHeight = 5; - -state_machine_test!(voting, 10000 ms, 65512 bytes, 200 steps, VotingMachine::new()); -//state_machine_test!(voting, 0xf7ac11a50000ffe8, 200 steps, VotingMachine::new()); - -/// Test key to make debugging more readable. -#[derive(Debug, Clone, Hash, Eq, PartialEq, PartialOrd, Ord)] -pub struct VotingKey(u64); - -impl fmt::Display for VotingKey { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "VotingKey({})", self.0) - } -} - -pub type VotingError = voting::Error; - -pub enum VotingCommand { - /// The tally observes the next block fo the chain. - ExtendChain(BlockHeight, Option), - /// One of the validators voted on a block. - AddVote(VotingKey, BlockHeight, BlockHash), - /// Update the power table. - UpdatePower(Vec<(VotingKey, Weight)>), - /// A certain height was finalized in the ledger. - BlockFinalized(BlockHeight, BlockHash), - /// Ask the tally for the highest agreeable block. - FindQuorum, -} - -// Debug format without block hashes which make it unreadable. 
-impl Debug for VotingCommand { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Self::ExtendChain(arg0, arg1) => f - .debug_tuple("ExtendChain") - .field(arg0) - .field(&arg1.is_some()) - .finish(), - Self::AddVote(arg0, arg1, _arg2) => { - f.debug_tuple("AddVote").field(arg0).field(arg1).finish() - } - Self::UpdatePower(arg0) => f.debug_tuple("UpdatePower").field(arg0).finish(), - Self::BlockFinalized(arg0, _arg1) => { - f.debug_tuple("BlockFinalized").field(arg0).finish() - } - Self::FindQuorum => write!(f, "FindQuorum"), - } - } -} - -/// Model state of voting -#[derive(Clone)] -pub struct VotingState { - /// We have a single parent chain that everybody observes, just at different heights. - /// There is no forking in this test because we assume that the syncing component - /// only downloads blocks which are final, and that reorgs don't happen. - /// - /// Null blocks are represented by `None`. - /// - /// The tally is currently unable to handle reorgs and rejects equivocations anyway. - /// - /// TODO (ENG-623): Decide what we want to achieve with Equivocation detection. - chain: Vec>, - /// All the validator keys to help pic random ones. - validator_keys: Vec, - /// All the validators with varying weights (can be zero). - validator_states: BTreeMap, - - last_finalized_block: BlockHeight, - last_chain_block: BlockHeight, -} - -impl VotingState { - pub fn can_extend(&self) -> bool { - self.last_chain_block < self.max_chain_height() - } - - pub fn can_finalize(&self) -> bool { - // We can finalize a block even if we haven't observed the votes, - // if the majority of validators vote for an actual block that - // proposed it for execution. 
- self.last_finalized_block < self.max_chain_height() - } - - pub fn next_chain_block(&self) -> Option<(BlockHeight, Option)> { - if self.can_extend() { - let h = self.last_chain_block + 1; - Some((h, self.block_hash(h))) - } else { - None - } - } - - pub fn max_chain_height(&self) -> BlockHeight { - self.chain.len() as BlockHeight - 1 - } - - pub fn block_hash(&self, h: BlockHeight) -> Option { - self.chain[h as usize].clone() - } - - pub fn has_quorum(&self, h: BlockHeight) -> bool { - if self.block_hash(h).is_none() { - return false; - } - - let mut total_weight: Weight = 0; - let mut vote_weight: Weight = 0; - - for vs in self.validator_states.values() { - total_weight += vs.weight; - if vs.highest_vote >= h { - vote_weight += vs.weight; - } - } - - let threshold = total_weight * 2 / 3; - - vote_weight > threshold - } -} - -#[derive(Clone, Debug)] -pub struct ValidatorState { - /// Current voting power (can be zero). - weight: Weight, - /// The heights this validator explicitly voted on. - votes: HashSet, - /// The highest vote *currently on the chain* the validator has voted for already. - /// Initially zero, meaning everyone voted on the initial finalized block. - highest_vote: BlockHeight, -} - -pub struct VotingMachine { - /// Runtime for executing async commands. - runtime: tokio::runtime::Runtime, -} - -impl VotingMachine { - pub fn new() -> Self { - Self { - runtime: tokio::runtime::Runtime::new().expect("create tokio runtime"), - } - } - - fn atomically_or_err(&self, f: F) -> Result - where - F: Fn() -> StmResult, - { - self.runtime.block_on(atomically_or_err(f)) - } - - fn atomically(&self, f: F) -> T - where - F: Fn() -> Stm, - { - self.runtime.block_on(atomically(f)) - } - - // For convenience in the command handler. 
- fn atomically_ok(&self, f: F) -> Result - where - F: Fn() -> Stm, - { - Ok(self.atomically(f)) - } -} - -impl Default for VotingMachine { - fn default() -> Self { - Self::new() - } -} - -impl smt::StateMachine for VotingMachine { - /// The System Under Test is the Vote Tally. - type System = VoteTally; - /// The model state is defined here in the test. - type State = VotingState; - /// Random commands we can apply in a step. - type Command = VotingCommand; - /// Result of command application on the system. - /// - /// The only return value we are interested in is the finality. - type Result = Result, voting::Error>; - - /// New random state. - fn gen_state(&self, u: &mut Unstructured) -> arbitrary::Result { - let chain_length = u.int_in_range(40..=60)?; - let mut chain = Vec::new(); - for i in 0..chain_length { - if i == 0 || u.ratio(9, 10)? { - let block_hash = u.bytes(32)?; - chain.push(Some(Vec::from(block_hash))); - } else { - chain.push(None); - } - } - - let validator_count = u.int_in_range(1..=5)?; - //let mut rng = StdRng::seed_from_u64(u.arbitrary()?); - let mut validator_states = BTreeMap::new(); - - for i in 0..validator_count { - let min_weight = if i == 0 { 1u64 } else { 0u64 }; - let weight = u.int_in_range(min_weight..=100)?; - - // A VotingKey is has a lot of wrapping... 
- // let secret_key = fendermint_crypto::SecretKey::random(&mut rng); - // let public_key = secret_key.public_key(); - // let public_key = libp2p::identity::secp256k1::PublicKey::try_from_bytes( - // &public_key.serialize_compressed(), - // ) - // .expect("secp256k1 public key"); - // let public_key = libp2p::identity::PublicKey::from(public_key); - // let validator_key = VotingKey::from(public_key); - - let validator_key = VotingKey(i); - - validator_states.insert( - validator_key, - ValidatorState { - weight, - votes: HashSet::default(), - highest_vote: 0, - }, - ); - } - - eprintln!("NEW STATE: {validator_states:?}"); - - Ok(VotingState { - chain, - validator_keys: validator_states.keys().cloned().collect(), - validator_states, - last_chain_block: 0, - last_finalized_block: 0, - }) - } - - /// New System Under Test. - fn new_system(&self, state: &Self::State) -> Self::System { - let power_table = state - .validator_states - .iter() - .filter(|(_, vs)| vs.weight > 0) - .map(|(vk, vs)| (vk.clone(), vs.weight)) - .collect(); - - let last_finalized_block = (0, state.block_hash(0).expect("first block is not null")); - - VoteTally::::new(power_table, last_finalized_block) - } - - /// New random command. - fn gen_command( - &self, - u: &mut Unstructured, - state: &Self::State, - ) -> arbitrary::Result { - let cmd = match u.int_in_range(0..=100)? 
{ - // Add a block to the observed chain - i if i < 25 && state.can_extend() => { - let (height, hash) = state.next_chain_block().unwrap(); - VotingCommand::ExtendChain(height, hash) - } - // Add a new (or repeated) vote by a validator, extending its chain - i if i < 70 => { - let vk = u.choose(&state.validator_keys)?; - let high_vote = state.validator_states[vk].highest_vote; - let max_vote: BlockHeight = - min(state.max_chain_height(), high_vote + MAX_VOTE_DELTA); - let min_vote: BlockHeight = high_vote.saturating_sub(MAX_VOTE_DELTA); - - let mut vote_height = u.int_in_range(min_vote..=max_vote)?; - while state.block_hash(vote_height).is_none() { - vote_height -= 1; - } - let vote_hash = state - .block_hash(vote_height) - .expect("the first block not null"); - - VotingCommand::AddVote(vk.clone(), vote_height, vote_hash) - } - // Update the power table - i if i < 80 => { - // Move power from one validator to another (so we never have everyone be zero). - let vk1 = u.choose(&state.validator_keys)?; - let vk2 = u.choose(&state.validator_keys)?; - let w1 = state.validator_states[vk1].weight; - let w2 = state.validator_states[vk2].weight; - let delta = u.int_in_range(0..=w1)?; - - let updates = vec![(vk1.clone(), w1 - delta), (vk2.clone(), w2 + delta)]; - - VotingCommand::UpdatePower(updates) - } - // Finalize a block - i if i < 90 && state.can_finalize() => { - let min_fin = state.last_finalized_block + 1; - let max_fin = min( - state.max_chain_height(), - state.last_finalized_block + MAX_FINALIZED_DELTA, - ); - - let mut fin_height = u.int_in_range(min_fin..=max_fin)?; - while state.block_hash(fin_height).is_none() { - fin_height -= 1; - } - let fin_hash = state - .block_hash(fin_height) - .expect("the first block not null"); - - // Might be a duplicate, which doesn't happen in the real ledger, but it's okay. 
- VotingCommand::BlockFinalized(fin_height, fin_hash) - } - _ => VotingCommand::FindQuorum, - }; - Ok(cmd) - } - - /// Apply the command on the System Under Test. - fn run_command(&self, system: &mut Self::System, cmd: &Self::Command) -> Self::Result { - eprintln!("RUN CMD {cmd:?}"); - match cmd { - VotingCommand::ExtendChain(block_height, block_hash) => self.atomically_or_err(|| { - system - .add_block(*block_height, block_hash.clone()) - .map(|_| None) - }), - VotingCommand::AddVote(vk, block_height, block_hash) => self.atomically_or_err(|| { - system - .add_vote(vk.clone(), *block_height, block_hash.clone()) - .map(|_| None) - }), - - VotingCommand::UpdatePower(power_table) => { - self.atomically_ok(|| system.update_power_table(power_table.clone()).map(|_| None)) - } - - VotingCommand::BlockFinalized(block_height, block_hash) => self.atomically_ok(|| { - system - .set_finalized(*block_height, block_hash.clone(), None, None) - .map(|_| None) - }), - - VotingCommand::FindQuorum => self.atomically_ok(|| system.find_quorum()), - } - } - - /// Check that the result returned by the tally is correct. 
- fn check_result(&self, cmd: &Self::Command, pre_state: &Self::State, result: Self::Result) { - match cmd { - VotingCommand::ExtendChain(_, _) => { - result.expect("chain extension should succeed; not simulating unexpected heights"); - } - VotingCommand::AddVote(vk, h, _) => { - if *h < pre_state.last_finalized_block { - result.expect("old votes are ignored"); - } else if pre_state.validator_states[vk].weight == 0 { - result.expect_err("not accepting votes from validators with 0 power"); - } else { - result.expect("vote should succeed; not simulating equivocations"); - } - } - VotingCommand::FindQuorum => { - let result = result.expect("finding quorum should succeed"); - - let height = match result { - None => pre_state.last_finalized_block, - Some((height, hash)) => { - assert!( - pre_state.has_quorum(height), - "find: height {height} should have quorum" - ); - assert!( - height > pre_state.last_finalized_block, - "find: should be above last finalized" - ); - assert!( - height <= pre_state.last_chain_block, - "find: should not be beyond last chain" - ); - assert_eq!( - pre_state.block_hash(height), - Some(hash), - "find: should be correct hash" - ); - height - } - }; - - // Check that the first non-null block after the finalized one has no quorum. - let mut next = height + 1; - if next > pre_state.max_chain_height() || next > pre_state.last_chain_block { - return; - } - while next < pre_state.last_chain_block && pre_state.block_hash(next).is_none() { - next += 1; - } - assert!( - !pre_state.has_quorum(next), - "next block at {next} should not have quorum" - ) - } - other => { - assert!(result.is_ok(), "{other:?} should succeed: {result:?}"); - } - } - } - - /// Update the model state. 
- fn next_state(&self, cmd: &Self::Command, mut state: Self::State) -> Self::State { - match cmd { - VotingCommand::ExtendChain(h, _) => { - state.last_chain_block = *h; - for vs in state.validator_states.values_mut() { - if vs.votes.contains(h) { - vs.highest_vote = *h; - } - } - } - VotingCommand::AddVote(vk, h, _) => { - let vs = state - .validator_states - .get_mut(vk) - .expect("validator exists"); - - if vs.weight > 0 { - vs.votes.insert(*h); - - if *h <= state.last_chain_block { - vs.highest_vote = max(vs.highest_vote, *h); - } - } - } - VotingCommand::UpdatePower(pt) => { - for (vk, w) in pt { - state - .validator_states - .get_mut(vk) - .expect("validators exist") - .weight = *w; - } - } - VotingCommand::BlockFinalized(h, _) => { - state.last_finalized_block = *h; - state.last_chain_block = max(state.last_chain_block, state.last_finalized_block); - } - VotingCommand::FindQuorum => {} - } - state - } - - /// Compare the tally agains the updated model state. - fn check_system( - &self, - _cmd: &Self::Command, - post_state: &Self::State, - post_system: &Self::System, - ) -> bool { - let last_finalized_block = self.atomically(|| post_system.last_finalized_height()); - - assert_eq!( - last_finalized_block, post_state.last_finalized_block, - "last finalized blocks should match" - ); - - // Stop if we finalized everything. 
- last_finalized_block < post_state.max_chain_height() - } -} diff --git a/fendermint/vm/topdown/tests/vote_reactor.rs b/fendermint/vm/topdown/tests/vote_reactor.rs index 324030638..dfd6bc0d5 100644 --- a/fendermint/vm/topdown/tests/vote_reactor.rs +++ b/fendermint/vm/topdown/tests/vote_reactor.rs @@ -9,7 +9,7 @@ use async_trait::async_trait; use fendermint_crypto::SecretKey; use fendermint_vm_genesis::ValidatorKey; use fendermint_vm_topdown::observation::Observation; -use fendermint_vm_topdown::sync::TopDownSyncEvent; +use fendermint_vm_topdown::syncer::TopDownSyncEvent; use fendermint_vm_topdown::vote::error::Error; use fendermint_vm_topdown::vote::gossip::GossipClient; use fendermint_vm_topdown::vote::payload::{PowerUpdates, Vote}; @@ -155,7 +155,7 @@ async fn simple_lifecycle() { while client.find_quorum().await.unwrap().is_none() {} let r = client.find_quorum().await.unwrap().unwrap(); - assert_eq!(r.parent_height(), parent_height); + assert_eq!(r.payload().parent_height(), parent_height); let r = client.query_votes(parent_height).await.unwrap().unwrap(); assert_eq!(r.len(), 1); @@ -179,7 +179,7 @@ async fn simple_lifecycle() { let votes = client.query_votes(parent_height2).await.unwrap().unwrap(); assert_eq!(votes.len(), 1); let r = client.find_quorum().await.unwrap().unwrap(); - assert_eq!(r.parent_height(), parent_height2); + assert_eq!(r.payload().parent_height(), parent_height2); client .set_quorum_finalized(parent_height2) @@ -283,7 +283,7 @@ async fn waiting_for_quorum() { for client in &clients { let r = client.find_quorum().await.unwrap().unwrap(); - assert_eq!(r.parent_height(), parent_height3, "should have quorum"); + assert_eq!(r.payload().parent_height(), parent_height3, "should have quorum"); } // make observation on previous heights @@ -301,7 +301,7 @@ async fn waiting_for_quorum() { for client in &clients { let r = client.find_quorum().await.unwrap().unwrap(); assert_eq!( - r.parent_height(), + r.payload().parent_height(), parent_height3, 
"should have formed quorum on larger height" ); @@ -359,6 +359,6 @@ async fn all_validator_in_sync() { while n.find_quorum().await.unwrap().is_none() {} let r = n.find_quorum().await.unwrap().unwrap(); - assert_eq!(r.parent_height(), parent_height) + assert_eq!(r.payload().parent_height(), parent_height) } } diff --git a/ipld/resolver/src/signed_record.rs b/ipld/resolver/src/signed_record.rs index 82e31d352..a29f46da9 100644 --- a/ipld/resolver/src/signed_record.rs +++ b/ipld/resolver/src/signed_record.rs @@ -65,10 +65,12 @@ where Ok(signed_record) } + #[allow(dead_code)] pub fn record(&self) -> &R { &self.record } + #[allow(dead_code)] pub fn envelope(&self) -> &SignedEnvelope { &self.envelope } diff --git a/ipld/resolver/tests/smoke.rs b/ipld/resolver/tests/smoke.rs index 966029333..5d65caa08 100644 --- a/ipld/resolver/tests/smoke.rs +++ b/ipld/resolver/tests/smoke.rs @@ -28,7 +28,7 @@ use fvm_ipld_encoding::IPLD_RAW; use fvm_ipld_hamt::Hamt; use fvm_shared::{address::Address, ActorID}; use ipc_api::subnet_id::SubnetID; -use ipc_ipld_resolver::{Client, Config, ConnectionConfig, ContentConfig, DiscoveryConfig, Event, MembershipConfig, NetworkConfig, Resolver, Service, SubnetVoteRecord, VoteRecord}; +use ipc_ipld_resolver::{Client, Config, ConnectionConfig, ContentConfig, DiscoveryConfig, Event, MembershipConfig, NetworkConfig, Resolver, Service, SubnetVoteRecord}; use libp2p::{ core::{ muxing::StreamMuxerBox, @@ -224,7 +224,7 @@ async fn single_bootstrap_publish_receive_vote() { // Pubilish vote cluster.agents[0] .client - .publish_vote(SubnetVoteRecord {vote, subnet: subnet_id.clone() }) + .publish_vote(SubnetVoteRecord {vote: vote.clone(), subnet: subnet_id.clone() }) .expect("failed to send vote"); // Receive vote. 
@@ -234,7 +234,7 @@ async fn single_bootstrap_publish_receive_vote() { .expect("error receiving vote"); if let Event::ReceivedVote(v) = event { - assert_eq!(&*v, vote.record()); + assert_eq!(*v, vote); } else { panic!("unexpected {event:?}") } From ad1d1c72a55ebe2ca6b0b94352dcd7b9ddd39183 Mon Sep 17 00:00:00 2001 From: cryptoAtwill Date: Thu, 3 Oct 2024 17:18:33 +0800 Subject: [PATCH 09/22] clippy --- fendermint/app/src/cmd/debug.rs | 6 ------ fendermint/app/src/cmd/run.rs | 7 +++---- fendermint/app/src/ipc.rs | 1 - fendermint/vm/interpreter/src/chain.rs | 6 +----- fendermint/vm/interpreter/src/fvm/topdown.rs | 10 ++-------- 5 files changed, 6 insertions(+), 24 deletions(-) diff --git a/fendermint/app/src/cmd/debug.rs b/fendermint/app/src/cmd/debug.rs index 1c8431b7c..b66c0a103 100644 --- a/fendermint/app/src/cmd/debug.rs +++ b/fendermint/app/src/cmd/debug.rs @@ -1,15 +1,9 @@ // Copyright 2022-2024 Protocol Labs // SPDX-License-Identifier: Apache-2.0, MIT -use anyhow::{anyhow, Context}; use fendermint_app_options::debug::{ DebugArgs, DebugCommands, DebugExportTopDownEventsArgs, DebugIpcCommands, }; -use fendermint_vm_topdown::proxy::IPCProviderProxy; -use ipc_provider::{ - config::subnet::{EVMSubnet, SubnetConfig}, - IpcProvider, -}; use crate::cmd; diff --git a/fendermint/app/src/cmd/run.rs b/fendermint/app/src/cmd/run.rs index 541b563da..6d8979fca 100644 --- a/fendermint/app/src/cmd/run.rs +++ b/fendermint/app/src/cmd/run.rs @@ -4,7 +4,6 @@ use crate::cmd::key::read_secret_key; use crate::{cmd, options::run::RunArgs, settings::Settings}; use anyhow::{anyhow, bail, Context}; -use async_stm::atomically_or_err; use async_trait::async_trait; use fendermint_abci::ApplicationService; use fendermint_app::ipc::AppParentFinalityQuery; @@ -34,7 +33,7 @@ use fendermint_vm_topdown::proxy::{ use fendermint_vm_topdown::syncer::payload::ParentBlockView; use fendermint_vm_topdown::syncer::poll::ParentPoll; use fendermint_vm_topdown::syncer::store::{InMemoryParentViewStore, 
ParentViewStore}; -use fendermint_vm_topdown::syncer::{start_parent_syncer, ParentPoller, ParentSyncerConfig, TopDownSyncEvent}; +use fendermint_vm_topdown::syncer::{ParentPoller, ParentSyncerConfig, TopDownSyncEvent}; use fendermint_vm_topdown::vote::error::Error; use fendermint_vm_topdown::vote::gossip::GossipClient; use fendermint_vm_topdown::vote::payload::Vote; @@ -42,7 +41,7 @@ use fendermint_vm_topdown::vote::VoteConfig; use fendermint_vm_topdown::{BlockHeight, Checkpoint, TopdownClient}; use fvm_shared::address::{current_network, Address, Network}; use ipc_api::subnet_id::SubnetID; -use ipc_ipld_resolver::{Event as ResolverEvent, Event, SubnetVoteRecord}; +use ipc_ipld_resolver::{Event as ResolverEvent, SubnetVoteRecord}; use ipc_observability::observe::register_metrics as register_default_metrics; use ipc_provider::config::subnet::{EVMSubnet, SubnetConfig}; use ipc_provider::IpcProvider; @@ -51,7 +50,7 @@ use libp2p::identity::Keypair; use std::sync::Arc; use tendermint_rpc::Client; use tokio::sync::broadcast; -use tokio::sync::broadcast::error::{RecvError, TryRecvError}; +use tokio::sync::broadcast::error::TryRecvError; use tokio::sync::broadcast::Receiver; use tower::ServiceBuilder; use tracing::info; diff --git a/fendermint/app/src/ipc.rs b/fendermint/app/src/ipc.rs index f4d17bb28..d538302f5 100644 --- a/fendermint/app/src/ipc.rs +++ b/fendermint/app/src/ipc.rs @@ -14,7 +14,6 @@ use fvm_ipld_blockstore::Blockstore; use std::sync::Arc; use fendermint_vm_topdown::Checkpoint; -use serde::{Deserialize, Serialize}; /// Queries the LATEST COMMITTED parent finality from the storage pub struct AppParentFinalityQuery diff --git a/fendermint/vm/interpreter/src/chain.rs b/fendermint/vm/interpreter/src/chain.rs index 931169eed..8a1ea62b1 100644 --- a/fendermint/vm/interpreter/src/chain.rs +++ b/fendermint/vm/interpreter/src/chain.rs @@ -8,12 +8,10 @@ use crate::{ signed::{SignedMessageApplyRes, SignedMessageCheckRes, SyntheticMessage, VerifiableMessage}, 
CheckInterpreter, ExecInterpreter, ProposalInterpreter, QueryInterpreter, }; -use anyhow::{bail, Context}; +use anyhow::Context; use async_stm::atomically; use async_trait::async_trait; -use fendermint_tracing::emit; use fendermint_vm_actor_interface::ipc; -use fendermint_vm_event::ParentFinalityMissingQuorum; use fendermint_vm_genesis::ValidatorKey; use fendermint_vm_message::{ chain::ChainMessage, @@ -21,13 +19,11 @@ use fendermint_vm_message::{ }; use fendermint_vm_resolver::pool::{ResolveKey, ResolvePool}; use fendermint_vm_topdown::launch::Toggle; -use fendermint_vm_topdown::proxy::IPCProviderProxyWithLatency; use fendermint_vm_topdown::{Checkpoint, TopdownClient}; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; use fvm_shared::econ::TokenAmount; use num_traits::Zero; -use std::sync::Arc; /// A resolution pool for bottom-up and top-down checkpoints. pub type CheckpointPool = ResolvePool; diff --git a/fendermint/vm/interpreter/src/fvm/topdown.rs b/fendermint/vm/interpreter/src/fvm/topdown.rs index 3d4060ee8..6b9ef8c7a 100644 --- a/fendermint/vm/interpreter/src/fvm/topdown.rs +++ b/fendermint/vm/interpreter/src/fvm/topdown.rs @@ -7,7 +7,7 @@ use crate::fvm::state::ipc::GatewayCaller; use crate::fvm::state::FvmExecState; use crate::fvm::FvmApplyRet; use anyhow::Context; -use fendermint_vm_topdown::{BlockHeight, Checkpoint}; +use fendermint_vm_topdown::Checkpoint; use fvm_ipld_blockstore::Blockstore; use ipc_api::cross::IpcEnvelope; @@ -23,13 +23,7 @@ pub async fn commit_checkpoint( where DB: Blockstore + Sync + Send + Clone + 'static, { - let prev_checkpoint = if let Some(prev_checkpoint) = - gateway_caller.commit_topdown_checkpoint(state, checkpoint)? 
- { - Some(prev_checkpoint) - } else { - None - }; + let prev_checkpoint = gateway_caller.commit_topdown_checkpoint(state, checkpoint)?; tracing::debug!("commit checkpoint parsed, prev_checkpoint: {prev_checkpoint:?}"); From 668cbd8254aef8c4f6288165c10d08b1c0da679e Mon Sep 17 00:00:00 2001 From: cryptoAtwill Date: Thu, 3 Oct 2024 17:18:43 +0800 Subject: [PATCH 10/22] fmt --- fendermint/vm/topdown/src/launch.rs | 12 ++---------- fendermint/vm/topdown/src/lib.rs | 8 ++------ fendermint/vm/topdown/tests/vote_reactor.rs | 6 +++++- ipld/resolver/src/client.rs | 2 +- ipld/resolver/tests/smoke.rs | 10 ++++++++-- 5 files changed, 18 insertions(+), 20 deletions(-) diff --git a/fendermint/vm/topdown/src/launch.rs b/fendermint/vm/topdown/src/launch.rs index 6ca243205..a47147cc8 100644 --- a/fendermint/vm/topdown/src/launch.rs +++ b/fendermint/vm/topdown/src/launch.rs @@ -33,11 +33,7 @@ pub async fn run_topdown( validator_key: SecretKey, gossip_client: Gossip, parent_client: ParentClient, - poller_fn: impl FnOnce( - &Checkpoint, - ParentClient, - ParentSyncerConfig, - ) -> Poller, + poller_fn: impl FnOnce(&Checkpoint, ParentClient, ParentSyncerConfig) -> Poller, ) -> anyhow::Result where CheckpointQuery: LaunchQuery + Send + Sync + 'static, @@ -58,11 +54,7 @@ where }) .collect::>(); - let poller = poller_fn( - &checkpoint, - parent_client, - config.syncer.clone(), - ); + let poller = poller_fn(&checkpoint, parent_client, config.syncer.clone()); let internal_event_rx = poller.subscribe(); let syncer_client = start_parent_syncer(config.syncer, poller)?; diff --git a/fendermint/vm/topdown/src/lib.rs b/fendermint/vm/topdown/src/lib.rs index 004cf5c1b..d48077382 100644 --- a/fendermint/vm/topdown/src/lib.rs +++ b/fendermint/vm/topdown/src/lib.rs @@ -196,11 +196,7 @@ impl Checkpoint { impl quickcheck::Arbitrary for TopdownProposal { fn arbitrary(g: &mut quickcheck::Gen) -> Self { - let observation = Observation::new( - u64::arbitrary(g), - Vec::arbitrary(g), - Vec::arbitrary(g) - ); 
+ let observation = Observation::new(u64::arbitrary(g), Vec::arbitrary(g), Vec::arbitrary(g)); let cert = ECDSACertificate::new_of_size(observation, 1); Self { @@ -208,4 +204,4 @@ impl quickcheck::Arbitrary for TopdownProposal { effects: (vec![], vec![]), } } -} \ No newline at end of file +} diff --git a/fendermint/vm/topdown/tests/vote_reactor.rs b/fendermint/vm/topdown/tests/vote_reactor.rs index dfd6bc0d5..c85c387a9 100644 --- a/fendermint/vm/topdown/tests/vote_reactor.rs +++ b/fendermint/vm/topdown/tests/vote_reactor.rs @@ -283,7 +283,11 @@ async fn waiting_for_quorum() { for client in &clients { let r = client.find_quorum().await.unwrap().unwrap(); - assert_eq!(r.payload().parent_height(), parent_height3, "should have quorum"); + assert_eq!( + r.payload().parent_height(), + parent_height3, + "should have quorum" + ); } // make observation on previous heights diff --git a/ipld/resolver/src/client.rs b/ipld/resolver/src/client.rs index 0e241ea2b..2e62311ca 100644 --- a/ipld/resolver/src/client.rs +++ b/ipld/resolver/src/client.rs @@ -7,8 +7,8 @@ use libipld::Cid; use tokio::sync::mpsc::UnboundedSender; use tokio::sync::oneshot; -use crate::vote_record::SubnetVoteRecord; use crate::service::{Request, ResolveResult}; +use crate::vote_record::SubnetVoteRecord; /// A facade to the [`Service`] to provide a nicer interface than message passing would allow on its own. 
#[derive(Clone)] diff --git a/ipld/resolver/tests/smoke.rs b/ipld/resolver/tests/smoke.rs index 5d65caa08..40636a857 100644 --- a/ipld/resolver/tests/smoke.rs +++ b/ipld/resolver/tests/smoke.rs @@ -28,7 +28,10 @@ use fvm_ipld_encoding::IPLD_RAW; use fvm_ipld_hamt::Hamt; use fvm_shared::{address::Address, ActorID}; use ipc_api::subnet_id::SubnetID; -use ipc_ipld_resolver::{Client, Config, ConnectionConfig, ContentConfig, DiscoveryConfig, Event, MembershipConfig, NetworkConfig, Resolver, Service, SubnetVoteRecord}; +use ipc_ipld_resolver::{ + Client, Config, ConnectionConfig, ContentConfig, DiscoveryConfig, Event, MembershipConfig, + NetworkConfig, Resolver, Service, SubnetVoteRecord, +}; use libp2p::{ core::{ muxing::StreamMuxerBox, @@ -224,7 +227,10 @@ async fn single_bootstrap_publish_receive_vote() { // Pubilish vote cluster.agents[0] .client - .publish_vote(SubnetVoteRecord {vote: vote.clone(), subnet: subnet_id.clone() }) + .publish_vote(SubnetVoteRecord { + vote: vote.clone(), + subnet: subnet_id.clone(), + }) .expect("failed to send vote"); // Receive vote. 
From d07aff508d05fc2e58f179409fcc18bfdc752201 Mon Sep 17 00:00:00 2001 From: cryptoAtwill Date: Thu, 3 Oct 2024 17:53:19 +0800 Subject: [PATCH 11/22] fix tests --- fendermint/crypto/src/quorum.rs | 16 ++++++++++++---- .../vm/message/golden/chain/ipc_top_down.cbor | 1 + .../vm/message/golden/chain/ipc_top_down.txt | 1 + fendermint/vm/topdown/src/vote/store.rs | 4 +++- fendermint/vm/topdown/src/vote/tally.rs | 2 +- 5 files changed, 18 insertions(+), 6 deletions(-) create mode 100644 fendermint/vm/message/golden/chain/ipc_top_down.cbor create mode 100644 fendermint/vm/message/golden/chain/ipc_top_down.txt diff --git a/fendermint/crypto/src/quorum.rs b/fendermint/crypto/src/quorum.rs index 379788636..65ea111f3 100644 --- a/fendermint/crypto/src/quorum.rs +++ b/fendermint/crypto/src/quorum.rs @@ -130,7 +130,9 @@ mod tests { let mut quorum = ECDSACertificate::new_of_size(payload.clone(), sks.len()); let ratio = Ratio::new(2, 3); for (i, sk) in sks.iter().enumerate() { - let sig = RecoverableECDSASignature::sign(sk, &payload).unwrap(); + let sig = + RecoverableECDSASignature::sign(sk, &fvm_ipld_encoding::to_vec(&payload).unwrap()) + .unwrap(); quorum.set_signature(i, &sk.public_key(), sig).unwrap(); } @@ -153,7 +155,9 @@ mod tests { let mut quorum = ECDSACertificate::new_of_size(payload.clone(), sks.len()); for (i, sk) in sks.iter().enumerate() { - let sig = RecoverableECDSASignature::sign(sk, &payload).unwrap(); + let sig = + RecoverableECDSASignature::sign(sk, &fvm_ipld_encoding::to_vec(&payload).unwrap()) + .unwrap(); if i % 3 == 0 { quorum.set_signature(i, &sk.public_key(), sig).unwrap(); } @@ -177,7 +181,9 @@ mod tests { let mut quorum = ECDSACertificate::new_of_size(payload.clone(), sks.len()); for (i, sk) in sks.iter().enumerate() { - let sig = RecoverableECDSASignature::sign(sk, &payload).unwrap(); + let sig = + RecoverableECDSASignature::sign(sk, &fvm_ipld_encoding::to_vec(&payload).unwrap()) + .unwrap(); quorum.set_signature(i, &sk.public_key(), sig).unwrap(); } 
@@ -207,7 +213,9 @@ mod tests { let mut quorum = ECDSACertificate::new_of_size(payload.clone(), sks.len()); let mut should_signs = vec![]; for (i, sk) in sks.iter().enumerate() { - let sig = RecoverableECDSASignature::sign(sk, &payload).unwrap(); + let sig = + RecoverableECDSASignature::sign(sk, &fvm_ipld_encoding::to_vec(&payload).unwrap()) + .unwrap(); let should_sign = random::(); if should_sign { diff --git a/fendermint/vm/message/golden/chain/ipc_top_down.cbor b/fendermint/vm/message/golden/chain/ipc_top_down.cbor new file mode 100644 index 000000000..c9db04135 --- /dev/null +++ b/fendermint/vm/message/golden/chain/ipc_top_down.cbor @@ -0,0 +1 @@ +a163497063a16b546f70446f776e45786563a26463657274a2677061796c6f6164a36d706172656e745f6865696768741bffffffffffffffff6b706172656e745f6861736885189f183918f9188b18357763756d756c61746976655f656666656374735f636f6d6d8801183a186018a7184e182f181c183a6a7369676e61747572657381f66765666665637473828080 \ No newline at end of file diff --git a/fendermint/vm/message/golden/chain/ipc_top_down.txt b/fendermint/vm/message/golden/chain/ipc_top_down.txt new file mode 100644 index 000000000..e931e1b07 --- /dev/null +++ b/fendermint/vm/message/golden/chain/ipc_top_down.txt @@ -0,0 +1 @@ +Ipc(TopDownExec(TopdownProposal { cert: ECDSACertificate { payload: Observation { parent_height: 18446744073709551615, parent_hash: [159, 57, 249, 139, 53], cumulative_effects_comm: [1, 58, 96, 167, 78, 47, 28, 58] }, signatures: [None] }, effects: ([], []) })) \ No newline at end of file diff --git a/fendermint/vm/topdown/src/vote/store.rs b/fendermint/vm/topdown/src/vote/store.rs index 576751da6..7a3492183 100644 --- a/fendermint/vm/topdown/src/vote/store.rs +++ b/fendermint/vm/topdown/src/vote/store.rs @@ -213,7 +213,9 @@ mod tests { ); let agg = VoteAgg(HashMap::from_iter(votes.iter().map(|v| (v.voter(), v)))); - let weights = agg.observation_weights(&HashMap::from_iter(powers)); + let mut weights = agg.observation_weights(&HashMap::from_iter(powers)); 
+ weights.sort_by(|a, b| a.1.cmp(&b.1)); + assert_eq!(weights, vec![(&observation1, 1), (&observation2, 2),]) } } diff --git a/fendermint/vm/topdown/src/vote/tally.rs b/fendermint/vm/topdown/src/vote/tally.rs index 03e3d3183..8bcfc819a 100644 --- a/fendermint/vm/topdown/src/vote/tally.rs +++ b/fendermint/vm/topdown/src/vote/tally.rs @@ -73,7 +73,7 @@ impl VoteTally { /// The equivalent formula can be found in CometBFT [here](https://github.com/cometbft/cometbft/blob/a8991d63e5aad8be82b90329b55413e3a4933dc0/types/vote_set.go#L307). pub fn quorum_threshold(&self) -> Weight { let total_weight: Weight = self.power_table.values().sum(); - total_weight * self.quorum_ratio.numer() / self.quorum_ratio.denom() + total_weight * self.quorum_ratio.numer() / self.quorum_ratio.denom() + 1 } /// Return the height of the first entry in the chain. From b8627971ce051075a2e3e9b7ea3d3e25c89c6304 Mon Sep 17 00:00:00 2001 From: cryptoAtwill Date: Mon, 28 Oct 2024 22:18:55 +0800 Subject: [PATCH 12/22] refactor launch --- fendermint/vm/topdown/src/launch.rs | 87 +++++++++++++---------- fendermint/vm/topdown/src/syncer/mod.rs | 63 +++++++++-------- fendermint/vm/topdown/src/vote/mod.rs | 91 +++++++++++++------------ 3 files changed, 134 insertions(+), 107 deletions(-) diff --git a/fendermint/vm/topdown/src/launch.rs b/fendermint/vm/topdown/src/launch.rs index a47147cc8..8872bd361 100644 --- a/fendermint/vm/topdown/src/launch.rs +++ b/fendermint/vm/topdown/src/launch.rs @@ -2,11 +2,11 @@ // SPDX-License-Identifier: Apache-2.0, MIT use crate::proxy::ParentQueryProxy; -use crate::syncer::{start_parent_syncer, ParentPoller, ParentSyncerConfig}; +use crate::syncer::{ParentPoller, ParentSyncerConfig, ParentSyncerReactorClient}; use crate::vote::gossip::GossipClient; use crate::vote::payload::PowerUpdates; use crate::vote::store::InMemoryVoteStore; -use crate::vote::{start_vote_reactor, StartVoteReactorParams}; +use crate::vote::{StartVoteReactorParams, VoteReactorClient}; use 
crate::{BlockHeight, Checkpoint, Config, TopdownClient, TopdownProposal}; use anyhow::anyhow; use cid::Cid; @@ -33,7 +33,7 @@ pub async fn run_topdown( validator_key: SecretKey, gossip_client: Gossip, parent_client: ParentClient, - poller_fn: impl FnOnce(&Checkpoint, ParentClient, ParentSyncerConfig) -> Poller, + poller_fn: impl FnOnce(&Checkpoint, ParentClient, ParentSyncerConfig) -> Poller + Send + 'static, ) -> anyhow::Result where CheckpointQuery: LaunchQuery + Send + Sync + 'static, @@ -41,39 +41,54 @@ where Poller: ParentPoller + Send + Sync + 'static, ParentClient: ParentQueryProxy + Send + Sync + 'static, { - let query = Arc::new(query); - let checkpoint = query_starting_checkpoint(&query, &parent_client).await?; - - let power_table = query_starting_committee(&query).await?; - let power_table = power_table - .into_iter() - .map(|v| { - let vk = ValidatorKey::new(v.public_key.0); - let w = v.power.0; - (vk, w) - }) - .collect::>(); - - let poller = poller_fn(&checkpoint, parent_client, config.syncer.clone()); - let internal_event_rx = poller.subscribe(); - - let syncer_client = start_parent_syncer(config.syncer, poller)?; - - let voting_client = start_vote_reactor(StartVoteReactorParams { - config: config.voting, - validator_key, - power_table, - last_finalized_height: checkpoint.target_height(), - latest_child_block: query.latest_chain_block()?, - gossip: gossip_client, - vote_store: InMemoryVoteStore::default(), - internal_event_listener: internal_event_rx, - })?; - - tracing::info!( - finality = checkpoint.to_string(), - "launching parent syncer with last committed checkpoint" - ); + let (syncer_client, syncer_rx) = + ParentSyncerReactorClient::new(config.syncer.request_channel_size); + let (voting_client, voting_rx) = VoteReactorClient::new(config.voting.req_channel_buffer_size); + + tokio::spawn(async move { + let query = Arc::new(query); + let checkpoint = query_starting_checkpoint(&query, &parent_client) + .await + .expect("should be able to query 
starting checkpoint"); + + let power_table = query_starting_committee(&query) + .await + .expect("should be able to query starting committee"); + let power_table = power_table + .into_iter() + .map(|v| { + let vk = ValidatorKey::new(v.public_key.0); + let w = v.power.0; + (vk, w) + }) + .collect::>(); + + let poller = poller_fn(&checkpoint, parent_client, config.syncer.clone()); + let internal_event_rx = poller.subscribe(); + + ParentSyncerReactorClient::start_reactor(syncer_rx, poller, config.syncer); + VoteReactorClient::start_reactor( + voting_rx, + StartVoteReactorParams { + config: config.voting, + validator_key, + power_table, + last_finalized_height: checkpoint.target_height(), + latest_child_block: query + .latest_chain_block() + .expect("should query latest chain block"), + gossip: gossip_client, + vote_store: InMemoryVoteStore::default(), + internal_event_listener: internal_event_rx, + }, + ) + .expect("cannot start vote reactor"); + + tracing::info!( + finality = checkpoint.to_string(), + "launching parent syncer with last committed checkpoint" + ); + }); Ok(TopdownClient { syncer: syncer_client, diff --git a/fendermint/vm/topdown/src/syncer/mod.rs b/fendermint/vm/topdown/src/syncer/mod.rs index 4683c9e14..11b4eb814 100644 --- a/fendermint/vm/topdown/src/syncer/mod.rs +++ b/fendermint/vm/topdown/src/syncer/mod.rs @@ -48,6 +48,39 @@ pub struct ParentSyncerReactorClient { tx: mpsc::Sender, } +impl ParentSyncerReactorClient { + pub fn new(request_channel_size: usize) -> (Self, mpsc::Receiver) { + let (tx, rx) = mpsc::channel(request_channel_size); + (Self { tx }, rx) + } + + pub fn start_reactor( + mut rx: mpsc::Receiver, + mut poller: P, + config: ParentSyncerConfig, + ) { + tokio::spawn(async move { + let polling_interval = config.polling_interval; + + loop { + select! 
{ + _ = tokio::time::sleep(polling_interval) => { + if let Err(e) = poller.try_poll().await { + tracing::error!(err = e.to_string(), "cannot sync with parent"); + } + } + req = rx.recv() => { + let Some(req) = req else { break }; + handle_request(req, &mut poller); + } + } + } + + tracing::warn!("parent syncer stopped") + }); + } +} + /// Polls the parent block view #[async_trait] pub trait ParentPoller { @@ -69,34 +102,6 @@ pub trait ParentPoller { ) -> anyhow::Result>>; } -pub fn start_parent_syncer( - config: ParentSyncerConfig, - mut poller: P, -) -> anyhow::Result { - let (tx, mut rx) = mpsc::channel(config.request_channel_size); - - tokio::spawn(async move { - let polling_interval = config.polling_interval; - - loop { - select! { - _ = tokio::time::sleep(polling_interval) => { - if let Err(e) = poller.try_poll().await { - tracing::error!(err = e.to_string(), "cannot sync with parent"); - } - } - req = rx.recv() => { - let Some(req) = req else { break }; - handle_request(req, &mut poller); - } - } - } - - tracing::warn!("parent syncer stopped") - }); - Ok(ParentSyncerReactorClient { tx }) -} - impl ParentSyncerReactorClient { /// Marks the height as finalized. 
/// There is no need to wait for ack from the reactor @@ -117,7 +122,7 @@ impl ParentSyncerReactorClient { } } -enum ParentSyncerRequest { +pub enum ParentSyncerRequest { /// A new parent height is finalized Finalized(Checkpoint), QueryParentBlockViews { diff --git a/fendermint/vm/topdown/src/vote/mod.rs b/fendermint/vm/topdown/src/vote/mod.rs index 64e344fba..90df66503 100644 --- a/fendermint/vm/topdown/src/vote/mod.rs +++ b/fendermint/vm/topdown/src/vote/mod.rs @@ -47,6 +47,54 @@ pub struct VoteReactorClient { tx: mpsc::Sender, } +impl VoteReactorClient { + pub fn new(req_channel_buffer_size: usize) -> (Self, mpsc::Receiver) { + let (tx, rx) = mpsc::channel(req_channel_buffer_size); + (Self { tx }, rx) + } + + pub fn start_reactor< + G: GossipClient + Send + Sync + 'static, + V: VoteStore + Send + Sync + 'static, + >( + rx: mpsc::Receiver, + params: StartVoteReactorParams, + ) -> anyhow::Result<()> { + let config = params.config; + let vote_tally = VoteTally::new( + params.power_table, + params.last_finalized_height, + params.vote_store, + )?; + + let validator_key = params.validator_key; + let internal_event_listener = params.internal_event_listener; + let latest_child_block = params.latest_child_block; + let gossip = params.gossip; + + tokio::spawn(async move { + let sleep = Duration::new(config.voting_sleep_interval_sec, 0); + + let inner = VotingHandler { + validator_key, + req_rx: rx, + internal_event_listener, + vote_tally, + latest_child_block, + config, + gossip, + }; + let mut machine = OperationStateMachine::new(inner); + loop { + machine = machine.step().await; + tokio::time::sleep(sleep).await; + } + }); + + Ok(()) + } +} + pub struct StartVoteReactorParams { pub config: VoteConfig, pub validator_key: SecretKey, @@ -58,47 +106,6 @@ pub struct StartVoteReactorParams { pub internal_event_listener: broadcast::Receiver, } -pub fn start_vote_reactor< - G: GossipClient + Send + Sync + 'static, - V: VoteStore + Send + Sync + 'static, ->( - params: 
StartVoteReactorParams, -) -> anyhow::Result { - let config = params.config; - let (tx, rx) = mpsc::channel(config.req_channel_buffer_size); - let vote_tally = VoteTally::new( - params.power_table, - params.last_finalized_height, - params.vote_store, - )?; - - let validator_key = params.validator_key; - let internal_event_listener = params.internal_event_listener; - let latest_child_block = params.latest_child_block; - let gossip = params.gossip; - - tokio::spawn(async move { - let sleep = Duration::new(config.voting_sleep_interval_sec, 0); - - let inner = VotingHandler { - validator_key, - req_rx: rx, - internal_event_listener, - vote_tally, - latest_child_block, - config, - gossip, - }; - let mut machine = OperationStateMachine::new(inner); - loop { - machine = machine.step().await; - tokio::time::sleep(sleep).await; - } - }); - - Ok(VoteReactorClient { tx }) -} - impl VoteReactorClient { async fn request) -> VoteReactorRequest>( &self, @@ -184,7 +191,7 @@ impl VoteReactorClient { } } -enum VoteReactorRequest { +pub enum VoteReactorRequest { /// A new child subnet block is mined, this is the fendermint block NewLocalBlockMined(BlockHeight), /// Query the current operation mode of the vote tally state machine From 0d19a5269588a763907d4524448451cff36eb327 Mon Sep 17 00:00:00 2001 From: cryptoAtwill Date: Tue, 29 Oct 2024 12:39:58 +0800 Subject: [PATCH 13/22] fix find topdown proposal --- fendermint/app/src/cmd/run.rs | 5 +++-- fendermint/vm/topdown/src/lib.rs | 5 ++++- fendermint/vm/topdown/src/syncer/mod.rs | 20 +++++++++++++++++--- fendermint/vm/topdown/src/vote/mod.rs | 4 ++-- fendermint/vm/topdown/tests/vote_reactor.rs | 2 +- 5 files changed, 27 insertions(+), 9 deletions(-) diff --git a/fendermint/app/src/cmd/run.rs b/fendermint/app/src/cmd/run.rs index 5b8991419..e59134041 100644 --- a/fendermint/app/src/cmd/run.rs +++ b/fendermint/app/src/cmd/run.rs @@ -48,6 +48,7 @@ use ipc_provider::IpcProvider; use libp2p::identity::secp256k1; use 
libp2p::identity::Keypair; use std::sync::Arc; +use std::time::Duration; use tendermint_rpc::Client; use tokio::sync::broadcast; use tokio::sync::broadcast::error::TryRecvError; @@ -274,7 +275,7 @@ async fn run(settings: Settings) -> anyhow::Result<()> { request_channel_size: 1024, broadcast_channel_size: 1024, chain_head_delay: topdown_config.chain_head_delay, - polling_interval: topdown_config.polling_interval, + polling_interval_millis: Duration::from_millis(100), max_store_blocks: topdown_config.parent_view_store_max_blocks.unwrap_or(2000), sync_many: true, observation: ObservationConfig { @@ -285,7 +286,7 @@ async fn run(settings: Settings) -> anyhow::Result<()> { req_channel_buffer_size: 1024, req_batch_processing_size: 10, gossip_req_processing_size: 256, - voting_sleep_interval_sec: 10, + voting_sleep_interval_millis: 100, }, }; diff --git a/fendermint/vm/topdown/src/lib.rs b/fendermint/vm/topdown/src/lib.rs index d48077382..ef0e0f5d0 100644 --- a/fendermint/vm/topdown/src/lib.rs +++ b/fendermint/vm/topdown/src/lib.rs @@ -67,6 +67,7 @@ impl TopdownClient { pub async fn find_topdown_proposal(&self) -> anyhow::Result> { let Some(quorum_cert) = self.voting.find_quorum().await? else { + tracing::debug!("no quorum cert found"); return Ok(None); }; @@ -76,10 +77,12 @@ impl TopdownClient { .await? 
else { // absorb the error, dont alert the caller + tracing::warn!("no parent block views found"); return Ok(None); }; - let mut linear = LinearizedParentBlockView::from(quorum_cert.payload()); + let latest_checkpoint = self.syncer.latest_checkpoint().await?; + let mut linear = LinearizedParentBlockView::from(&latest_checkpoint); let mut xnet_msgs = vec![]; let mut validator_changes = vec![]; diff --git a/fendermint/vm/topdown/src/syncer/mod.rs b/fendermint/vm/topdown/src/syncer/mod.rs index 11b4eb814..dd089bc8b 100644 --- a/fendermint/vm/topdown/src/syncer/mod.rs +++ b/fendermint/vm/topdown/src/syncer/mod.rs @@ -33,8 +33,8 @@ pub struct ParentSyncerConfig { /// conservative and avoid other from rejecting the proposal because they don't see the /// height as final yet. pub chain_head_delay: BlockHeight, - /// Parent syncing cron period, in seconds - pub polling_interval: Duration, + /// Parent syncing cron period, in millis + pub polling_interval_millis: Duration, /// Max number of un-finalized parent blocks that should be stored in the store pub max_store_blocks: BlockHeight, /// Attempts to sync as many block as possible till the finalized chain head @@ -60,7 +60,7 @@ impl ParentSyncerReactorClient { config: ParentSyncerConfig, ) { tokio::spawn(async move { - let polling_interval = config.polling_interval; + let polling_interval = config.polling_interval_millis; loop { select! { @@ -110,6 +110,16 @@ impl ParentSyncerReactorClient { Ok(()) } + pub async fn latest_checkpoint( + &self, + ) -> anyhow::Result { + let (tx, rx) = oneshot::channel(); + self.tx + .send(ParentSyncerRequest::QueryLatestCheckpoint(tx)) + .await?; + Ok(rx.await?) 
+ } + pub async fn query_parent_block_view( &self, to: BlockHeight, @@ -125,6 +135,7 @@ impl ParentSyncerReactorClient { pub enum ParentSyncerRequest { /// A new parent height is finalized Finalized(Checkpoint), + QueryLatestCheckpoint(oneshot::Sender), QueryParentBlockViews { to: BlockHeight, tx: oneshot::Sender>>>, @@ -153,5 +164,8 @@ fn handle_request( }); let _ = tx.send(r); } + ParentSyncerRequest::QueryLatestCheckpoint(tx) => { + let _ = tx.send(poller.last_checkpoint().clone()); + } } } diff --git a/fendermint/vm/topdown/src/vote/mod.rs b/fendermint/vm/topdown/src/vote/mod.rs index 90df66503..18bcae5bc 100644 --- a/fendermint/vm/topdown/src/vote/mod.rs +++ b/fendermint/vm/topdown/src/vote/mod.rs @@ -38,7 +38,7 @@ pub struct VoteConfig { /// The number of vote recording requests the reactor should process per run before handling other tasks pub gossip_req_processing_size: usize, /// The time to sleep for voting loop if nothing happens - pub voting_sleep_interval_sec: u64, + pub voting_sleep_interval_millis: u64, } /// The client to interact with the vote reactor @@ -73,7 +73,7 @@ impl VoteReactorClient { let gossip = params.gossip; tokio::spawn(async move { - let sleep = Duration::new(config.voting_sleep_interval_sec, 0); + let sleep = Duration::from_millis(config.voting_sleep_interval_millis); let inner = VotingHandler { validator_key, diff --git a/fendermint/vm/topdown/tests/vote_reactor.rs b/fendermint/vm/topdown/tests/vote_reactor.rs index c85c387a9..15590618a 100644 --- a/fendermint/vm/topdown/tests/vote_reactor.rs +++ b/fendermint/vm/topdown/tests/vote_reactor.rs @@ -62,7 +62,7 @@ fn default_config() -> VoteConfig { req_channel_buffer_size: 1024, req_batch_processing_size: 10, gossip_req_processing_size: 10, - voting_sleep_interval_sec: 1, + voting_sleep_interval_millis: 1, } } From 0000254e3c78ee1d9f55e61035c64e017ea16207 Mon Sep 17 00:00:00 2001 From: cryptoAtwill Date: Tue, 29 Oct 2024 13:12:10 +0800 Subject: [PATCH 14/22] rename parameters --- 
fendermint/vm/topdown/src/lib.rs | 12 ++++++------ fendermint/vm/topdown/src/observation.rs | 20 ++++++++++---------- fendermint/vm/topdown/src/syncer/mod.rs | 4 +--- fendermint/vm/topdown/src/vote/mod.rs | 13 ++++++++++--- fendermint/vm/topdown/src/vote/tally.rs | 12 ++++++------ 5 files changed, 33 insertions(+), 28 deletions(-) diff --git a/fendermint/vm/topdown/src/lib.rs b/fendermint/vm/topdown/src/lib.rs index ef0e0f5d0..7eb63d68a 100644 --- a/fendermint/vm/topdown/src/lib.rs +++ b/fendermint/vm/topdown/src/lib.rs @@ -73,7 +73,7 @@ impl TopdownClient { let Ok(views) = self .syncer - .query_parent_block_view(quorum_cert.payload().parent_height) + .query_parent_block_view(quorum_cert.payload().parent_subnet_height) .await? else { // absorb the error, dont alert the caller @@ -90,7 +90,7 @@ impl TopdownClient { for maybe_view in views { let Some(v) = maybe_view else { tracing::error!( - till = quorum_cert.payload().parent_height, + till = quorum_cert.payload().parent_subnet_height, "parent block view does not have all the data" ); return Ok(None); @@ -164,8 +164,8 @@ impl Display for Checkpoint { write!( f, "Checkpoint(version = 1, height = {}, block_hash = {}, effects = {})", - v.parent_height, - hex::encode(&v.parent_hash), + v.parent_subnet_height, + hex::encode(&v.parent_subnet_hash), hex::encode(&v.cumulative_effects_comm) ) } @@ -180,13 +180,13 @@ impl Checkpoint { pub fn target_height(&self) -> BlockHeight { match self { - Checkpoint::V1(b) => b.parent_height, + Checkpoint::V1(b) => b.parent_subnet_height, } } pub fn target_hash(&self) -> &Bytes { match self { - Checkpoint::V1(b) => &b.parent_hash, + Checkpoint::V1(b) => &b.parent_subnet_hash, } } diff --git a/fendermint/vm/topdown/src/observation.rs b/fendermint/vm/topdown/src/observation.rs index d83331c6d..cfa5d9814 100644 --- a/fendermint/vm/topdown/src/observation.rs +++ b/fendermint/vm/topdown/src/observation.rs @@ -32,13 +32,13 @@ pub struct ObservationConfig { /// The content that validators 
gossip among each other. #[derive(Serialize, Deserialize, Hash, Debug, Clone, Eq, PartialEq, Arbitrary)] pub struct Observation { - pub(crate) parent_height: u64, + pub(crate) parent_subnet_height: u64, /// The hash of the chain unit at that height. Usually a block hash, but could /// be another entity (e.g. tipset CID), depending on the parent chain /// and our interface to it. For example, if the parent is a Filecoin network, /// this would be a tipset CID coerced into a block hash if queried through /// the Eth API, or the tipset CID as-is if accessed through the Filecoin API. - pub(crate) parent_hash: Bytes, + pub(crate) parent_subnet_hash: Bytes, /// A rolling/cumulative commitment to topdown effects since the beginning of /// time, including the ones in this block. pub(crate) cumulative_effects_comm: Bytes, @@ -100,7 +100,7 @@ pub fn deduce_new_observation( let observation = agg.into_observation()?; tracing::info!( - height = observation.parent_height, + height = observation.parent_subnet_height, "new observation derived" ); @@ -167,8 +167,8 @@ impl CertifiedObservation { impl Observation { pub fn new(parent_height: BlockHeight, parent_hash: Bytes, commitment: Bytes) -> Self { Self { - parent_height, - parent_hash, + parent_subnet_height: parent_height, + parent_subnet_hash: parent_hash, cumulative_effects_comm: commitment, } } @@ -179,8 +179,8 @@ impl Display for Observation { write!( f, "Observation(parent_height={}, parent_hash={}, commitment={})", - self.parent_height, - hex::encode(&self.parent_hash), + self.parent_subnet_height, + hex::encode(&self.parent_subnet_hash), hex::encode(&self.cumulative_effects_comm), ) } @@ -188,7 +188,7 @@ impl Display for Observation { impl Observation { pub fn parent_height(&self) -> BlockHeight { - self.parent_height + self.parent_subnet_height } } @@ -218,8 +218,8 @@ impl From<&Checkpoint> for LinearizedParentBlockView { impl From<&Observation> for LinearizedParentBlockView { fn from(value: &Observation) -> Self { 
LinearizedParentBlockView { - parent_height: value.parent_height, - parent_hash: Some(value.parent_hash.clone()), + parent_height: value.parent_subnet_height, + parent_hash: Some(value.parent_subnet_hash.clone()), cumulative_effects_comm: value.cumulative_effects_comm.clone(), } } diff --git a/fendermint/vm/topdown/src/syncer/mod.rs b/fendermint/vm/topdown/src/syncer/mod.rs index dd089bc8b..0269eebc7 100644 --- a/fendermint/vm/topdown/src/syncer/mod.rs +++ b/fendermint/vm/topdown/src/syncer/mod.rs @@ -110,9 +110,7 @@ impl ParentSyncerReactorClient { Ok(()) } - pub async fn latest_checkpoint( - &self, - ) -> anyhow::Result { + pub async fn latest_checkpoint(&self) -> anyhow::Result { let (tx, rx) = oneshot::channel(); self.tx .send(ParentSyncerRequest::QueryLatestCheckpoint(tx)) diff --git a/fendermint/vm/topdown/src/vote/mod.rs b/fendermint/vm/topdown/src/vote/mod.rs index 18bcae5bc..0089a2bfa 100644 --- a/fendermint/vm/topdown/src/vote/mod.rs +++ b/fendermint/vm/topdown/src/vote/mod.rs @@ -295,9 +295,16 @@ where if !self.vote_tally.check_quorum_cert(cert.borrow()) { let _ = tx.send(false); } else { - let _ = tx.send( - self.vote_tally.last_finalized_height() + 1 == cert.payload().parent_height, - ); + let is_future = self.vote_tally.last_finalized_height() + < cert.payload().parent_subnet_height; + if !is_future { + tracing::error!( + finalized = self.vote_tally.last_finalized_height(), + cert = cert.payload().parent_subnet_height, + "cert block number lower than latest finalized height" + ); + } + let _ = tx.send(is_future); } } } diff --git a/fendermint/vm/topdown/src/vote/tally.rs b/fendermint/vm/topdown/src/vote/tally.rs index 8bcfc819a..0aba56ab7 100644 --- a/fendermint/vm/topdown/src/vote/tally.rs +++ b/fendermint/vm/topdown/src/vote/tally.rs @@ -293,7 +293,7 @@ mod tests { vote_tally.add_vote(vote).unwrap(); let mut obs2 = random_observation(); - obs2.parent_height = obs.parent_height; + obs2.parent_subnet_height = obs.parent_subnet_height; let vote = 
Vote::v1_checked(CertifiedObservation::sign(obs2, 100, &validators[0].0).unwrap()) .unwrap(); @@ -315,7 +315,7 @@ mod tests { let observation = random_observation(); vote_tally - .set_finalized(observation.parent_height - 1) + .set_finalized(observation.parent_subnet_height - 1) .unwrap(); for validator in validators { @@ -351,10 +351,10 @@ mod tests { let observation1 = random_observation(); let mut observation2 = observation1.clone(); - observation2.parent_hash = vec![1]; + observation2.parent_subnet_hash = vec![1]; vote_tally - .set_finalized(observation1.parent_height - 1) + .set_finalized(observation1.parent_subnet_height - 1) .unwrap(); for validator in validators_grp1 { @@ -390,7 +390,7 @@ mod tests { let observation = random_observation(); vote_tally - .set_finalized(observation.parent_height - 1) + .set_finalized(observation.parent_subnet_height - 1) .unwrap(); for validator in validators { @@ -425,7 +425,7 @@ mod tests { let observation = random_observation(); vote_tally - .set_finalized(observation.parent_height - 1) + .set_finalized(observation.parent_subnet_height - 1) .unwrap(); for (count, validator) in validators.iter().enumerate() { From b5e7841def7fcd4ce3d09b521e3e4bcff5fb799b Mon Sep 17 00:00:00 2001 From: cryptoAtwill Date: Tue, 29 Oct 2024 17:29:42 +0800 Subject: [PATCH 15/22] refactor poller reactor --- fendermint/app/src/cmd/run.rs | 20 +-- fendermint/vm/topdown/src/error.rs | 18 --- fendermint/vm/topdown/src/launch.rs | 10 +- fendermint/vm/topdown/src/lib.rs | 61 ++------ fendermint/vm/topdown/src/syncer/mod.rs | 176 +++++++++++----------- fendermint/vm/topdown/src/syncer/poll.rs | 35 ++--- fendermint/vm/topdown/src/syncer/store.rs | 29 ++-- 7 files changed, 145 insertions(+), 204 deletions(-) delete mode 100644 fendermint/vm/topdown/src/error.rs diff --git a/fendermint/app/src/cmd/run.rs b/fendermint/app/src/cmd/run.rs index e59134041..5f6bb215f 100644 --- a/fendermint/app/src/cmd/run.rs +++ b/fendermint/app/src/cmd/run.rs @@ -30,7 +30,6 @@ 
use fendermint_vm_topdown::observe::register_metrics as register_topdown_metrics use fendermint_vm_topdown::proxy::{ IPCProviderProxy, IPCProviderProxyWithLatency, ParentQueryProxy, }; -use fendermint_vm_topdown::syncer::payload::ParentBlockView; use fendermint_vm_topdown::syncer::poll::ParentPoll; use fendermint_vm_topdown::syncer::store::{InMemoryParentViewStore, ParentViewStore}; use fendermint_vm_topdown::syncer::{ParentPoller, ParentSyncerConfig, TopDownSyncEvent}; @@ -38,7 +37,7 @@ use fendermint_vm_topdown::vote::error::Error; use fendermint_vm_topdown::vote::gossip::GossipClient; use fendermint_vm_topdown::vote::payload::Vote; use fendermint_vm_topdown::vote::VoteConfig; -use fendermint_vm_topdown::{BlockHeight, Checkpoint, TopdownClient}; +use fendermint_vm_topdown::{Checkpoint, TopdownClient}; use fvm_shared::address::{current_network, Address, Network}; use ipc_api::subnet_id::SubnetID; use ipc_ipld_resolver::{Event as ResolverEvent, SubnetVoteRecord}; @@ -276,6 +275,7 @@ async fn run(settings: Settings) -> anyhow::Result<()> { broadcast_channel_size: 1024, chain_head_delay: topdown_config.chain_head_delay, polling_interval_millis: Duration::from_millis(100), + max_requests_per_loop: 10, max_store_blocks: topdown_config.parent_view_store_max_blocks.unwrap_or(2000), sync_many: true, observation: ObservationConfig { @@ -299,6 +299,7 @@ async fn run(settings: Settings) -> anyhow::Result<()> { .ok_or_else(|| anyhow!("topdown enabled but ipld is not, enable ipld first"))?; let client = run_topdown( + parent_view_store.clone(), app_parent_finality_query, config, validator @@ -540,15 +541,17 @@ struct TendermintAwareParentPoller { #[async_trait] impl ParentPoller for TendermintAwareParentPoller where - S: ParentViewStore + Send + Sync + 'static, + S: ParentViewStore + Send + Sync + 'static + Clone, P: Send + Sync + 'static + ParentQueryProxy, { + type Store = S; + fn subscribe(&self) -> Receiver { self.inner.subscribe() } - fn last_checkpoint(&self) -> 
&Checkpoint { - self.inner.last_checkpoint() + fn store(&self) -> Self::Store { + self.inner.store() } fn finalize(&mut self, checkpoint: Checkpoint) -> anyhow::Result<()> { @@ -562,13 +565,6 @@ where } self.inner.try_poll().await } - - fn dump_parent_block_views( - &self, - to: BlockHeight, - ) -> anyhow::Result>> { - self.inner.dump_parent_block_views(to) - } } impl TendermintAwareParentPoller { diff --git a/fendermint/vm/topdown/src/error.rs b/fendermint/vm/topdown/src/error.rs deleted file mode 100644 index eef6090d7..000000000 --- a/fendermint/vm/topdown/src/error.rs +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2022-2024 Protocol Labs -// SPDX-License-Identifier: Apache-2.0, MIT - -use crate::{BlockHeight, SequentialAppendError}; -use thiserror::Error; - -/// The errors for top down checkpointing -#[derive(Error, Debug, Eq, PartialEq, Clone)] -pub enum Error { - #[error("Incoming items are not order sequentially")] - NotSequential, - #[error("The parent view update with block height is not sequential: {0:?}")] - NonSequentialParentViewInsert(SequentialAppendError), - #[error("Parent chain reorg detected")] - ParentChainReorgDetected, - #[error("Cannot query parent at height {1}: {0}")] - CannotQueryParent(String, BlockHeight), -} diff --git a/fendermint/vm/topdown/src/launch.rs b/fendermint/vm/topdown/src/launch.rs index 8872bd361..63a2a5d4f 100644 --- a/fendermint/vm/topdown/src/launch.rs +++ b/fendermint/vm/topdown/src/launch.rs @@ -2,7 +2,10 @@ // SPDX-License-Identifier: Apache-2.0, MIT use crate::proxy::ParentQueryProxy; -use crate::syncer::{ParentPoller, ParentSyncerConfig, ParentSyncerReactorClient}; +use crate::syncer::store::InMemoryParentViewStore; +use crate::syncer::{ + start_polling_reactor, ParentPoller, ParentSyncerConfig, ParentSyncerReactorClient, +}; use crate::vote::gossip::GossipClient; use crate::vote::payload::PowerUpdates; use crate::vote::store::InMemoryVoteStore; @@ -28,6 +31,7 @@ use std::time::Duration; /// - listens to certified 
topdown observation from p2p /// - aggregate peer certified observations into a quorum certificate for commitment in fendermint pub async fn run_topdown( + store: InMemoryParentViewStore, query: CheckpointQuery, config: Config, validator_key: SecretKey, @@ -42,7 +46,7 @@ where ParentClient: ParentQueryProxy + Send + Sync + 'static, { let (syncer_client, syncer_rx) = - ParentSyncerReactorClient::new(config.syncer.request_channel_size); + ParentSyncerReactorClient::new(config.syncer.request_channel_size, store); let (voting_client, voting_rx) = VoteReactorClient::new(config.voting.req_channel_buffer_size); tokio::spawn(async move { @@ -66,7 +70,7 @@ where let poller = poller_fn(&checkpoint, parent_client, config.syncer.clone()); let internal_event_rx = poller.subscribe(); - ParentSyncerReactorClient::start_reactor(syncer_rx, poller, config.syncer); + start_polling_reactor(syncer_rx, poller, config.syncer); VoteReactorClient::start_reactor( voting_rx, StartVoteReactorParams { diff --git a/fendermint/vm/topdown/src/lib.rs b/fendermint/vm/topdown/src/lib.rs index 7eb63d68a..9490bc12a 100644 --- a/fendermint/vm/topdown/src/lib.rs +++ b/fendermint/vm/topdown/src/lib.rs @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0, MIT mod cache; -mod error; pub mod convert; pub mod proxy; @@ -21,8 +20,8 @@ use serde::{Deserialize, Serialize}; use std::fmt::{Display, Formatter}; pub use crate::cache::{SequentialAppendError, SequentialKeyCache, ValueIter}; -pub use crate::error::Error; -use crate::observation::{LinearizedParentBlockView, Observation}; +use crate::observation::Observation; +use crate::syncer::store::InMemoryParentViewStore; use crate::syncer::{ParentSyncerConfig, ParentSyncerReactorClient}; use crate::vote::payload::PowerUpdates; use crate::vote::{VoteConfig, VoteReactorClient}; @@ -56,7 +55,7 @@ pub struct TopdownProposal { #[derive(Clone)] pub struct TopdownClient { - syncer: ParentSyncerReactorClient, + syncer: ParentSyncerReactorClient, voting: 
VoteReactorClient, } @@ -71,53 +70,17 @@ impl TopdownClient { return Ok(None); }; - let Ok(views) = self - .syncer - .query_parent_block_view(quorum_cert.payload().parent_subnet_height) - .await? - else { - // absorb the error, dont alert the caller - tracing::warn!("no parent block views found"); - return Ok(None); - }; - - let latest_checkpoint = self.syncer.latest_checkpoint().await?; - let mut linear = LinearizedParentBlockView::from(&latest_checkpoint); - - let mut xnet_msgs = vec![]; - let mut validator_changes = vec![]; - - for maybe_view in views { - let Some(v) = maybe_view else { - tracing::error!( - till = quorum_cert.payload().parent_subnet_height, - "parent block view does not have all the data" - ); - return Ok(None); + let end_height = quorum_cert.payload().parent_subnet_height; + let (ob, xnet_msgs, validator_changes) = + match self.syncer.prepare_quorum_cert_content(end_height) { + Ok(v) => v, + Err(e) => { + tracing::error!(err = e.to_string(), "cannot prepare quorum cert content"); + // return None, don't crash the app + return Ok(None); + } }; - if let Err(e) = linear.append(v.clone()) { - tracing::error!(err = e.to_string(), "parent block view cannot be appended"); - return Ok(None); - } - - if let Some(payload) = v.payload { - xnet_msgs.extend(payload.xnet_msgs); - validator_changes.extend(payload.validator_changes); - } - } - - let ob = match linear.into_observation() { - Ok(ob) => ob, - Err(e) => { - tracing::error!( - err = e.to_string(), - "cannot convert linearized parent view into observation" - ); - return Ok(None); - } - }; - if ob != *quorum_cert.payload() { // could be due to the minor quorum, just return no proposal tracing::warn!( diff --git a/fendermint/vm/topdown/src/syncer/mod.rs b/fendermint/vm/topdown/src/syncer/mod.rs index 0269eebc7..547a5db62 100644 --- a/fendermint/vm/topdown/src/syncer/mod.rs +++ b/fendermint/vm/topdown/src/syncer/mod.rs @@ -1,21 +1,26 @@ // Copyright 2022-2024 Protocol Labs // SPDX-License-Identifier: 
Apache-2.0, MIT -use crate::observation::{Observation, ObservationConfig}; -use crate::syncer::payload::ParentBlockView; +use crate::observation::{LinearizedParentBlockView, Observation, ObservationConfig}; +use crate::syncer::store::ParentViewStore; use crate::{BlockHeight, Checkpoint}; use anyhow::anyhow; use async_trait::async_trait; +use ipc_api::cross::IpcEnvelope; +use ipc_api::staking::StakingChangeRequest; use serde::Deserialize; +use std::sync::{Arc, Mutex}; use std::time::Duration; use tokio::select; -use tokio::sync::{broadcast, mpsc, oneshot}; +use tokio::sync::{broadcast, mpsc}; pub mod error; pub mod payload; pub mod poll; pub mod store; +pub type QuorumCertContent = (Observation, Vec, Vec); + #[derive(Clone, Debug)] pub enum TopDownSyncEvent { /// The fendermint node is syncing with peers @@ -35,6 +40,8 @@ pub struct ParentSyncerConfig { pub chain_head_delay: BlockHeight, /// Parent syncing cron period, in millis pub polling_interval_millis: Duration, + /// Max number of requests to process in the reactor loop + pub max_requests_per_loop: usize, /// Max number of un-finalized parent blocks that should be stored in the store pub max_store_blocks: BlockHeight, /// Attempts to sync as many block as possible till the finalized chain head @@ -44,126 +51,123 @@ pub struct ParentSyncerConfig { } #[derive(Clone)] -pub struct ParentSyncerReactorClient { +pub struct ParentSyncerReactorClient { tx: mpsc::Sender, + checkpoint: Arc>, + store: S, } -impl ParentSyncerReactorClient { - pub fn new(request_channel_size: usize) -> (Self, mpsc::Receiver) { +impl ParentSyncerReactorClient { + pub fn new( + request_channel_size: usize, + store: S, + ) -> (Self, mpsc::Receiver) { let (tx, rx) = mpsc::channel(request_channel_size); - (Self { tx }, rx) + let checkpoint = Arc::new(Mutex::new(Checkpoint::v1(0, vec![], vec![]))); + ( + Self { + tx, + checkpoint, + store, + }, + rx, + ) } +} - pub fn start_reactor( - mut rx: mpsc::Receiver, - mut poller: P, - config: 
ParentSyncerConfig, - ) { - tokio::spawn(async move { - let polling_interval = config.polling_interval_millis; - - loop { - select! { - _ = tokio::time::sleep(polling_interval) => { - if let Err(e) = poller.try_poll().await { - tracing::error!(err = e.to_string(), "cannot sync with parent"); - } +pub fn start_polling_reactor( + mut rx: mpsc::Receiver, + mut poller: P, + config: ParentSyncerConfig, +) { + let polling_interval = config.polling_interval_millis; + tokio::spawn(async move { + loop { + select! { + _ = tokio::time::sleep(polling_interval) => { + if let Err(e) = poller.try_poll().await { + tracing::error!(err = e.to_string(), "cannot sync with parent"); } - req = rx.recv() => { - let Some(req) = req else { break }; - handle_request(req, &mut poller); + } + req = rx.recv() => { + let Some(req) = req else { break }; + match req { + ParentSyncerRequest::Finalized(cp) => { + if let Err(e) = poller.finalize(cp) { + tracing::error!(err = e.to_string(), "cannot finalize syncer") + } + }, } } } - - tracing::warn!("parent syncer stopped") - }); - } + } + }); } /// Polls the parent block view #[async_trait] pub trait ParentPoller { + type Store: ParentViewStore + Send + Sync + 'static + Clone; + fn subscribe(&self) -> broadcast::Receiver; - /// The previous checkpoint committed - fn last_checkpoint(&self) -> &Checkpoint; + fn store(&self) -> Self::Store; /// The target block height is finalized, purge all the parent view before the target height fn finalize(&mut self, checkpoint: Checkpoint) -> anyhow::Result<()>; /// Try to poll the next parent height async fn try_poll(&mut self) -> anyhow::Result<()>; - - /// Dump the parent block view from the height after the last committed checkpoint to the `to` height - fn dump_parent_block_views( - &self, - to: BlockHeight, - ) -> anyhow::Result>>; } -impl ParentSyncerReactorClient { +impl ParentSyncerReactorClient { + fn set_checkpoint(&self, cp: Checkpoint) { + let mut checkpoint = self.checkpoint.lock().unwrap(); + 
*checkpoint = cp.clone(); + } /// Marks the height as finalized. /// There is no need to wait for ack from the reactor pub async fn finalize_parent_height(&self, cp: Checkpoint) -> anyhow::Result<()> { + self.set_checkpoint(cp.clone()); self.tx.send(ParentSyncerRequest::Finalized(cp)).await?; Ok(()) } - pub async fn latest_checkpoint(&self) -> anyhow::Result { - let (tx, rx) = oneshot::channel(); - self.tx - .send(ParentSyncerRequest::QueryLatestCheckpoint(tx)) - .await?; - Ok(rx.await?) - } - - pub async fn query_parent_block_view( + pub fn prepare_quorum_cert_content( &self, - to: BlockHeight, - ) -> anyhow::Result>>> { - let (tx, rx) = oneshot::channel(); - self.tx - .send(ParentSyncerRequest::QueryParentBlockViews { to, tx }) - .await?; - Ok(rx.await?) + end_height: BlockHeight, + ) -> anyhow::Result { + let latest_checkpoint = self.checkpoint.lock().unwrap().clone(); + + let mut xnet_msgs = vec![]; + let mut validator_changes = vec![]; + let mut linear = LinearizedParentBlockView::from(&latest_checkpoint); + + let start = latest_checkpoint.target_height() + 1; + for h in start..=end_height { + let Some(v) = self.store.get(h)? 
else { + return Err(anyhow!("parent block view store does not have data at {h}")); + }; + + if let Err(e) = linear.append(v.clone()) { + return Err(anyhow!("parent block view cannot be appended: {e}")); + } + + if let Some(payload) = v.payload { + xnet_msgs.extend(payload.xnet_msgs); + validator_changes.extend(payload.validator_changes); + } + } + + let ob = linear + .into_observation() + .map_err(|e| anyhow!("cannot convert linearized parent view into observation: {e}"))?; + + Ok((ob, xnet_msgs, validator_changes)) } } pub enum ParentSyncerRequest { /// A new parent height is finalized Finalized(Checkpoint), - QueryLatestCheckpoint(oneshot::Sender), - QueryParentBlockViews { - to: BlockHeight, - tx: oneshot::Sender>>>, - }, -} - -fn handle_request( - req: ParentSyncerRequest, - poller: &mut P, -) { - match req { - ParentSyncerRequest::Finalized(c) => { - let height = c.target_height(); - if let Err(e) = poller.finalize(c) { - tracing::error!(height, err = e.to_string(), "cannot finalize parent viewer"); - } - } - ParentSyncerRequest::QueryParentBlockViews { to, tx } => { - let r = poller.dump_parent_block_views(to).map_err(|e| { - tracing::error!( - height = to, - err = e.to_string(), - "cannot query parent block view" - ); - anyhow!("cannot read parent block view: {}", e) - }); - let _ = tx.send(r); - } - ParentSyncerRequest::QueryLatestCheckpoint(tx) => { - let _ = tx.send(poller.last_checkpoint().clone()); - } - } } diff --git a/fendermint/vm/topdown/src/syncer/poll.rs b/fendermint/vm/topdown/src/syncer/poll.rs index 38915d21d..1e73eb1e3 100644 --- a/fendermint/vm/topdown/src/syncer/poll.rs +++ b/fendermint/vm/topdown/src/syncer/poll.rs @@ -29,15 +29,17 @@ pub struct ParentPoll { #[async_trait] impl ParentPoller for ParentPoll where - S: ParentViewStore + Send + Sync + 'static, + S: ParentViewStore + Send + Sync + 'static + Clone, P: Send + Sync + 'static + ParentQueryProxy, { + type Store = S; + fn subscribe(&self) -> Receiver { 
self.event_broadcast.subscribe() } - fn last_checkpoint(&self) -> &Checkpoint { - &self.last_finalized + fn store(&self) -> Self::Store { + self.store.clone() } /// The target block height is finalized, purge all the parent view before the target height @@ -48,6 +50,9 @@ where for h in min_height..=checkpoint.target_height() { self.store.purge(h)?; } + + self.last_finalized = checkpoint; + Ok(()) } @@ -58,7 +63,7 @@ where }; let (mut latest_height_fetched, mut first_non_null_parent_hash) = - self.latest_nonnull_data().await?; + self.latest_nonnull_data()?; tracing::debug!(chain_head, latest_height_fetched, "syncing heights"); if latest_height_fetched > chain_head { @@ -110,22 +115,6 @@ where Ok(()) } - - fn dump_parent_block_views( - &self, - to: BlockHeight, - ) -> anyhow::Result>> { - let store = self.store(); - - let mut r = vec![]; - - let start = self.last_checkpoint().target_height() + 1; - for h in start..=to { - r.push(store.get(h)?) - } - - Ok(r) - } } impl ParentPoll @@ -144,12 +133,8 @@ where } } - pub fn store(&self) -> &S { - &self.store - } - /// Get the latest non null block data stored - async fn latest_nonnull_data(&self) -> anyhow::Result<(BlockHeight, BlockHash)> { + fn latest_nonnull_data(&self) -> anyhow::Result<(BlockHeight, BlockHash)> { let Some(latest_height) = self.store.max_parent_view_height()? 
else { return Ok(( self.last_finalized.target_height(), diff --git a/fendermint/vm/topdown/src/syncer/store.rs b/fendermint/vm/topdown/src/syncer/store.rs index e952f60b2..20082dd32 100644 --- a/fendermint/vm/topdown/src/syncer/store.rs +++ b/fendermint/vm/topdown/src/syncer/store.rs @@ -4,25 +4,27 @@ use crate::syncer::error::Error; use crate::syncer::payload::ParentBlockView; use crate::{BlockHeight, SequentialKeyCache}; +use std::sync::{Arc, RwLock}; /// Stores the parent view observed of the current node pub trait ParentViewStore { /// Store a newly observed parent view - fn store(&mut self, view: ParentBlockView) -> Result<(), Error>; + fn store(&self, view: ParentBlockView) -> Result<(), Error>; /// Get the parent view at the specified height fn get(&self, height: BlockHeight) -> Result, Error>; /// Purge the parent view at the target height - fn purge(&mut self, height: BlockHeight) -> Result<(), Error>; + fn purge(&self, height: BlockHeight) -> Result<(), Error>; fn min_parent_view_height(&self) -> Result, Error>; fn max_parent_view_height(&self) -> Result, Error>; } +#[derive(Clone)] pub struct InMemoryParentViewStore { - inner: SequentialKeyCache, + inner: Arc>>, } impl Default for InMemoryParentViewStore { @@ -34,32 +36,37 @@ impl Default for InMemoryParentViewStore { impl InMemoryParentViewStore { pub fn new() -> Self { Self { - inner: SequentialKeyCache::sequential(), + inner: Arc::new(RwLock::new(SequentialKeyCache::sequential())), } } } impl ParentViewStore for InMemoryParentViewStore { - fn store(&mut self, view: ParentBlockView) -> Result<(), Error> { - self.inner + fn store(&self, view: ParentBlockView) -> Result<(), Error> { + let mut inner = self.inner.write().unwrap(); + inner .append(view.parent_height, view) .map_err(|_| Error::NonSequentialParentViewInsert) } fn get(&self, height: BlockHeight) -> Result, Error> { - Ok(self.inner.get_value(height).cloned()) + let inner = self.inner.read().unwrap(); + Ok(inner.get_value(height).cloned()) } - 
fn purge(&mut self, height: BlockHeight) -> Result<(), Error> { - self.inner.remove_key_below(height + 1); + fn purge(&self, height: BlockHeight) -> Result<(), Error> { + let mut inner = self.inner.write().unwrap(); + inner.remove_key_below(height + 1); Ok(()) } fn min_parent_view_height(&self) -> Result, Error> { - Ok(self.inner.lower_bound()) + let inner = self.inner.read().unwrap(); + Ok(inner.lower_bound()) } fn max_parent_view_height(&self) -> Result, Error> { - Ok(self.inner.upper_bound()) + let inner = self.inner.read().unwrap(); + Ok(inner.upper_bound()) } } From 2ca18eb9c74ab7dfa9bd2a78641405646deed8f6 Mon Sep 17 00:00:00 2001 From: cryptoAtwill Date: Wed, 30 Oct 2024 15:24:40 +0800 Subject: [PATCH 16/22] fix null checkpoint when boot --- fendermint/vm/topdown/src/launch.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/fendermint/vm/topdown/src/launch.rs b/fendermint/vm/topdown/src/launch.rs index 63a2a5d4f..4170ec6ee 100644 --- a/fendermint/vm/topdown/src/launch.rs +++ b/fendermint/vm/topdown/src/launch.rs @@ -49,6 +49,7 @@ where ParentSyncerReactorClient::new(config.syncer.request_channel_size, store); let (voting_client, voting_rx) = VoteReactorClient::new(config.voting.req_channel_buffer_size); + let syncer_client_cloned = syncer_client.clone(); tokio::spawn(async move { let query = Arc::new(query); let checkpoint = query_starting_checkpoint(&query, &parent_client) @@ -70,6 +71,11 @@ where let poller = poller_fn(&checkpoint, parent_client, config.syncer.clone()); let internal_event_rx = poller.subscribe(); + syncer_client_cloned + .finalize_parent_height(checkpoint.clone()) + .await + .expect("should be ok to set checkpoint"); + start_polling_reactor(syncer_rx, poller, config.syncer); VoteReactorClient::start_reactor( voting_rx, From 67d3d671a680262f3a7eeff0a71e55f3f2300340 Mon Sep 17 00:00:00 2001 From: cryptoAtwill Date: Thu, 31 Oct 2024 11:32:50 +0800 Subject: [PATCH 17/22] fix errors --- fendermint/vm/topdown/src/vote/tally.rs | 15 +++- 
fendermint/vm/topdown/tests/vote_reactor.rs | 76 ++++++++++++--------- 2 files changed, 56 insertions(+), 35 deletions(-) diff --git a/fendermint/vm/topdown/src/vote/tally.rs b/fendermint/vm/topdown/src/vote/tally.rs index 0aba56ab7..ac4d92370 100644 --- a/fendermint/vm/topdown/src/vote/tally.rs +++ b/fendermint/vm/topdown/src/vote/tally.rs @@ -90,7 +90,10 @@ impl VoteTally { } pub fn check_quorum_cert(&self, cert: &ECDSACertificate) -> bool { - let power_table = self.power_table.iter().map(|(v, w)| (v.public_key(), *w)); + let power_table = self + .ordered_validators() + .into_iter() + .map(|(v, w)| (v.public_key(), *w)); match cert.quorum_reached(power_table, self.quorum_ratio) { Ok(v) => v, Err(e) => { @@ -196,7 +199,15 @@ impl VoteTally { /// /// After this operation the minimum item in the chain will the new finalized block. pub fn set_finalized(&mut self, block_height: BlockHeight) -> Result<(), Error> { - self.votes.purge_votes_at_height(block_height)?; + let start = if let Some(start) = self.votes.earliest_vote_height()? 
{ + start + } else { + block_height + }; + for h in start..=block_height { + self.votes.purge_votes_at_height(h)?; + } + self.last_finalized_height = block_height; Ok(()) } diff --git a/fendermint/vm/topdown/tests/vote_reactor.rs b/fendermint/vm/topdown/tests/vote_reactor.rs index 15590618a..90ac49ed0 100644 --- a/fendermint/vm/topdown/tests/vote_reactor.rs +++ b/fendermint/vm/topdown/tests/vote_reactor.rs @@ -14,9 +14,7 @@ use fendermint_vm_topdown::vote::error::Error; use fendermint_vm_topdown::vote::gossip::GossipClient; use fendermint_vm_topdown::vote::payload::{PowerUpdates, Vote}; use fendermint_vm_topdown::vote::store::InMemoryVoteStore; -use fendermint_vm_topdown::vote::{ - start_vote_reactor, StartVoteReactorParams, VoteConfig, VoteReactorClient, Weight, -}; +use fendermint_vm_topdown::vote::{StartVoteReactorParams, VoteConfig, VoteReactorClient, Weight}; use fendermint_vm_topdown::BlockHeight; use tokio::sync::broadcast; use tokio::sync::broadcast::error::TryRecvError; @@ -130,16 +128,20 @@ async fn simple_lifecycle() { let (internal_event_tx, _) = broadcast::channel(validators.len() + 1); - let client = start_vote_reactor(StartVoteReactorParams { - config: config.clone(), - validator_key: validators[0].sk.clone(), - power_table: power_updates.clone(), - last_finalized_height: initial_finalized_height, - latest_child_block: 100, - gossip: gossips.pop().unwrap(), - vote_store: InMemoryVoteStore::default(), - internal_event_listener: internal_event_tx.subscribe(), - }) + let (client, rx) = VoteReactorClient::new(1024); + VoteReactorClient::start_reactor( + rx, + StartVoteReactorParams { + config: config.clone(), + validator_key: validators[0].sk.clone(), + power_table: power_updates.clone(), + last_finalized_height: initial_finalized_height, + latest_child_block: 100, + gossip: gossips.pop().unwrap(), + vote_store: InMemoryVoteStore::default(), + internal_event_listener: internal_event_tx.subscribe(), + }, + ) .unwrap(); 
assert_eq!(client.find_quorum().await.unwrap(), None); @@ -208,16 +210,20 @@ async fn waiting_for_quorum() { for i in 0..validators.len() { let (internal_event_tx, _) = broadcast::channel(validators.len() + 1); - let client = start_vote_reactor(StartVoteReactorParams { - config: config.clone(), - validator_key: validators[i].sk.clone(), - power_table: power_updates.clone(), - last_finalized_height: initial_finalized_height, - latest_child_block: 100, - gossip: gossips.pop().unwrap(), - vote_store: InMemoryVoteStore::default(), - internal_event_listener: internal_event_tx.subscribe(), - }) + let (client, rx) = VoteReactorClient::new(1024); + VoteReactorClient::start_reactor( + rx, + StartVoteReactorParams { + config: config.clone(), + validator_key: validators[i].sk.clone(), + power_table: power_updates.clone(), + last_finalized_height: initial_finalized_height, + latest_child_block: 100, + gossip: gossips.pop().unwrap(), + vote_store: InMemoryVoteStore::default(), + internal_event_listener: internal_event_tx.subscribe(), + }, + ) .unwrap(); clients.push(client); @@ -338,16 +344,20 @@ async fn all_validator_in_sync() { let mut node_clients = vec![]; for validator in &validators { - let r = start_vote_reactor(StartVoteReactorParams { - config: config.clone(), - validator_key: validator.sk.clone(), - power_table: power_updates.clone(), - last_finalized_height: initial_finalized_height, - latest_child_block: 100, - gossip: gossips.pop().unwrap(), - vote_store: InMemoryVoteStore::default(), - internal_event_listener: internal_event_tx.subscribe(), - }) + let (r, rx) = VoteReactorClient::new(1024); + VoteReactorClient::start_reactor( + rx, + StartVoteReactorParams { + config: config.clone(), + validator_key: validator.sk.clone(), + power_table: power_updates.clone(), + last_finalized_height: initial_finalized_height, + latest_child_block: 100, + gossip: gossips.pop().unwrap(), + vote_store: InMemoryVoteStore::default(), + internal_event_listener: 
internal_event_tx.subscribe(), + }, + ) .unwrap(); node_clients.push(r); From e6514c9eeada7c4a7e67f9ebd079d62a40c60aed Mon Sep 17 00:00:00 2001 From: cryptoAtwill Date: Thu, 31 Oct 2024 11:36:45 +0800 Subject: [PATCH 18/22] remove equivocation vote check --- fendermint/vm/topdown/src/vote/tally.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/fendermint/vm/topdown/src/vote/tally.rs b/fendermint/vm/topdown/src/vote/tally.rs index ac4d92370..4151c457e 100644 --- a/fendermint/vm/topdown/src/vote/tally.rs +++ b/fendermint/vm/topdown/src/vote/tally.rs @@ -150,12 +150,11 @@ impl VoteTally { } if self.votes.has_voted(&parent_height, &validator)? { - tracing::error!( + tracing::warn!( parent_height, validator = validator.to_string(), "equivocation by validator" ); - return Err(Error::Equivocation); } self.votes.store_vote(parent_height, vote)?; From 700f74a1c931f294290146e7be8a2bb9b60a5956 Mon Sep 17 00:00:00 2001 From: cryptoAtwill Date: Thu, 31 Oct 2024 12:42:18 +0800 Subject: [PATCH 19/22] fix ipc error --- ipc/api/src/lib.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ipc/api/src/lib.rs b/ipc/api/src/lib.rs index 1b61454de..17fd3a5e9 100644 --- a/ipc/api/src/lib.rs +++ b/ipc/api/src/lib.rs @@ -96,7 +96,11 @@ impl serde_with::SerializeAs> for HumanReadable { where S: Serializer, { - hex::encode(source).serialize(serializer) + if serializer.is_human_readable() { + hex::encode(source).serialize(serializer) + } else { + source.serialize(serializer) + } } } From 7e08db70b1dd2330db440c979a7131b93a917fd4 Mon Sep 17 00:00:00 2001 From: cryptoAtwill Date: Mon, 6 Jan 2025 18:36:34 +0800 Subject: [PATCH 20/22] clippy --- fendermint/app/src/cmd/run.rs | 4 +--- fendermint/app/src/ipc.rs | 1 - ipld/resolver/src/behaviour/membership.rs | 1 - ipld/resolver/src/service.rs | 1 - 4 files changed, 1 insertion(+), 6 deletions(-) diff --git a/fendermint/app/src/cmd/run.rs b/fendermint/app/src/cmd/run.rs index 5fbbe1679..6074dc03b 100644 --- 
a/fendermint/app/src/cmd/run.rs +++ b/fendermint/app/src/cmd/run.rs @@ -40,18 +40,16 @@ use fendermint_vm_topdown::vote::VoteConfig; use fendermint_vm_topdown::{Checkpoint, TopdownClient}; use fvm_shared::address::{current_network, Address, Network}; use ipc_api::subnet_id::SubnetID; -use ipc_ipld_resolver::{Event as ResolverEvent, Event, SubnetVoteRecord}; +use ipc_ipld_resolver::{Event as ResolverEvent, SubnetVoteRecord}; use ipc_observability::observe::register_metrics as register_default_metrics; use ipc_provider::config::subnet::{EVMSubnet, SubnetConfig}; use ipc_provider::IpcProvider; use libp2p::identity::secp256k1; use libp2p::identity::Keypair; -use std::future::Future; use std::sync::Arc; use std::time::Duration; use tendermint_rpc::Client; use tokio::sync::broadcast; -use tokio::sync::broadcast::error::{RecvError, TryRecvError}; use tokio::sync::broadcast::Receiver; use tower::ServiceBuilder; use tracing::info; diff --git a/fendermint/app/src/ipc.rs b/fendermint/app/src/ipc.rs index 981815427..5c7ed93c6 100644 --- a/fendermint/app/src/ipc.rs +++ b/fendermint/app/src/ipc.rs @@ -14,7 +14,6 @@ use fvm_ipld_blockstore::Blockstore; use std::sync::Arc; use fendermint_vm_topdown::Checkpoint; -use tendermint_rpc::Client; /// Queries the LATEST COMMITTED parent finality from the storage pub struct AppParentFinalityQuery diff --git a/ipld/resolver/src/behaviour/membership.rs b/ipld/resolver/src/behaviour/membership.rs index b62662839..fa62323e9 100644 --- a/ipld/resolver/src/behaviour/membership.rs +++ b/ipld/resolver/src/behaviour/membership.rs @@ -9,7 +9,6 @@ use crate::hash::blake2b_256; use crate::provider_cache::{ProviderDelta, SubnetProviderCache}; use crate::provider_record::{ProviderRecord, SignedProviderRecord}; use crate::vote_record::SubnetVoteRecord; -use crate::vote_record::{SignedVoteRecord, VoteRecord}; use crate::Timestamp; use crate::{observe, NetworkConfig}; use anyhow::anyhow; diff --git a/ipld/resolver/src/service.rs 
b/ipld/resolver/src/service.rs index a06ffa163..627f1e67b 100644 --- a/ipld/resolver/src/service.rs +++ b/ipld/resolver/src/service.rs @@ -9,7 +9,6 @@ use crate::behaviour::{ }; use crate::client::Client; use crate::observe; -use crate::vote_record::{SignedVoteRecord, VoteRecord}; use anyhow::anyhow; use bloom::{BloomFilter, ASMS}; use ipc_api::subnet_id::SubnetID; From a289a56298eebc4b0071d8a385b3a9bbb6a42223 Mon Sep 17 00:00:00 2001 From: cryptoAtwill Date: Mon, 13 Jan 2025 12:34:03 +0800 Subject: [PATCH 21/22] fix CI --- contracts/test/IntegrationTestBase.sol | 12 ++++++++++-- contracts/test/integration/GatewayDiamond.t.sol | 17 ++++++++++++++--- contracts/test/integration/MultiSubnet.t.sol | 6 +++++- .../vm/message/golden/chain/ipc_top_down.cbor | 2 +- .../vm/message/golden/chain/ipc_top_down.txt | 2 +- fendermint/vm/topdown/src/vote/tally.rs | 3 ++- 6 files changed, 33 insertions(+), 9 deletions(-) diff --git a/contracts/test/IntegrationTestBase.sol b/contracts/test/IntegrationTestBase.sol index 8f84ecd84..65053b095 100644 --- a/contracts/test/IntegrationTestBase.sol +++ b/contracts/test/IntegrationTestBase.sol @@ -782,7 +782,11 @@ contract IntegrationTestBase is Test, TestParams, TestRegistry, TestSubnetActor, weights[1] = 100; weights[2] = 100; - TopdownCheckpoint memory finality = TopdownCheckpoint({height: block.number, blockHash: bytes32(0), effectsCommitment: new bytes(0)}); + TopdownCheckpoint memory finality = TopdownCheckpoint({ + height: block.number, + blockHash: bytes32(0), + effectsCommitment: new bytes(0) + }); vm.prank(FilAddress.SYSTEM_ACTOR); gatewayDiamond.topDownFinalizer().commitTopdownCheckpoint(finality); @@ -829,7 +833,11 @@ contract IntegrationTestBase is Test, TestParams, TestRegistry, TestSubnetActor, weights[0] = weight; vm.deal(validator, 1); - TopdownCheckpoint memory finality = TopdownCheckpoint({height: block.number, blockHash: bytes32(0), effectsCommitment: new bytes(0)}); + TopdownCheckpoint memory finality = 
TopdownCheckpoint({ + height: block.number, + blockHash: bytes32(0), + effectsCommitment: new bytes(0) + }); // uint64 n = gatewayDiamond.getter().getLastConfigurationNumber() + 1; vm.startPrank(FilAddress.SYSTEM_ACTOR); diff --git a/contracts/test/integration/GatewayDiamond.t.sol b/contracts/test/integration/GatewayDiamond.t.sol index 5ac07d131..7f269a6ac 100644 --- a/contracts/test/integration/GatewayDiamond.t.sol +++ b/contracts/test/integration/GatewayDiamond.t.sol @@ -962,7 +962,11 @@ contract GatewayActorDiamondTest is Test, IntegrationTestBase, SubnetWithNativeT vm.prank(caller); vm.expectRevert(NotSystemActor.selector); - TopdownCheckpoint memory finality = TopdownCheckpoint({height: block.number, blockHash: bytes32(0), effectsCommitment: new bytes(0)}); + TopdownCheckpoint memory finality = TopdownCheckpoint({ + height: block.number, + blockHash: bytes32(0), + effectsCommitment: new bytes(0) + }); gatewayDiamond.topDownFinalizer().commitTopdownCheckpoint(finality); } @@ -1048,14 +1052,21 @@ contract GatewayActorDiamondTest is Test, IntegrationTestBase, SubnetWithNativeT // not the same as init committed parent finality height vm.roll(10); - TopdownCheckpoint memory finality = TopdownCheckpoint({height: block.number, blockHash: bytes32(0), effectsCommitment: new bytes(0)}); + TopdownCheckpoint memory finality = TopdownCheckpoint({ + height: block.number, + blockHash: bytes32(0), + effectsCommitment: new bytes(0) + }); gatewayDiamond.topDownFinalizer().commitTopdownCheckpoint(finality); TopdownCheckpoint memory committedFinality = gatewayDiamond.getter().getTopdownCheckpoint(block.number); require(committedFinality.height == finality.height, "heights are not equal"); require(committedFinality.blockHash == finality.blockHash, "blockHash is not equal"); - require(gatewayDiamond.getter().getLatestTopdownCheckpoint().height == block.number, "finality height not equal"); + require( + gatewayDiamond.getter().getLatestTopdownCheckpoint().height == block.number, + 
"finality height not equal" + ); vm.stopPrank(); } diff --git a/contracts/test/integration/MultiSubnet.t.sol b/contracts/test/integration/MultiSubnet.t.sol index 835fe2236..47b71c48f 100644 --- a/contracts/test/integration/MultiSubnet.t.sol +++ b/contracts/test/integration/MultiSubnet.t.sol @@ -1292,7 +1292,11 @@ contract MultiSubnetTest is Test, IntegrationTestBase { function commitTopdownCheckpoint(address gateway) internal { vm.roll(10); - TopdownCheckpoint memory finality = TopdownCheckpoint({height: block.number, blockHash: bytes32(0), effectsCommitment: new bytes(0)}); + TopdownCheckpoint memory finality = TopdownCheckpoint({ + height: block.number, + blockHash: bytes32(0), + effectsCommitment: new bytes(0) + }); TopDownFinalityFacet gwTopDownFinalityFacet = TopDownFinalityFacet(address(gateway)); diff --git a/fendermint/vm/message/golden/chain/ipc_top_down.cbor b/fendermint/vm/message/golden/chain/ipc_top_down.cbor index c9db04135..3e37ed490 100644 --- a/fendermint/vm/message/golden/chain/ipc_top_down.cbor +++ b/fendermint/vm/message/golden/chain/ipc_top_down.cbor @@ -1 +1 @@ -a163497063a16b546f70446f776e45786563a26463657274a2677061796c6f6164a36d706172656e745f6865696768741bffffffffffffffff6b706172656e745f6861736885189f183918f9188b18357763756d756c61746976655f656666656374735f636f6d6d8801183a186018a7184e182f181c183a6a7369676e61747572657381f66765666665637473828080 \ No newline at end of file +a163497063a16b546f70446f776e45786563a26463657274a2677061796c6f6164a374706172656e745f7375626e65745f6865696768741bb71f35f7c2df1f1c72706172656e745f7375626e65745f68617368890418a918771891187c187a1872187118927763756d756c61746976655f656666656374735f636f6d6d8118bd6a7369676e61747572657381f66765666665637473828080 \ No newline at end of file diff --git a/fendermint/vm/message/golden/chain/ipc_top_down.txt b/fendermint/vm/message/golden/chain/ipc_top_down.txt index e931e1b07..1f895acae 100644 --- a/fendermint/vm/message/golden/chain/ipc_top_down.txt +++ 
b/fendermint/vm/message/golden/chain/ipc_top_down.txt @@ -1 +1 @@ -Ipc(TopDownExec(TopdownProposal { cert: ECDSACertificate { payload: Observation { parent_height: 18446744073709551615, parent_hash: [159, 57, 249, 139, 53], cumulative_effects_comm: [1, 58, 96, 167, 78, 47, 28, 58] }, signatures: [None] }, effects: ([], []) })) \ No newline at end of file +Ipc(TopDownExec(TopdownProposal { cert: ECDSACertificate { payload: Observation { parent_subnet_height: 13195324771461439260, parent_subnet_hash: [4, 169, 119, 145, 124, 122, 114, 113, 146], cumulative_effects_comm: [189] }, signatures: [None] }, effects: ([], []) })) \ No newline at end of file diff --git a/fendermint/vm/topdown/src/vote/tally.rs b/fendermint/vm/topdown/src/vote/tally.rs index 4151c457e..ac4d92370 100644 --- a/fendermint/vm/topdown/src/vote/tally.rs +++ b/fendermint/vm/topdown/src/vote/tally.rs @@ -150,11 +150,12 @@ impl VoteTally { } if self.votes.has_voted(&parent_height, &validator)? { - tracing::warn!( + tracing::error!( parent_height, validator = validator.to_string(), "equivocation by validator" ); + return Err(Error::Equivocation); } self.votes.store_vote(parent_height, vote)?; From 4a2a42e44b97d95e3293a5457744a357cef87d2d Mon Sep 17 00:00:00 2001 From: cryptoAtwill Date: Mon, 13 Jan 2025 13:00:17 +0800 Subject: [PATCH 22/22] fix tests --- extras/linked-token/test/MultiSubnetTest.t.sol | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/extras/linked-token/test/MultiSubnetTest.t.sol b/extras/linked-token/test/MultiSubnetTest.t.sol index 8b421d7e3..a98dbd311 100644 --- a/extras/linked-token/test/MultiSubnetTest.t.sol +++ b/extras/linked-token/test/MultiSubnetTest.t.sol @@ -30,7 +30,7 @@ import {SubnetActorCheckpointingFacet} from "@ipc/contracts/subnet/SubnetActorCh import {CheckpointingFacet} from "@ipc/contracts/gateway/router/CheckpointingFacet.sol"; import {FvmAddressHelper} from "@ipc/contracts/lib/FvmAddressHelper.sol"; import {Consensus, CompressedActivityRollup} from 
"@ipc/contracts/structs/Activity.sol"; -import {IpcEnvelope, BottomUpMsgBatch, BottomUpCheckpoint, ParentFinality, IpcMsgKind, ResultMsg, CallMsg} from "@ipc/contracts/structs/CrossNet.sol"; +import {IpcEnvelope, BottomUpMsgBatch, BottomUpCheckpoint, IpcMsgKind, ResultMsg, CallMsg} from "@ipc/contracts/structs/CrossNet.sol"; import {SubnetIDHelper} from "@ipc/contracts/lib/SubnetIDHelper.sol"; import {IERC20} from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; import {CrossMsgHelper} from "@ipc/contracts/lib/CrossMsgHelper.sol";