diff --git a/Cargo.lock b/Cargo.lock index 2dd0a4d81f..a0257b81cc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1661,6 +1661,7 @@ dependencies = [ "rand 0.8.5", "rand_xorshift 0.2.0", "random-crash", + "rangetools", "rayon", "rlp 0.4.6", "rlp_derive", @@ -2013,6 +2014,7 @@ dependencies = [ "base64ct", "bcs", "blockgen", + "bls-signatures", "cfx-types", "cfxcore", "cfxcore-accounts", @@ -7014,6 +7016,12 @@ dependencies = [ "rand 0.7.3", ] +[[package]] +name = "rangetools" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6e97ca3dbabd81e6033cfe09f0cef37c89f34f2a9223cab7cf99305d46a9633" + [[package]] name = "rayon" version = "1.10.0" diff --git a/bins/conflux/Cargo.toml b/bins/conflux/Cargo.toml index 61b9782f0f..303528d1b3 100644 --- a/bins/conflux/Cargo.toml +++ b/bins/conflux/Cargo.toml @@ -63,6 +63,7 @@ hex = "0.3.0" base64ct = "=1.1.1" parity-version = { workspace = true } tokio = { version = "1", features = ["rt"] } +bls-signatures = { workspace = true } [target.'cfg(not(target_env = "msvc"))'.dependencies.jemallocator] version = "0.3.2" @@ -85,3 +86,7 @@ default = ["jemalloc-global"] deadlock-detection = ["parking_lot/deadlock_detection"] jemalloc-global = ["jemallocator", "malloc_size_of/jemalloc-global"] u64-mpt-db-key = ["client/u64_mpt_db_key"] +# According to Rust's feature unification, when a feature is enabled for a dependency in the root package, +# it will be enabled across all paths depending on that package. 
+# (https://doc.rust-lang.org/cargo/reference/features.html#feature-unification) +blst-portable = ["bls-signatures/blst-portable"] diff --git a/crates/cfxcore/core/Cargo.toml b/crates/cfxcore/core/Cargo.toml index 11e02619c1..f6c61c5f48 100644 --- a/crates/cfxcore/core/Cargo.toml +++ b/crates/cfxcore/core/Cargo.toml @@ -66,6 +66,7 @@ prometheus = { version = "0.7.0", default-features = false } rand = { workspace = true } rand_08 = {package = "rand", version = "0.8"} rand_xorshift="0.2" +rangetools = "0.1.4" random-crash = { workspace = true } rayon = { workspace = true } rlp = { workspace = true } diff --git a/crates/cfxcore/core/src/consensus/consensus_inner/consensus_new_block_handler.rs b/crates/cfxcore/core/src/consensus/consensus_inner/consensus_new_block_handler.rs index 02b4fd91ce..eea2b84ca9 100644 --- a/crates/cfxcore/core/src/consensus/consensus_inner/consensus_new_block_handler.rs +++ b/crates/cfxcore/core/src/consensus/consensus_inner/consensus_new_block_handler.rs @@ -12,6 +12,7 @@ use crate::{ consensus_executor::{ConsensusExecutor, EpochExecutionTask}, ConsensusGraphInner, NULL, }, + pivot_hint::PivotHint, pos_handler::PosVerifier, ConsensusConfig, }, @@ -49,6 +50,8 @@ pub struct ConsensusNewBlockHandler { /// The type of this node: Archive, Full, or Light. 
node_type: NodeType, + + pivot_hint: Option>, } /// ConsensusNewBlockHandler contains all sub-routines for handling new arriving @@ -60,6 +63,7 @@ impl ConsensusNewBlockHandler { data_man: Arc, executor: Arc, statistics: SharedStatistics, notifications: Arc, node_type: NodeType, pos_verifier: Arc, + pivot_hint: Option>, ) -> Self { let epochs_sender = notifications.epochs_ordered.clone(); let blame_verifier = @@ -75,6 +79,7 @@ impl ConsensusNewBlockHandler { epochs_sender, blame_verifier, node_type, + pivot_hint, } } @@ -1127,8 +1132,8 @@ impl ConsensusNewBlockHandler { return; } else { debug!( - "Start activating block in ConsensusGraph: index = {:?} hash={:?}", - me, inner.arena[me].hash, + "Start activating block in ConsensusGraph: index = {:?} hash={:?} height={:?}", + me, inner.arena[me].hash, inner.arena[me].height, ); } @@ -1221,19 +1226,39 @@ impl ConsensusNewBlockHandler { let force_lca = inner.lca(force_confirm, last); if force_lca == force_confirm && inner.arena[me].parent == last { - inner.pivot_chain.push(me); - inner.set_epoch_number_in_epoch( - me, - inner.pivot_index_to_height(inner.pivot_chain.len()) - 1, - ); - inner.pivot_chain_metadata.push(Default::default()); - extend_pivot = true; - pivot_changed = true; - fork_at = inner.pivot_index_to_height(old_pivot_chain_len) + let me_height = inner.arena[me].height; + let me_hash = inner.arena[me].hash; + let allow_extend = self + .pivot_hint + .as_ref() + .map_or(true, |hint| hint.allow_extend(me_height, me_hash)); + if allow_extend { + inner.pivot_chain.push(me); + inner.set_epoch_number_in_epoch( + me, + inner.pivot_index_to_height(inner.pivot_chain.len()) - 1, + ); + inner.pivot_chain_metadata.push(Default::default()); + extend_pivot = true; + pivot_changed = true; + fork_at = inner.pivot_index_to_height(old_pivot_chain_len); + } else { + debug!("Chain extend rejected by pivot hint: height={me_height}, hash={me_hash:?}"); + fork_at = inner.pivot_index_to_height(old_pivot_chain_len); + } } else { let 
lca = inner.lca(last, me); let new; - if force_confirm != force_lca { + if self.pivot_hint.is_some() && lca == last { + // If pivot hint is enabled, `me` could be an extension of the + // pivot chain, but its parent block is not on the pivot chain. + // This special case can only happen when the parent's chain extension was rejected by the pivot hint. + debug!("Chain extend rejected by pivot hint because parent is rejected."); + fork_at = inner.pivot_index_to_height(old_pivot_chain_len); + // In this case, `pivot_changed` is false. So `new` can be + // an arbitrary value. + new = 0; + } else if force_confirm != force_lca { debug!( "pivot chain switch to force_confirm={} force_height={}", force_confirm, force_height @@ -1248,6 +1273,10 @@ impl ConsensusNewBlockHandler { new = inner.ancestor_at(me, fork_at); let new_weight = inner.weight_tree.get(new); + let me_height = inner.arena[me].height; + let me_ancestor_hash_at = + |height| inner.arena[inner.ancestor_at(me, height)].hash; + + // Note that for properly set consensus parameters, fork_at will + // always be after the force_height (i.e., the + // force confirmation is always stable). 
@@ -1259,6 +1288,13 @@ impl ConsensusNewBlockHandler { (new_weight, &inner.arena[new].hash), (prev_weight, &inner.arena[prev].hash), ) + && self.pivot_hint.as_ref().map_or(true, |hint| { + hint.allow_switch( + fork_at, + me_height, + me_ancestor_hash_at, + ) + }) { pivot_changed = true; } else { diff --git a/crates/cfxcore/core/src/consensus/mod.rs b/crates/cfxcore/core/src/consensus/mod.rs index e9d0d14b0f..e3bff14e6a 100644 --- a/crates/cfxcore/core/src/consensus/mod.rs +++ b/crates/cfxcore/core/src/consensus/mod.rs @@ -7,8 +7,11 @@ pub mod consensus_inner; pub mod consensus_trait; pub mod debug_recompute; mod pastset_cache; +pub mod pivot_hint; pub mod pos_handler; +use self::pivot_hint::{PivotHint, PivotHintConfig}; + use super::consensus::consensus_inner::{ confirmation_meter::ConfirmationMeter, consensus_executor::ConsensusExecutor, @@ -152,6 +155,9 @@ pub struct ConsensusConfig { /// The number of extra epochs that we want to keep /// states/receipts/transactions. pub sync_state_epoch_gap: Option, + + /// The file path and checksum for `PivotHint` + pub pivot_hint_conf: Option, } #[derive(Debug)] @@ -250,7 +256,8 @@ impl ConsensusGraph { notifications: Arc, execution_conf: ConsensusExecutionConfiguration, verification_config: VerificationConfig, node_type: NodeType, - pos_verifier: Arc, params: CommonParams, + pos_verifier: Arc, pivot_hint: Option>, + params: CommonParams, ) -> Self { let inner = Arc::new(RwLock::new(ConsensusGraphInner::with_era_genesis( @@ -288,6 +295,7 @@ impl ConsensusGraph { notifications, node_type, pos_verifier, + pivot_hint, ), confirmation_meter, best_info: RwLock::new(Arc::new(Default::default())), @@ -315,7 +323,8 @@ impl ConsensusGraph { notifications: Arc, execution_conf: ConsensusExecutionConfiguration, verification_conf: VerificationConfig, node_type: NodeType, - pos_verifier: Arc, params: CommonParams, + pos_verifier: Arc, pivot_hint: Option>, + params: CommonParams, ) -> Self { let genesis_hash = 
data_man.get_cur_consensus_era_genesis_hash(); let stable_hash = data_man.get_cur_consensus_era_stable_hash(); @@ -333,6 +342,7 @@ impl ConsensusGraph { verification_conf, node_type, pos_verifier, + pivot_hint, params, ) } diff --git a/crates/cfxcore/core/src/consensus/pivot_hint/config.rs b/crates/cfxcore/core/src/consensus/pivot_hint/config.rs new file mode 100644 index 0000000000..bed1039062 --- /dev/null +++ b/crates/cfxcore/core/src/consensus/pivot_hint/config.rs @@ -0,0 +1,20 @@ +use hash::H256; + +/// Configuration for initializing PivotHint. +#[derive(Clone)] +pub struct PivotHintConfig { + /// Path to the pivot hint file + pub file_path: String, + + /// Expected keccak hash of the Page Digests Part + pub checksum: H256, +} + +impl PivotHintConfig { + pub fn new(file_path: &str, checksum: H256) -> Self { + Self { + file_path: file_path.to_string(), + checksum, + } + } +} diff --git a/crates/cfxcore/core/src/consensus/pivot_hint/header.rs b/crates/cfxcore/core/src/consensus/pivot_hint/header.rs new file mode 100644 index 0000000000..7b66434924 --- /dev/null +++ b/crates/cfxcore/core/src/consensus/pivot_hint/header.rs @@ -0,0 +1,125 @@ +use rangetools::Rangetools; +use std::io::Read; + +pub const HEADER_LENGTH: usize = 28; + +#[derive(Clone, Copy, Debug)] +pub(super) struct PivotHintHeader { + pub minor_interval: u64, + pub major_interval: u64, + pub page_interval: u64, + pub range_max: u64, + pub minor_hash_length: usize, +} + +fn read_u32(mut reader: impl Read) -> Result { + let mut raw = [0u8; 4]; + reader + .read_exact(&mut raw) + .map_err(|e| format!("Cannot load number {:?}", e))?; + Ok(u32::from_le_bytes(raw)) +} + +fn read_u64(mut reader: impl Read) -> Result { + let mut raw = [0u8; 8]; + reader + .read_exact(&mut raw) + .map_err(|e| format!("Cannot load number {:?}", e))?; + Ok(u64::from_le_bytes(raw)) +} + +impl PivotHintHeader { + pub fn from_raw(raw_header: [u8; HEADER_LENGTH]) -> Result { + let mut reader = &raw_header[..]; + + let minor_interval 
= read_u32(&mut reader).unwrap() as u64; + let major_interval = read_u32(&mut reader).unwrap() as u64; + let page_interval = read_u32(&mut reader).unwrap() as u64; + let range_max = read_u64(&mut reader).unwrap(); + let minor_hash_length = read_u32(&mut reader).unwrap() as usize; + + if major_interval % minor_interval != 0 { + return Err("Inconsistent header params: major_interval".into()); + } + + if page_interval % major_interval != 0 { + return Err("Inconsistent header params: page_interval".into()); + } + + if range_max % page_interval != 0 { + return Err("Inconsistent header params: range_max".into()); + } + + let header = PivotHintHeader { + major_interval, + minor_interval, + minor_hash_length, + range_max, + page_interval, + }; + + let page_bytes = read_u32(&mut reader).unwrap() as usize; + if header.page_bytes() != page_bytes { + return Err("Inconsistent page bytes".into()); + } + assert!(reader.is_empty()); + + Ok(header) + } + + pub fn major_section_bytes(&self) -> usize { + (32 * (self.page_interval / self.major_interval)) as usize + } + + pub fn minor_section_bytes(&self) -> usize { + self.minor_hash_length + * (self.page_interval / self.minor_interval) as usize + } + + pub fn page_bytes(&self) -> usize { + self.major_section_bytes() + self.minor_section_bytes() + } + + pub fn page_number(&self) -> usize { + (self.range_max / self.page_interval) as usize + } + + pub fn compute_check_height( + &self, fork_at: u64, me_height: u64, + ) -> Option { + let major_ticks = ((fork_at - 1) / self.major_interval + 1) + ..=(me_height / self.major_interval); + let availd_major_ticks = 0..(self.range_max / self.major_interval); + let last_valid_major_tick = major_ticks + .intersection(availd_major_ticks) + .into_iter() + .next_back(); + let major_height = + last_valid_major_tick.map(|x| x * self.major_interval); + + if cfg!(test) { + assert!(major_height.map_or(true, |h| h >= fork_at + && h <= me_height + && h % self.major_interval == 0 + && h < self.range_max)); + } 
+ + let minor_ticks = ((fork_at - 1) / self.minor_interval + 1) + ..=(me_height / self.minor_interval); + let availd_minor_ticks = 0..(self.range_max / self.minor_interval); + let last_valid_minor_tick = minor_ticks + .intersection(availd_minor_ticks) + .into_iter() + .next_back(); + let minor_height = + last_valid_minor_tick.map(|x| x * self.minor_interval); + if cfg!(test) { + assert!(minor_height.map_or(true, |h| h >= fork_at + && h <= me_height + && h % self.minor_interval == 0 + && h < self.range_max)); + } + + major_height.or(minor_height) + } +} diff --git a/crates/cfxcore/core/src/consensus/pivot_hint/mod.rs b/crates/cfxcore/core/src/consensus/pivot_hint/mod.rs new file mode 100644 index 0000000000..c8a277a5c8 --- /dev/null +++ b/crates/cfxcore/core/src/consensus/pivot_hint/mod.rs @@ -0,0 +1,270 @@ +//! Pivot hint provides validation support during blockchain synchronization and +//! historical data execution by leveraging trusted pivot chain information from +//! authoritative sources. +//! +//! During the synchronization process of archive nodes, when processing +//! historical data, pivot hints help prevent execution on forked branches and +//! protect against problematic historical states that occurred on +//! mainnet/testnet chains. +//! +//! # File Structure +//! The pivot hint file consists of three main parts: +//! * Header Part (28 bytes): Contains configuration parameters +//! * Page Digests Part: A list of keccak hashes for each page +//! * Pages Part: Sequential storage of all pages +//! +//! # Page Organization +//! Blocks are organized into pages based on several configurable parameters: +//! * `range_max`: Upper bound (exclusive) of block heights for stored hashes +//! * `page_interval`: Number of consecutive blocks in each page +//! * Each page contains: +//! - Major section: Full hashes for blocks every `major_interval` heights +//! - Minor section: Hash prefixes (in length of `minor_hash_length`) for +//! 
blocks every `minor_interval` heights +//! +//! # Parameter Constraints +//! The following parameters must maintain integer multiple relationships: +//! * `range_max` must be a multiple of `page_interval` +//! * `page_interval` must be a multiple of `major_interval` +//! * `major_interval` must be a multiple of `minor_interval` +//! +//! # Fork Validation +//! During fork validation, when the consensus layer attempts to switch the +//! pivot chain from branch A to branch B, it must provide: +//! * `fork_at`: The first block height where branch A and B diverge +//! * `me_height`: The last block height of branch B +//! * A query interface to retrieve block hashes on branch B within range +//! [fork_at, me_height] +//! +//! The validation process follows these rules: +//! * If [fork_at, me_height] covers with major section records, validation uses +//! the last recorded full hash +//! * If no major section records covered but minor section overlap exists, +//! validation uses minor section records (note: this may allow switching to +//! branches that aren't on the final main chain) +//! * If neither major nor minor section overlap exists, the switch is allowed +//! +//! When `fork_at` exceeds `range_max`, it indicates the fork point is beyond +//! the static file records, and the switch is automatically allowed. +//! +//! # Loading Process +//! 1. Load and validate Header Part parameters +//! 2. Load Page Digests Part and verify against predetermined Pivot Hint +//! Checksum +//! 3. Keep Page Digests in memory +//! 4. 
Verify each page against Page Digests when loading to prevent corruption + +mod config; +mod header; +mod page; +#[cfg(test)] +mod tests; + +pub use config::PivotHintConfig; +use header::{PivotHintHeader, HEADER_LENGTH}; +use page::PivotHintPage; + +use std::{ + fs::File, + io::{Read, Seek, SeekFrom}, + sync::atomic::{AtomicBool, Ordering}, +}; + +use hash::{keccak, H256}; +use lru_time_cache::LruCache; +use parking_lot::RwLock; + +/// Manages pivot block hash records for chain fork validation during sync +/// process. +pub struct PivotHint { + /// Path to the pivot hint file + file_path: String, + + /// Module status flag. Set to false if error occurs, disabling the module + /// without thread panic. + active: AtomicBool, + + /// Pivot hint header with configuration parameters + header: PivotHintHeader, + + /// LRU cache storing loaded pivot hint pages + pages: RwLock>, + + /// Keccak hashes of all pages, kept in memory for integrity verification + page_digests: Vec, +} + +impl PivotHint { + /// Creates a new PivotHint instance by loading and validating the pivot + /// hint file. + /// + /// # Steps + /// 1. Loads and validates the header + /// 2. Loads page digests and verifies against provided checksum + /// 3. 
Initializes LRU cache for page data + /// + /// # Arguments + /// * `conf` - Configuration containing file path and expected checksum + /// + /// # Errors + /// * File open/read errors + /// * Header parsing errors + /// * Checksum mismatch + pub fn new(conf: &PivotHintConfig) -> Result { + let mut file = File::open(&conf.file_path) + .map_err(|e| format!("Cannot open file: {:?}", e))?; + let mut raw_header = [0u8; HEADER_LENGTH]; + file.read_exact(&mut raw_header) + .map_err(|e| format!("Cannot load header: {:?}", e))?; + let header = PivotHintHeader::from_raw(raw_header) + .map_err(|e| format!("Cannot parse and check header: {}", e))?; + + let mut raw_page_digests = vec![0u8; header.page_number() * 32]; + file.read_exact(&mut raw_page_digests) + .map_err(|e| format!("Cannot load page digests: {:?}", e))?; + let file_checksum = keccak(&raw_page_digests); + if file_checksum != conf.checksum { + return Err("Incorrect checksum".into()); + } + + let page_digests = raw_page_digests + .chunks_exact(32) + .map(H256::from_slice) + .collect(); + + Ok(Self { + file_path: conf.file_path.clone(), + active: AtomicBool::new(true), + header, + pages: RwLock::new(LruCache::with_capacity(5)), + page_digests, + }) + } + + /// Validates if switching to a target branch is allowed based on pivot hint + /// records. + /// + /// # Arguments + /// * `fork_at` - First block height where the current chain and target + /// branch diverge + /// * `me_height` - Last block height of the target branch + /// * `ancestor_hash_at` - Callback to retrieve block hash at specified + /// height on target branch + /// + /// # Returns + /// Returns whether switching to the fork branch is allowed. 
+ pub fn allow_switch( + &self, fork_at: u64, me_height: u64, + ancestor_hash_at: impl FnOnce(u64) -> H256, + ) -> bool { + if !self.active.load(Ordering::Acquire) { + return true; + } + + if fork_at >= self.header.range_max { + return true; + } + + let check_height = if let Some(check_height) = + self.header.compute_check_height(fork_at, me_height) + { + check_height + } else { + return true; + }; + + let actual_hash = ancestor_hash_at(check_height); + let result = self.check_hash(check_height, actual_hash); + debug!("Pivot hint check switch result {result}. fork_at: {fork_at}, me_height: {me_height}, check_height: {check_height}, fetch_hash: {actual_hash:?}"); + result + } + + pub fn allow_extend(&self, height: u64, hash: H256) -> bool { + if !self.active.load(Ordering::Acquire) { + return true; + } + + if height >= self.header.range_max { + return true; + } + + if height % self.header.minor_interval != 0 { + return true; + } + + let page_number = height / self.header.page_interval; + let page_offset = height % self.header.page_interval; + + let result = self.check_with_page(page_number, |page| { + page.check_hash_at_height(page_offset, hash) + }); + debug!("Pivot hint check extend result {result}. 
me_height: {height}, fetch_hash: {hash:?}"); + result + } + + pub fn is_active(&self) -> bool { self.active.load(Ordering::Acquire) } + + fn check_hash(&self, height: u64, hash: H256) -> bool { + let page_number = height / self.header.page_interval; + let page_offset = height % self.header.page_interval; + + self.check_with_page(page_number, |page| { + page.check_hash_at_height(page_offset, hash) + }) + } + + fn check_with_page( + &self, page_number: u64, check: impl Fn(&PivotHintPage) -> bool, + ) -> bool { + let mut guard = self.pages.write(); + if let Some(page) = guard.get(&page_number) { + check(page) + } else { + info!("Loading pivot hint page {}", page_number); + let page = match self.load_page(page_number) { + Ok(page) => page, + Err(e) => { + warn!( + "Failed to load pivot hint page {}, pivot hint check disabled: {}", + page_number, e + ); + self.active.store(false, Ordering::Release); + return true; + } + }; + let result = check(&page); + guard.insert(page_number, page); + result + } + } + + fn load_page(&self, page_number: u64) -> Result { + let page_bytes = self.header.page_bytes(); + let start_pos = HEADER_LENGTH as u64 + + self.page_digests.len() as u64 * 32 + + page_number * page_bytes as u64; + + let mut file = File::open(&self.file_path) + .map_err(|e| format!("Cannot open pivot hint file: {:?}", e))?; + + file.seek(SeekFrom::Start(start_pos)) + .map_err(|e| format!("Cannot seek to start position: {:?}", e))?; + + let mut page_content = vec![0u8; page_bytes]; + file.read_exact(&mut page_content[..]) + .map_err(|e| format!("Cannot load the page: {:?}", e))?; + + let expected_page_checksum = + if let Some(hash) = self.page_digests.get(page_number as usize) { + hash + } else { + return Err("Empty page checksum".into()); + }; + + let actual_page_checksum = keccak(&page_content); + if expected_page_checksum != &actual_page_checksum { + return Err("Incorrect checksum".into()); + } + Ok(PivotHintPage::new(page_content, self.header)) + } +} diff --git 
a/crates/cfxcore/core/src/consensus/pivot_hint/page.rs b/crates/cfxcore/core/src/consensus/pivot_hint/page.rs new file mode 100644 index 0000000000..70f36a5ca6 --- /dev/null +++ b/crates/cfxcore/core/src/consensus/pivot_hint/page.rs @@ -0,0 +1,48 @@ +use hash::H256; + +use super::PivotHintHeader; + +/// A page of block hash data in the pivot hint file. +pub(super) struct PivotHintPage { + /// Configuration parameters + header: PivotHintHeader, + + /// Full block hashes at heights that are multiples of `major_interval` (32 + /// bytes per record) + major_section: Vec, + + /// Hash prefixes at heights that are multiples of `minor_interval` + /// (`minor_hash_length` bytes per record) + minor_section: Vec, +} + +impl PivotHintPage { + pub fn new(mut page_content: Vec, header: PivotHintHeader) -> Self { + let major_section_bytes = header.major_section_bytes(); + let minor_section = page_content.split_off(major_section_bytes); + let major_section = page_content; + Self { + header, + major_section, + minor_section, + } + } + + pub fn check_hash_at_height( + &self, page_offset: u64, actual_hash: H256, + ) -> bool { + if page_offset % self.header.major_interval == 0 { + let major_index = + (page_offset / self.header.major_interval) as usize; + let len = 32; + self.major_section[major_index * len..(major_index + 1) * len] + == actual_hash[..len] + } else { + let minor_index = + (page_offset / self.header.minor_interval) as usize; + let len = self.header.minor_hash_length; + self.minor_section[minor_index * len..(minor_index + 1) * len] + == actual_hash[..len] + } + } +} diff --git a/crates/cfxcore/core/src/consensus/pivot_hint/tests.rs b/crates/cfxcore/core/src/consensus/pivot_hint/tests.rs new file mode 100644 index 0000000000..119cd47b1f --- /dev/null +++ b/crates/cfxcore/core/src/consensus/pivot_hint/tests.rs @@ -0,0 +1,88 @@ +use rand::{thread_rng, RngCore}; +use std::{ + fs::File, + io::{Read, Seek, SeekFrom}, + path::Path, + str::FromStr, +}; + +use hash::H256; + +use 
super::{PivotHint, PivotHintConfig}; + +pub struct TestHashFile { + file: File, +} + +impl TestHashFile { + fn new() -> Self { + let path = Path::new(env!("CARGO_MANIFEST_DIR")) + .join("test_data") + .join("sample_pivot_hash.bin"); + Self { + file: File::open(path).unwrap(), + } + } + + fn hash_at_height(&mut self, height: u64) -> H256 { + assert!(height < 1_000_000); + assert!(height % 5 == 0); + + self.file.seek(SeekFrom::Start((height / 5) * 32)).unwrap(); + let mut answer = H256::default(); + self.file.read_exact(&mut answer.0).unwrap(); + + answer + } +} + +fn make_test_pivot_hint() -> PivotHint { + let file_path = Path::new(env!("CARGO_MANIFEST_DIR")) + .join("test_data") + .join("sample_pivot_hint.bin"); + let checksum = H256::from_str( + "28dcd783ff03d7f9718e95e52c9d56174d83faaa25aaeb9c6cc1dd7239d3069e", + ) + .unwrap(); + let config = PivotHintConfig::new(file_path.to_str().unwrap(), checksum); + PivotHint::new(&config).unwrap() +} + +#[test] +fn test_pivot_hint() { + let pivot_hint = make_test_pivot_hint(); + let mut test_file = TestHashFile::new(); + + let mut rng = thread_rng(); + for _ in 0..100_000 { + let fork_at = rng.next_u64() % 1_500_000; + if fork_at == 0 { + continue; + } + + let diff = match rng.next_u64() % 3 { + 0 => rng.next_u64() % 10, + 1 => rng.next_u64() % 200, + 2 => rng.next_u64() % (1_500_000 - fork_at), + _ => unreachable!(), + }; + + let me_height = fork_at + diff; + + let success = rng.next_u64() % 2 == 0; + let mut manipulated = false; + + let allow_switch = + pivot_hint.allow_switch(fork_at, me_height, |height| { + let mut hash = test_file.hash_at_height(height); + if !success { + hash.0[0] ^= 0x80; + manipulated = true; + } + hash + }); + + assert_eq!(allow_switch, !manipulated); + assert!(pivot_hint.is_active()); + } +} diff --git a/crates/cfxcore/core/src/sync/utils.rs b/crates/cfxcore/core/src/sync/utils.rs index 9c78fec47a..5ff9b6cf64 100644 --- a/crates/cfxcore/core/src/sync/utils.rs +++ 
b/crates/cfxcore/core/src/sync/utils.rs @@ -268,6 +268,7 @@ pub fn initialize_synchronization_graph_with_data_manager( get_logs_filter_max_limit: None, sync_state_starting_epoch: None, sync_state_epoch_gap: None, + pivot_hint_conf: None, }, txpool.clone(), statistics.clone(), @@ -281,6 +282,7 @@ pub fn initialize_synchronization_graph_with_data_manager( verification_config.clone(), NodeType::Archive, pos_verifier.clone(), + /* pivot_hint */ None, params, )); diff --git a/crates/cfxcore/core/test_data/sample_pivot_hash.bin b/crates/cfxcore/core/test_data/sample_pivot_hash.bin new file mode 100644 index 0000000000..a6d086f5d4 Binary files /dev/null and b/crates/cfxcore/core/test_data/sample_pivot_hash.bin differ diff --git a/crates/cfxcore/core/test_data/sample_pivot_hint.bin b/crates/cfxcore/core/test_data/sample_pivot_hint.bin new file mode 100644 index 0000000000..e07a79a1d9 Binary files /dev/null and b/crates/cfxcore/core/test_data/sample_pivot_hint.bin differ diff --git a/crates/client/src/common/mod.rs b/crates/client/src/common/mod.rs index a90254ea1b..cc93c354c4 100644 --- a/crates/client/src/common/mod.rs +++ b/crates/client/src/common/mod.rs @@ -27,7 +27,10 @@ use cfx_types::{address_util::AddressUtil, Address, Space, U256}; pub use cfxcore::pos::pos::PosDropHandle; use cfxcore::{ block_data_manager::BlockDataManager, - consensus::pos_handler::{PosConfiguration, PosVerifier}, + consensus::{ + pivot_hint::PivotHint, + pos_handler::{PosConfiguration, PosVerifier}, + }, genesis_block::{self as genesis, genesis_block}, pow::PowComputer, statistics::Statistics, @@ -357,6 +360,11 @@ pub fn initialize_common_modules( let statistics = Arc::new(Statistics::new()); let notifications = Notifications::init(); + let pivot_hint = if let Some(conf) = &consensus_conf.pivot_hint_conf { + Some(Arc::new(PivotHint::new(conf)?)) + } else { + None + }; let consensus = Arc::new(ConsensusGraph::new( consensus_conf, @@ -370,6 +378,7 @@ pub fn initialize_common_modules( 
verification_config.clone(), node_type, pos_verifier.clone(), + pivot_hint, conf.common_params(), )); diff --git a/crates/client/src/configuration.rs b/crates/client/src/configuration.rs index c3d0533c3a..8dd9aba0ea 100644 --- a/crates/client/src/configuration.rs +++ b/crates/client/src/configuration.rs @@ -33,7 +33,8 @@ use cfxcore::{ }, consensus::{ consensus_inner::consensus_executor::ConsensusExecutionConfiguration, - pos_handler::PosVerifier, ConsensusConfig, ConsensusInnerConfig, + pivot_hint::PivotHintConfig, pos_handler::PosVerifier, ConsensusConfig, + ConsensusInnerConfig, }, consensus_internal_parameters::*, consensus_parameters::*, @@ -148,6 +149,8 @@ build_config! { (heavy_block_difficulty_ratio, (u64), HEAVY_BLOCK_DEFAULT_DIFFICULTY_RATIO) (genesis_accounts, (Option), None) (genesis_secrets, (Option), None) + (pivot_hint_path, (Option), None) + (pivot_hint_checksum, (Option), None) (initial_difficulty, (Option), None) (tanzanite_transition_height, (u64), TANZANITE_HEIGHT) (hydra_transition_number, (Option), Some(68845000)) @@ -619,6 +622,20 @@ impl Configuration { } else { self.raw_conf.enable_optimistic_execution }; + let pivot_hint_conf = match ( + &self.raw_conf.pivot_hint_path, + &self.raw_conf.pivot_hint_checksum, + ) { + (Some(path), Some(checksum)) => { + let checksum = H256::from_str(checksum) + .expect("Cannot parse `pivot_hint_checksum` as hex string"); + Some(PivotHintConfig::new(path, checksum)) + } + (None, None) => None, + _ => { + panic!("`pivot_hint_path` and `pivot_hint_checksum` must be both set or both unset"); + } + }; let mut conf = ConsensusConfig { chain_id: self.chain_id_params(), inner_conf: ConsensusInnerConfig { @@ -669,6 +686,7 @@ impl Configuration { get_logs_filter_max_limit: self.raw_conf.get_logs_filter_max_limit, sync_state_starting_epoch: self.raw_conf.sync_state_starting_epoch, sync_state_epoch_gap: self.raw_conf.sync_state_epoch_gap, + pivot_hint_conf, }; match self.raw_conf.node_type { Some(NodeType::Archive) => {