diff --git a/Cargo.lock b/Cargo.lock index 2647b6788..e4702045e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13771,6 +13771,7 @@ dependencies = [ "sp-runtime 39.0.5", "sp-state-machine 0.43.0", "sp-trie 29.0.0", + "strum 0.26.3", "thiserror 1.0.69", "trie-db 0.29.1", ] diff --git a/Cargo.toml b/Cargo.toml index 64ffcc069..696dcbf41 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -65,6 +65,7 @@ scale-info = { version = "2.11.0", default-features = false, features = [ serde = { version = "1.0.210", default-features = false } serde_json = { version = "1.0.121", default-features = false } smallvec = "1.11.0" +strum = { version = "0.26.3", features = ["derive"] } thiserror = "1.0.48" tokio = "1.36.0" toml = "0.8.19" diff --git a/client/blockchain-service/src/utils.rs b/client/blockchain-service/src/utils.rs index 1b67ed22a..7be14eda0 100644 --- a/client/blockchain-service/src/utils.rs +++ b/client/blockchain-service/src/utils.rs @@ -154,8 +154,8 @@ where // - The size of the route is equal to `MAX_BLOCKS_BEHIND_TO_CATCH_UP_ROOT_CHANGES`, or // - The parent block is not found, or // - We reach the last best block processed. - let mut route = vec![new_block_info.clone().into()]; - let mut last_block_added = new_block_info.clone(); + let mut route = vec![new_block_info.into()]; + let mut last_block_added = new_block_info; loop { // Check if we are at the genesis block. if last_block_added.number == BlockNumber::zero() { @@ -212,7 +212,7 @@ where // The first element in the route is the last best block processed, which will also be the // `pivot`, so it will be ignored when processing the `tree_route`. - route.push(last_best_block.clone().into()); + route.push(last_best_block.into()); // Revert the route so that it is in ascending order of blocks, from the last best block processed up to the new imported best block. 
route.reverse(); @@ -244,7 +244,7 @@ where .into_iter() .chain(std::iter::once(&common_block)) .chain(enacted) - .chain(std::iter::once(&new_block_info.clone().into())) + .chain(std::iter::once(&new_block_info.into())) .cloned() .collect(); diff --git a/client/file-manager/Cargo.toml b/client/file-manager/Cargo.toml index ae8b7b05c..5a78868e5 100644 --- a/client/file-manager/Cargo.toml +++ b/client/file-manager/Cargo.toml @@ -23,6 +23,7 @@ kvdb-rocksdb = { workspace = true } kvdb-memorydb = { workspace = true } log = { workspace = true } serde_json = { workspace = true } +strum = { workspace = true } thiserror = { workspace = true } trie-db = { workspace = true } diff --git a/client/file-manager/src/in_memory.rs b/client/file-manager/src/in_memory.rs index ef1cf273a..a3edcd91c 100644 --- a/client/file-manager/src/in_memory.rs +++ b/client/file-manager/src/in_memory.rs @@ -31,16 +31,6 @@ impl FileDataTrie for InMemoryFileDataTrie { &self.root } - fn stored_chunks_count(&self) -> Result { - let trie = TrieDBBuilder::::new(&self.memdb, &self.root).build(); - let trie_iter = trie - .iter() - .map_err(|_| FileStorageError::FailedToConstructTrieIter)?; - let stored_chunks = trie_iter.count() as u64; - - Ok(stored_chunks) - } - fn generate_proof(&self, chunk_ids: &Vec) -> Result { let recorder: Recorder = Recorder::default(); @@ -152,6 +142,7 @@ where pub file_data: HashMap, InMemoryFileDataTrie>, pub bucket_prefix_map: HashSet<[u8; 64]>, pub exclude_list: HashMap>>, + pub chunk_counts: HashMap, u64>, } impl InMemoryFileStorage @@ -172,6 +163,7 @@ where file_data: HashMap::new(), bucket_prefix_map: HashSet::new(), exclude_list, + chunk_counts: HashMap::new(), } } } @@ -204,7 +196,7 @@ where .as_str(), ); - let stored_chunks = file_data.stored_chunks_count()?; + let stored_chunks = self.stored_chunks_count(file_key)?; if metadata.chunks_count() != stored_chunks { return Err(FileStorageError::IncompleteFile); } @@ -224,9 +216,17 @@ where .to_file_key_proof(metadata.clone())) } + fn stored_chunks_count(&self, key: &HasherOutT) -> Result { + self.chunk_counts + .get(key) + .copied() + .ok_or(FileStorageError::FileDoesNotExist) + } + fn delete_file(&mut self, key: &HasherOutT) -> Result<(), FileStorageError> { self.metadata.remove(key); self.file_data.remove(key); + self.chunk_counts.remove(key); Ok(()) } @@ -252,22 +252,11 @@ where .as_str(), ); - let stored_chunks = file_data.stored_chunks_count()?; - if metadata.chunks_count() != stored_chunks { - return Ok(false); - } - - if metadata.fingerprint - != file_data - .get_root() - .as_ref() - .try_into() - .expect("Hasher output mismatch!") - { + if metadata.fingerprint != file_data.get_root().as_ref().into() { return Ok(false); } - Ok(true) + Ok(metadata.chunks_count() == self.stored_chunks_count(key)?) } fn insert_file( @@ -286,6 +275,9 @@ where panic!("Key already associated with File Data, but not with File Metadata. Possible inconsistency between them."); } + // Initialize chunk count to 0 + self.chunk_counts.insert(key, 0); + let full_key = [metadata.bucket_id.as_slice(), key.as_ref()].concat(); self.bucket_prefix_map.insert(full_key.try_into().unwrap()); @@ -303,6 +295,15 @@ where } self.metadata.insert(key, metadata.clone()); + // Count all chunks in the file trie + let trie = TrieDBBuilder::::new(&file_data.memdb, &file_data.get_root()).build(); + let chunk_count = trie + .iter() + .map_err(|_| FileStorageError::FailedToConstructTrieIter)? 
+ .count(); + + self.chunk_counts.insert(key, chunk_count as u64); + let previous = self.file_data.insert(key, file_data); if previous.is_some() { panic!("Key already associated with File Data, but not with File Metadata. Possible inconsistency between them."); @@ -314,13 +315,6 @@ where Ok(()) } - fn stored_chunks_count(&self, key: &HasherOutT) -> Result { - let file_data = self.file_data.get(key); - let file_data = file_data.ok_or(FileStorageError::FileDoesNotExist)?; - - file_data.stored_chunks_count() - } - fn get_chunk( &self, file_key: &HasherOutT, @@ -352,11 +346,18 @@ where .as_str(), ); - // Check if we have all the chunks for the file. - let stored_chunks = file_data - .stored_chunks_count() - .map_err(|_| FileStorageWriteError::FailedToConstructTrieIter)?; - if metadata.chunks_count() != stored_chunks { + // Increment chunk count + let current_count = self + .chunk_counts + .get(file_key) + .ok_or(FileStorageWriteError::FailedToGetStoredChunksCount)?; + let new_count = current_count + .checked_add(1) + .ok_or(FileStorageWriteError::ChunkCountOverflow)?; + self.chunk_counts.insert(*file_key, new_count); + + // Check if we have all the chunks for the file using the count + if metadata.chunks_count() != new_count { return Ok(FileStorageWriteOutcome::FileIncomplete); } @@ -393,6 +394,7 @@ where for key in keys_to_delete { self.metadata.remove(&key); self.file_data.remove(&key); + self.chunk_counts.remove(&key); } Ok(()) @@ -451,6 +453,19 @@ mod tests { use sp_runtime::AccountId32; use sp_trie::LayoutV1; + fn stored_chunks_count( + file_trie: &InMemoryFileDataTrie>, + ) -> Result { + let trie = + TrieDBBuilder::>::new(&file_trie.memdb, &file_trie.root).build(); + let trie_iter = trie + .iter() + .map_err(|_| FileStorageError::FailedToConstructTrieIter)?; + let stored_chunks = trie_iter.count() as u64; + + Ok(stored_chunks) + } + #[test] fn file_trie_create_empty_works() { let file_trie = InMemoryFileDataTrie::>::new(); @@ -501,11 +516,11 @@ mod tests { let mut file_trie = InMemoryFileDataTrie::>::new(); file_trie.write_chunk(&chunk_ids[0], &chunks[0]).unwrap(); - assert_eq!(file_trie.stored_chunks_count().unwrap(), 1); + assert_eq!(stored_chunks_count(&file_trie).unwrap(), 1); assert!(file_trie.get_chunk(&chunk_ids[0]).is_ok()); file_trie.write_chunk(&chunk_ids[1], &chunks[1]).unwrap(); - assert_eq!(file_trie.stored_chunks_count().unwrap(), 2); + assert_eq!(stored_chunks_count(&file_trie).unwrap(), 2); assert!(file_trie.get_chunk(&chunk_ids[1]).is_ok()); } @@ -522,15 +537,15 @@ mod tests { let mut file_trie = InMemoryFileDataTrie::>::new(); file_trie.write_chunk(&chunk_ids[0], &chunks[0]).unwrap(); - assert_eq!(file_trie.stored_chunks_count().unwrap(), 1); + assert_eq!(stored_chunks_count(&file_trie).unwrap(), 1); assert!(file_trie.get_chunk(&chunk_ids[0]).is_ok()); file_trie.write_chunk(&chunk_ids[1], &chunks[1]).unwrap(); - assert_eq!(file_trie.stored_chunks_count().unwrap(), 2); + assert_eq!(stored_chunks_count(&file_trie).unwrap(), 2); assert!(file_trie.get_chunk(&chunk_ids[1]).is_ok()); file_trie.write_chunk(&chunk_ids[2], &chunks[2]).unwrap(); - assert_eq!(file_trie.stored_chunks_count().unwrap(), 3); + assert_eq!(stored_chunks_count(&file_trie).unwrap(), 3); assert!(file_trie.get_chunk(&chunk_ids[2]).is_ok()); let file_proof = file_trie.generate_proof(&chunk_ids).unwrap(); @@ -554,15 +569,15 @@ mod tests { let mut file_trie = InMemoryFileDataTrie::>::new(); file_trie.write_chunk(&chunk_ids[0], &chunks[0]).unwrap(); - assert_eq!(file_trie.stored_chunks_count().unwrap(), 1); + 
assert_eq!(stored_chunks_count(&file_trie).unwrap(), 1); assert!(file_trie.get_chunk(&chunk_ids[0]).is_ok()); file_trie.write_chunk(&chunk_ids[1], &chunks[1]).unwrap(); - assert_eq!(file_trie.stored_chunks_count().unwrap(), 2); + assert_eq!(stored_chunks_count(&file_trie).unwrap(), 2); assert!(file_trie.get_chunk(&chunk_ids[1]).is_ok()); file_trie.write_chunk(&chunk_ids[2], &chunks[2]).unwrap(); - assert_eq!(file_trie.stored_chunks_count().unwrap(), 3); + assert_eq!(stored_chunks_count(&file_trie).unwrap(), 3); assert!(file_trie.get_chunk(&chunk_ids[2]).is_ok()); file_trie.delete().unwrap(); @@ -570,7 +585,7 @@ mod tests { assert!(file_trie.get_chunk(&chunk_ids[1]).is_err()); assert!(file_trie.get_chunk(&chunk_ids[2]).is_err()); - assert_eq!(file_trie.stored_chunks_count().unwrap(), 0); + assert_eq!(stored_chunks_count(&file_trie).unwrap(), 0); } #[test] @@ -590,15 +605,15 @@ mod tests { let mut file_trie = InMemoryFileDataTrie::>::new(); file_trie.write_chunk(&chunk_ids[0], &chunks[0]).unwrap(); - assert_eq!(file_trie.stored_chunks_count().unwrap(), 1); + assert_eq!(stored_chunks_count(&file_trie).unwrap(), 1); assert!(file_trie.get_chunk(&chunk_ids[0]).is_ok()); file_trie.write_chunk(&chunk_ids[1], &chunks[1]).unwrap(); - assert_eq!(file_trie.stored_chunks_count().unwrap(), 2); + assert_eq!(stored_chunks_count(&file_trie).unwrap(), 2); assert!(file_trie.get_chunk(&chunk_ids[1]).is_ok()); file_trie.write_chunk(&chunk_ids[2], &chunks[2]).unwrap(); - assert_eq!(file_trie.stored_chunks_count().unwrap(), 3); + assert_eq!(stored_chunks_count(&file_trie).unwrap(), 3); assert!(file_trie.get_chunk(&chunk_ids[2]).is_ok()); let file_metadata = FileMetadata { @@ -637,15 +652,15 @@ mod tests { let mut file_trie = InMemoryFileDataTrie::>::new(); file_trie.write_chunk(&chunk_ids[0], &chunks[0]).unwrap(); - assert_eq!(file_trie.stored_chunks_count().unwrap(), 1); + assert_eq!(stored_chunks_count(&file_trie).unwrap(), 1); assert!(file_trie.get_chunk(&chunk_ids[0]).is_ok()); file_trie.write_chunk(&chunk_ids[1], &chunks[1]).unwrap(); - assert_eq!(file_trie.stored_chunks_count().unwrap(), 2); + assert_eq!(stored_chunks_count(&file_trie).unwrap(), 2); assert!(file_trie.get_chunk(&chunk_ids[1]).is_ok()); file_trie.write_chunk(&chunk_ids[2], &chunks[2]).unwrap(); - assert_eq!(file_trie.stored_chunks_count().unwrap(), 3); + assert_eq!(stored_chunks_count(&file_trie).unwrap(), 3); assert!(file_trie.get_chunk(&chunk_ids[2]).is_ok()); let file_metadata = FileMetadata { @@ -690,15 +705,15 @@ mod tests { let mut file_trie = InMemoryFileDataTrie::>::new(); file_trie.write_chunk(&chunk_ids[0], &chunks[0]).unwrap(); - assert_eq!(file_trie.stored_chunks_count().unwrap(), 1); + assert_eq!(stored_chunks_count(&file_trie).unwrap(), 1); assert!(file_trie.get_chunk(&chunk_ids[0]).is_ok()); file_trie.write_chunk(&chunk_ids[1], &chunks[1]).unwrap(); - assert_eq!(file_trie.stored_chunks_count().unwrap(), 2); + assert_eq!(stored_chunks_count(&file_trie).unwrap(), 2); assert!(file_trie.get_chunk(&chunk_ids[1]).is_ok()); file_trie.write_chunk(&chunk_ids[2], &chunks[2]).unwrap(); - assert_eq!(file_trie.stored_chunks_count().unwrap(), 3); + assert_eq!(stored_chunks_count(&file_trie).unwrap(), 3); assert!(file_trie.get_chunk(&chunk_ids[2]).is_ok()); let file_metadata = FileMetadata { diff --git a/client/file-manager/src/rocksdb.rs b/client/file-manager/src/rocksdb.rs index 3254d8b91..a08004168 100644 --- a/client/file-manager/src/rocksdb.rs +++ b/client/file-manager/src/rocksdb.rs @@ -20,23 +20,54 @@ use crate::{ LOG_TARGET, }; use 
codec::{Decode, Encode}; +use strum::EnumCount; + +#[derive(Debug, Clone, Copy, EnumCount)] +pub enum Column { + /// Stores keys of 32 bytes representing the `file_key` with values being the serialized [`FileMetadata`]. + Metadata, + /// Stores keys of 32 bytes representing the final `root` of the file based on the [`FileMetadata::fingerprint`] with values + /// being the current `root` of the constructed file trie based on the chunks stored in the [`Column::Chunks`] for that `file_key`. + /// + /// Used for keeping track of the current root of the file trie for each `file_key`. + Roots, + /// Stores keys of 32 bytes representing the `file_key`. + /// + /// Used for storing the chunks of the file. + Chunks, + /// Stores keys of 32 bytes representing the `file_key`. + /// + /// Used for counting the number of chunks currently stored for the `file_key`. + ChunkCount, + /// Stores keys of 64 bytes representing the concatenation of `bucket_id` and `file_key`. + /// + /// Used for deleting all files in a bucket efficiently. + BucketPrefix, + /// Exclude* columns store keys of 32 bytes representing the `file_key`, with empty values. + /// + /// These columns are used primarily to mark file keys as being excluded from certain operations. + ExcludeFile, + ExcludeUser, + ExcludeBucket, + ExcludeFingerprint, +} + +impl Into<u32> for Column { + fn into(self) -> u32 { + self as u32 + } +} -const METADATA_COLUMN: u32 = 0; -const ROOTS_COLUMN: u32 = 1; -const CHUNKS_COLUMN: u32 = 2; -const BUCKET_PREFIX_COLUMN: u32 = 3; -const EXCLUDE_FILE_COLUMN: u32 = 4; -const EXCLUDE_USER_COLUMN: u32 = 5; -const EXCLUDE_BUCKET_COLUMN: u32 = 6; -const EXCLUDE_FINGERPRINT_COLUMN: u32 = 7; +// The number of columns is derived from the [`Column`] enum via `strum::EnumCount`. +const NUMBER_OF_COLUMNS: u32 = Column::COUNT as u32; // Helper function to map ExcludeType enum to their matching rocksdb column. fn get_exclude_type_db_column(exclude_type: ExcludeType) -> u32 { match exclude_type { - ExcludeType::File => EXCLUDE_FILE_COLUMN, - ExcludeType::User => EXCLUDE_USER_COLUMN, - ExcludeType::Bucket => EXCLUDE_BUCKET_COLUMN, - ExcludeType::Fingerprint => EXCLUDE_FINGERPRINT_COLUMN, + ExcludeType::File => Column::ExcludeFile.into(), + ExcludeType::User => Column::ExcludeUser.into(), + ExcludeType::Bucket => Column::ExcludeBucket.into(), + ExcludeType::Fingerprint => Column::ExcludeFingerprint.into(), } } @@ -46,7 +77,7 @@ fn open_or_creating_rocksdb(db_path: String) -> io::Result io::Result { pub db: Arc, pub _marker: std::marker::PhantomData, } @@ -70,6 +102,8 @@ where DB: KeyValueDB, HasherOutT: TryFrom<[u8; H_LENGTH]>, { + /// Writes a transaction to the database. + /// Returns an error if the write operation fails. fn write(&mut self, transaction: DBTransaction) -> Result<(), ErrorT> { self.db.write(transaction).map_err(|e| { error!(target: LOG_TARGET, "Failed to write to DB: {}", e); @@ -79,6 +113,8 @@ where Ok(()) } + /// Reads data from the specified column and key. + /// Returns the value if found or None if the key doesn't exist.
fn read(&self, column: u32, key: &[u8]) -> Result>, ErrorT> { let value = self.db.get(column, key.as_ref()).map_err(|e| { warn!(target: LOG_TARGET, "Failed to read from DB: {}", e); @@ -101,13 +137,16 @@ impl Clone for StorageDb { impl Storage> for StorageDb { fn get(&self, key: &HasherOutT, prefix: Prefix) -> Result, String> { let prefixed_key = prefixed_key::>(key, prefix); - self.db.get(CHUNKS_COLUMN, &prefixed_key).map_err(|e| { - warn!(target: LOG_TARGET, "Failed to read from DB: {}", e); - format!("Failed to read from DB: {}", e) - }) + self.db + .get(Column::Chunks.into(), &prefixed_key) + .map_err(|e| { + warn!(target: LOG_TARGET, "Failed to read from DB: {}", e); + format!("Failed to read from DB: {}", e) + }) } } +/// Converts raw bytes into a [`HasherOutT`]. fn convert_raw_bytes_to_hasher_out(key: Vec) -> Result, ErrorT> where T: TrieLayout, @@ -126,6 +165,8 @@ where Ok(key) } +/// File data trie implementation using RocksDB for persistent storage. +/// Manages file chunks and their proofs in a merkle trie structure. pub struct RocksDbFileDataTrie { // Persistent storage. storage: StorageDb, @@ -141,6 +182,7 @@ where DB: KeyValueDB, HasherOutT: TryFrom<[u8; H_LENGTH]>, { + /// Creates a new empty file data trie. fn new(storage: StorageDb) -> Self { let (overlay, root) = PrefixedMemoryDB::>::default_with_root(); @@ -151,6 +193,7 @@ where } } + /// Creates a file data trie from existing root and storage. fn from_existing(storage: StorageDb, root: &HasherOutT) -> Self { RocksDbFileDataTrie:: { root: *root, @@ -159,9 +202,8 @@ where } } - /// Persists the changes applied to the overlay. - /// If the root has not changed, the commit will be skipped. - /// The `overlay` will be cleared. + /// Commits changes in the overlay to persistent storage. + /// Skips if root hasn't changed. Clears the overlay after commit. pub fn commit(&mut self, new_root: HasherOutT) -> Result<(), ErrorT> { // Skip commit if the root has not changed. if self.root == new_root { @@ -182,15 +224,15 @@ where Ok(()) } - /// Build [`DBTransaction`] from the overlay and clear it. + /// Builds a database transaction from the overlay and clears it. fn changes(&mut self) -> DBTransaction { let mut transaction = DBTransaction::new(); for (key, (value, rc)) in self.overlay.drain() { if rc <= 0 { - transaction.delete(CHUNKS_COLUMN, &key); + transaction.delete(Column::Chunks.into(), &key); } else { - transaction.put_vec(CHUNKS_COLUMN, &key, value); + transaction.put_vec(Column::Chunks.into(), &key, value); } } @@ -221,28 +263,13 @@ where DB: KeyValueDB, HasherOutT: TryFrom<[u8; H_LENGTH]>, { - // Returns internal root representation kept for immediate access. + /// Returns the current root hash of the trie. fn get_root(&self) -> &HasherOutT { &self.root } - // Returns the amount of chunks currently in storage. - fn stored_chunks_count(&self) -> Result { - let db = self.as_hash_db(); - let trie = TrieDBBuilder::::new(&db, &self.root).build(); - - let count = trie - .iter() - .map_err(|e| { - error!(target: LOG_TARGET, "Failed to construct Trie iterator: {}", e); - FileStorageError::FailedToConstructTrieIter - })? - .count(); - - Ok(count as u64) - } - - // Generates a [`FileProof`] for requested chunks. + /// Generates a proof for the specified chunk IDs. + /// Returns a [`FileProof`] containing the merkle proof and fingerprint. 
fn generate_proof(&self, chunk_ids: &Vec) -> Result { let db = self.as_hash_db(); let recorder: Recorder = Recorder::default(); @@ -289,6 +316,8 @@ where } // TODO: make it accept a list of chunks to be retrieved + /// Retrieves a chunk from the trie by its ID. + /// Returns error if chunk doesn't exist or retrieval fails. fn get_chunk(&self, chunk_id: &ChunkId) -> Result { let db = self.as_hash_db(); let trie = TrieDBBuilder::::new(&db, &self.root).build(); @@ -311,6 +340,8 @@ where } // TODO: make it accept a list of chunks to be written + /// Writes a chunk to the trie with its ID. + /// Returns error if write fails or chunk already exists. fn write_chunk( &mut self, chunk_id: &ChunkId, @@ -356,29 +387,35 @@ where Ok(()) } - // Deletes itself from the underlying db. + /// Deletes all chunks and data associated with this file trie. fn delete(&mut self) -> Result<(), FileStorageWriteError> { let mut root = self.root; - let stored_chunks_count = self.stored_chunks_count().map_err(|e| { - error!(target: LOG_TARGET, "{:?}", e); - FileStorageWriteError::FailedToGetStoredChunksCount - })?; let db = self.as_hash_db_mut(); let trie_root_key = root; let mut trie = TrieDBMutBuilder::::from_existing(db, &mut root).build(); - for chunk_id in 0..stored_chunks_count { - trie.remove(&ChunkId::new(chunk_id as u64).as_trie_key()) - .map_err(|e| { - error!(target: LOG_TARGET, "Failed to delete chunk from RocksDb: {}", e); - FileStorageWriteError::FailedToDeleteChunk - })?; + let mut chunk_id = 0; + loop { + let chunk_id_struct = ChunkId::new(chunk_id as u64); + if !trie.contains(&chunk_id_struct.as_trie_key()).map_err(|e| { + error!(target: LOG_TARGET, "Failed to check if chunk exists: {}", e); + FileStorageWriteError::FailedToDeleteChunk + })? { + break; + } + + trie.remove(&chunk_id_struct.as_trie_key()).map_err(|e| { + error!(target: LOG_TARGET, "Failed to delete chunk from RocksDb: {}", e); + FileStorageWriteError::FailedToDeleteChunk + })?; + + chunk_id += 1; } // Remove the root from the trie. trie.remove(trie_root_key.as_ref()).map_err(|e| { error!(target: LOG_TARGET, "Failed to delete root from RocksDb: {}", e); - FileStorageWriteError::FailedToDeleteChunk + FileStorageWriteError::FailedToDeleteRoot })?; let new_root = *trie.root(); @@ -387,7 +424,7 @@ where // TODO: improve error handling // Commit the changes to disk. - self.commit(trie_root_key).map_err(|e| { + self.commit(new_root).map_err(|e| { error!(target: LOG_TARGET, "Failed to commit changes to persistent storage: {}", e); FileStorageWriteError::FailedToPersistChanges })?; @@ -445,6 +482,7 @@ where } } +/// Manages file metadata, chunks, and proofs using RocksDB as backend. pub struct RocksDbFileStorage where T: TrieLayout + 'static, @@ -460,6 +498,7 @@ where DB: KeyValueDB, HasherOutT: TryFrom<[u8; H_LENGTH]>, { + /// Creates a new file storage instance with the given storage backend. pub fn new(storage: StorageDb) -> Self { Self { storage } } @@ -478,6 +517,42 @@ where _marker: Default::default(), }) } + + /// Constructs a [`RocksDbFileDataTrie`] from the given [`FileMetadata`]. + /// + /// Since files can be partially uploaded (i.e. not all chunks have been inserted to result in the root being the file metadata's fingerprint), + /// the constructed trie is based on the current `partial_root` representing the current state of the file we are interested in. 
+ fn get_file_trie( + &self, + metadata: &FileMetadata, + ) -> Result, FileStorageError> + where + T: TrieLayout + Send + Sync + 'static, + DB: KeyValueDB + 'static, + { + let b_fingerprint = metadata.fingerprint.as_ref(); + let h_fingerprint = + convert_raw_bytes_to_hasher_out::(b_fingerprint.to_vec()).map_err(|e| { + error!(target: LOG_TARGET, "{:?}", e); + FileStorageError::FailedToParseFingerprint + })?; + let raw_partial_root = self + .storage + .read(Column::Roots.into(), h_fingerprint.as_ref()) + .map_err(|e| { + error!(target: LOG_TARGET, "{:?}", e); + FileStorageError::FailedToReadStorage + })? + .expect("Failed to find partial root"); + let mut partial_root = + convert_raw_bytes_to_hasher_out::(raw_partial_root).map_err(|e| { + error!(target: LOG_TARGET, "{:?}", e); + FileStorageError::FailedToParsePartialRoot + })?; + let file_trie = + RocksDbFileDataTrie::::from_existing(self.storage.clone(), &mut partial_root); + Ok(file_trie) + } } impl FileStorage for RocksDbFileStorage @@ -488,81 +563,62 @@ where { type FileDataTrie = RocksDbFileDataTrie; + /// Creates a new empty file data trie instance. fn new_file_data_trie(&self) -> Self::FileDataTrie { RocksDbFileDataTrie::new(self.storage.clone()) } + /// Retrieves a chunk by file key and chunk ID. fn get_chunk( &self, - key: &HasherOutT, + file_key: &HasherOutT, chunk_id: &ChunkId, ) -> Result { let metadata = self - .get_metadata(key)? + .get_metadata(file_key)? .ok_or(FileStorageError::FileDoesNotExist)?; - let raw_final_root = metadata.fingerprint.as_ref(); - let final_root = - convert_raw_bytes_to_hasher_out::(raw_final_root.to_vec()).map_err(|e| { - error!(target: LOG_TARGET,"{:?}", e); - FileStorageError::FailedToParseFingerprint - })?; + let file_trie = self.get_file_trie(&metadata)?; - let raw_partial_root = self + file_trie.get_chunk(chunk_id) + } + + /// Returns the number of chunks currently stored for a given file key tracked by [`CHUNK_COUNT_COLUMN`]. + fn stored_chunks_count(&self, file_key: &HasherOutT) -> Result { + // Read from CHUNK_COUNT_COLUMN using the file key + let current_count = self .storage - .read(ROOTS_COLUMN, final_root.as_ref()) + .read(Column::ChunkCount.into(), file_key.as_ref()) .map_err(|e| { error!(target: LOG_TARGET, "{:?}", e); FileStorageError::FailedToReadStorage })? - .expect("Failed to find partial root"); + .map(|bytes| u64::from_le_bytes(bytes.try_into().unwrap())) + .unwrap_or(0); - let mut partial_root = - convert_raw_bytes_to_hasher_out::(raw_partial_root).map_err(|e| { - error!(target: LOG_TARGET, "{:?}", e); - FileStorageError::FailedToParsePartialRoot - })?; - - let file_trie = - RocksDbFileDataTrie::::from_existing(self.storage.clone(), &mut partial_root); - - file_trie.get_chunk(chunk_id) + Ok(current_count) } + /// Writes a chunk to storage with file key and chunk ID. + /// + /// Returns [`FileStorageWriteOutcome`] indicating if file is complete. This outcome is based on + /// the current number of chunks stored (tracked by [`CHUNK_COUNT_COLUMN`]) and the file metadata's [`FileMetadata::chunks_count`]. fn write_chunk( &mut self, - key: &HasherOutT, + file_key: &HasherOutT, chunk_id: &ChunkId, data: &Chunk, ) -> Result { let metadata = self - .get_metadata(key) + .get_metadata(file_key) .map_err(|_| FileStorageWriteError::FailedToParseFileMetadata)? 
.ok_or(FileStorageWriteError::FileDoesNotExist)?; - let raw_final_root = metadata.fingerprint.as_ref(); - let final_root = - convert_raw_bytes_to_hasher_out::(raw_final_root.to_vec()).map_err(|e| { - error!(target: LOG_TARGET, "{:?}", e); - FileStorageWriteError::FailedToParseFingerprint - })?; - - let raw_partial_root = self - .storage - .read(ROOTS_COLUMN, final_root.as_ref()) - .map_err(|e| { - error!(target: LOG_TARGET, "{:?}", e); - FileStorageWriteError::FailedToReadStorage - })? - .expect("Failed to find partial root"); - let mut partial_root = - convert_raw_bytes_to_hasher_out::(raw_partial_root).map_err(|e| { - error!(target: LOG_TARGET, "{:?}", e); - FileStorageWriteError::FailedToParsePartialRoot - })?; + let mut file_trie = self.get_file_trie(&metadata).map_err(|e| { + error!(target: LOG_TARGET, "{:?}", e); + FileStorageWriteError::FailedToContructFileTrie + })?; - let mut file_trie = - RocksDbFileDataTrie::::from_existing(self.storage.clone(), &mut partial_root); file_trie.write_chunk(chunk_id, data).map_err(|e| { error!(target: LOG_TARGET, "{:?}", e); FileStorageWriteError::FailedToInsertFileChunk @@ -571,65 +627,71 @@ where // Update partial root. let new_partial_root = file_trie.get_root(); let mut transaction = DBTransaction::new(); - transaction.put(ROOTS_COLUMN, raw_final_root, new_partial_root.as_ref()); + transaction.put( + Column::Roots.into(), + metadata.fingerprint.as_ref(), + new_partial_root.as_ref(), + ); + + // Get current chunk count or initialize to 0 + let current_count = self.stored_chunks_count(file_key).map_err(|e| { + error!(target: LOG_TARGET, "{:?}", e); + FileStorageWriteError::FailedToGetStoredChunksCount + })?; + + // Increment chunk count. + // This should never overflow unless there is a bug or we support file sizes as large as 16 exabytes. + // Since this is executed within the context of a write lock in the layer above, we should not have any chunk count syncing issues. + let new_count = current_count + .checked_add(1) + .ok_or(FileStorageWriteError::ChunkCountOverflow)?; + transaction.put( + Column::ChunkCount.into(), + file_key.as_ref(), + &new_count.to_le_bytes(), + ); + self.storage.write(transaction).map_err(|e| { error!(target: LOG_TARGET,"{:?}", e); FileStorageWriteError::FailedToUpdatePartialRoot })?; - // Check if we have all the chunks for the file. - let stored_chunks = file_trie.stored_chunks_count().map_err(|e| { - error!(target: LOG_TARGET,"{:?}", e); - FileStorageWriteError::FailedToConstructTrieIter - })?; - if metadata.chunks_count() != stored_chunks { + // Check if we have all the chunks for the file using the count + if metadata.chunks_count() != new_count { return Ok(FileStorageWriteOutcome::FileIncomplete); } Ok(FileStorageWriteOutcome::FileComplete) } - /// Check if all the chunks of a file are stored. - fn is_file_complete(&self, key: &HasherOutT) -> Result { + /// Checks if all chunks are stored for a given file key. + fn is_file_complete(&self, file_key: &HasherOutT) -> Result { let metadata = self - .get_metadata(key)? + .get_metadata(file_key)? 
.ok_or(FileStorageError::FileDoesNotExist)?; - let raw_final_root = metadata.fingerprint.as_ref(); - let final_root = - convert_raw_bytes_to_hasher_out::(raw_final_root.to_vec()).map_err(|e| { - error!(target: LOG_TARGET,"{:?}", e); - FileStorageError::FailedToParseFingerprint - })?; + let stored_chunks = self.stored_chunks_count(file_key)?; - let raw_partial_root = self - .storage - .read(ROOTS_COLUMN, final_root.as_ref()) - .map_err(|e| { - error!(target: LOG_TARGET,"{:?}", e); - FileStorageError::FailedToReadStorage - })? - .expect("Failed to find partial root"); + let file_trie = self.get_file_trie(&metadata)?; - let mut partial_root = - convert_raw_bytes_to_hasher_out::(raw_partial_root).map_err(|e| { - error!(target: LOG_TARGET,"{:?}", e); - FileStorageError::FailedToParsePartialRoot - })?; - - let file_trie = - RocksDbFileDataTrie::::from_existing(self.storage.clone(), &mut partial_root); + if metadata.fingerprint + != file_trie + .get_root() + .as_ref() + .try_into() + .expect("Hasher output mismatch!") + { + return Ok(false); + } - let stored_chunks = file_trie.stored_chunks_count()?; Ok(metadata.chunks_count() == stored_chunks) } - /// Stores file metadata and an empty root. - /// Should only be used if no chunks have been written yet. - /// Otherwise use [`Self::insert_file_with_data`] + /// Stores file metadata with an empty root. + /// Should be used before writing any chunks using [`Self::write_chunk`]. fn insert_file( &mut self, - key: HasherOutT, + file_key: HasherOutT, metadata: FileMetadata, ) -> Result<(), FileStorageError> { let mut transaction = DBTransaction::new(); @@ -639,13 +701,24 @@ where })?; let (_, empty_root) = PrefixedMemoryDB::>::default_with_root(); - transaction.put(METADATA_COLUMN, key.as_ref(), &serialized_metadata); + transaction.put( + Column::Metadata.into(), + file_key.as_ref(), + &serialized_metadata, + ); // Stores an empty root to allow for later initialization of the trie. transaction.put( - ROOTS_COLUMN, + Column::Roots.into(), metadata.fingerprint.as_ref(), empty_root.as_ref(), ); + // Initialize chunk count to 0 + transaction.put( + Column::ChunkCount.into(), + file_key.as_ref(), + &0u64.to_le_bytes(), + ); + self.storage.write(transaction).map_err(|e| { error!(target: LOG_TARGET,"{:?}", e); FileStorageError::FailedToWriteToStorage @@ -656,10 +729,16 @@ where /// Stores file information with its (partial or final) root. /// Should be used if any chunks have already been written. - /// Otherwise use [`Self::insert_file`] + /// Otherwise use [`Self::insert_file`]. + /// + /// This is an expensive operation since it assumes that the file chunks were written + /// via the [`RocksDbFileDataTrie::write_chunk`] method instead of [`Self::write_chunk`] and + /// therefore iterates over all keys in `file_data` to count the number of chunks and update + /// the chunk count in the [`CHUNK_COUNT_COLUMN`] column. This data is necessary to + /// [`Self::generate_proof`]s for the file. fn insert_file_with_data( &mut self, - key: HasherOutT, + file_key: HasherOutT, metadata: FileMetadata, file_data: Self::FileDataTrie, ) -> Result<(), FileStorageError> { @@ -670,24 +749,45 @@ where let mut transaction = DBTransaction::new(); - transaction.put(METADATA_COLUMN, key.as_ref(), &raw_metadata); + transaction.put(Column::Metadata.into(), file_key.as_ref(), &raw_metadata); // Stores the current root of the trie. // if the file is complete, key and value will be equal. 
transaction.put( - ROOTS_COLUMN, + Column::Roots.into(), metadata.fingerprint.as_ref(), file_data.get_root().as_ref(), ); - let full_key = metadata + let mem_db = file_data.as_hash_db(); + let trie = TrieDBBuilder::::new(&mem_db, file_data.get_root()).build(); + + let chunk_count = trie + .iter() + .map_err(|e| { + error!(target: LOG_TARGET, "Failed to construct Trie iterator: {}", e); + FileStorageError::FailedToConstructTrieIter + })? + .count(); + + transaction.put( + Column::ChunkCount.into(), + file_key.as_ref(), + &chunk_count.to_le_bytes(), + ); + + let bucket_prefixed_file_key = metadata .bucket_id .into_iter() - .chain(key.as_ref().into_iter().cloned()) + .chain(file_key.as_ref().into_iter().cloned()) .collect::>(); // Store the key prefixed by bucket id - transaction.put(BUCKET_PREFIX_COLUMN, full_key.as_ref(), &[]); + transaction.put( + Column::BucketPrefix.into(), + bucket_prefixed_file_key.as_ref(), + &[], + ); self.storage.write(transaction).map_err(|e| { error!(target: LOG_TARGET,"{:?}", e); @@ -697,24 +797,14 @@ where Ok(()) } - fn stored_chunks_count(&self, key: &HasherOutT) -> Result { - let metadata = self - .get_metadata(key)? - .ok_or(FileStorageError::FileDoesNotExist)?; - - let raw_root = metadata.fingerprint.as_ref(); - let mut root = convert_raw_bytes_to_hasher_out::(raw_root.to_vec()) - .map_err(|_| FileStorageError::FailedToParseFingerprint)?; - let file_trie = - RocksDbFileDataTrie::::from_existing(self.storage.clone(), &mut root); - - file_trie.stored_chunks_count() - } - - fn get_metadata(&self, key: &HasherOutT) -> Result, FileStorageError> { + /// Retrieves file metadata by file key. + fn get_metadata( + &self, + file_key: &HasherOutT, + ) -> Result, FileStorageError> { let raw_metadata = self .storage - .read(METADATA_COLUMN, key.as_ref()) + .read(Column::Metadata.into(), file_key.as_ref()) .map_err(|e| { error!(target: LOG_TARGET,"{:?}", e); FileStorageError::FailedToReadStorage @@ -731,41 +821,21 @@ where } } + /// Generates a proof for specified chunks of a file. + /// + /// Returns error if file is incomplete or proof generation fails. fn generate_proof( &self, - key: &HasherOutT, + file_key: &HasherOutT, chunk_ids: &Vec, ) -> Result { let metadata = self - .get_metadata(key)? + .get_metadata(file_key)? .ok_or(FileStorageError::FileDoesNotExist)?; - let raw_final_root = metadata.fingerprint.as_ref(); - let final_root = - convert_raw_bytes_to_hasher_out::(raw_final_root.to_vec()).map_err(|e| { - error!(target: LOG_TARGET, "{:?}", e); - FileStorageError::FailedToParseFingerprint - })?; + let file_trie = self.get_file_trie(&metadata)?; - let raw_partial_root = self - .storage - .read(ROOTS_COLUMN, final_root.as_ref()) - .map_err(|e| { - error!(target: LOG_TARGET, "{:?}", e); - FileStorageError::FailedToReadStorage - })? - .expect("Failed to find partial root"); - - let mut partial_root = - convert_raw_bytes_to_hasher_out::(raw_partial_root).map_err(|e| { - error!(target: LOG_TARGET, "{:?}", e); - FileStorageError::FailedToParsePartialRoot - })?; - - let file_trie = - RocksDbFileDataTrie::::from_existing(self.storage.clone(), &mut partial_root); - - let stored_chunks = file_trie.stored_chunks_count()?; + let stored_chunks = self.stored_chunks_count(file_key)?; if metadata.chunks_count() != stored_chunks { return Err(FileStorageError::IncompleteFile); } @@ -785,19 +855,20 @@ where .to_file_key_proof(metadata.clone())) } - fn delete_file(&mut self, key: &HasherOutT) -> Result<(), FileStorageError> { + /// Deletes a file and all its associated data. 
+ fn delete_file(&mut self, file_key: &HasherOutT) -> Result<(), FileStorageError> { let metadata = self - .get_metadata(key)? + .get_metadata(file_key)? .ok_or(FileStorageError::FileDoesNotExist)?; - let raw_root = metadata.fingerprint.as_ref(); - let mut root = convert_raw_bytes_to_hasher_out::(raw_root.to_vec()).map_err(|e| { - error!(target: LOG_TARGET,"{:?}", e); - FileStorageError::FailedToParseFingerprint - })?; + let b_fingerprint = metadata.fingerprint.as_ref(); + let h_fingerprint = + convert_raw_bytes_to_hasher_out::(b_fingerprint.to_vec()).map_err(|e| { + error!(target: LOG_TARGET, "{:?}", e); + FileStorageError::FailedToParseFingerprint + })?; - let mut file_trie = - RocksDbFileDataTrie::::from_existing(self.storage.clone(), &mut root); + let mut file_trie = self.get_file_trie(&metadata)?; file_trie.delete().map_err(|e| { error!(target: LOG_TARGET,"{:?}", e); @@ -805,16 +876,19 @@ where })?; let mut transaction = DBTransaction::new(); - transaction.delete(METADATA_COLUMN, key.as_ref()); - transaction.delete(ROOTS_COLUMN, raw_root); + + transaction.delete(Column::Metadata.into(), file_key.as_ref()); + transaction.delete(Column::Roots.into(), h_fingerprint.as_ref()); + transaction.delete(Column::ChunkCount.into(), file_key.as_ref()); + + let bucket_prefixed_file_key = metadata + .bucket_id + .into_iter() + .chain(file_key.as_ref().iter().cloned()) + .collect::>(); transaction.delete( - BUCKET_PREFIX_COLUMN, - metadata - .bucket_id - .into_iter() - .chain(key.as_ref().iter().cloned()) - .collect::>() - .as_ref(), + Column::BucketPrefix.into(), + bucket_prefixed_file_key.as_ref(), ); self.storage.write(transaction).map_err(|e| { @@ -825,45 +899,54 @@ where Ok(()) } - fn delete_files_with_prefix(&mut self, prefix: &[u8; 32]) -> Result<(), FileStorageError> { - let mut keys_to_delete = Vec::new(); + /// Deletes all files with a matching bucket ID prefix. + fn delete_files_with_prefix( + &mut self, + bucket_id_prefix: &[u8; 32], + ) -> Result<(), FileStorageError> { + let mut file_keys_to_delete = Vec::new(); { let mut iter = self .storage .db - .iter_with_prefix(BUCKET_PREFIX_COLUMN, prefix); + .iter_with_prefix(Column::BucketPrefix.into(), bucket_id_prefix); while let Some(Ok((key, _))) = iter.next() { // Remove the prefix from the key. - let key = key.iter().skip(prefix.len()).copied().collect::>(); + let file_key = key + .iter() + .skip(bucket_id_prefix.len()) + .copied() + .collect::>(); - let key = convert_raw_bytes_to_hasher_out::(key).map_err(|e| { + let h_file_key = convert_raw_bytes_to_hasher_out::(file_key).map_err(|e| { error!(target: LOG_TARGET, "{:?}", e); - FileStorageError::FailedToParseFingerprint + FileStorageError::FailedToParseKey })?; - keys_to_delete.push(key); + file_keys_to_delete.push(h_file_key); } } - for key in keys_to_delete { - self.delete_file(&key)?; + for h_file_key in file_keys_to_delete { + self.delete_file(&h_file_key)?; } Ok(()) } + /// Checks if a key is allowed based on the exclude type. fn is_allowed( &self, - key: &HasherOutT, + file_key: &HasherOutT, exclude_type: ExcludeType, ) -> Result { let exclude_column = get_exclude_type_db_column(exclude_type); let find = self .storage .db - .get(exclude_column, key.as_ref()) + .get(exclude_column, file_key.as_ref()) .map_err(|e| { error!(target: LOG_TARGET, "{:?}", e); FileStorageError::FailedToReadStorage @@ -875,41 +958,43 @@ where } } + /// Adds a key to the specified exclude list. 
fn add_to_exclude_list( &mut self, - key: HasherOutT, + file_key: HasherOutT, exclude_type: ExcludeType, ) -> Result<(), FileStorageError> { let exclude_column = get_exclude_type_db_column(exclude_type); let mut transaction = DBTransaction::new(); - transaction.put(exclude_column, key.as_ref(), &Vec::::new()); + transaction.put(exclude_column, file_key.as_ref(), &[]); self.storage.db.write(transaction).map_err(|e| { error!(target: LOG_TARGET, "Failed to write to DB: {}", e); FileStorageError::FailedToWriteToStorage })?; - info!("Key added to the exclude list : {:?}", key); + info!("Key added to the exclude list: {:?}", file_key); Ok(()) } + /// Removes a key from the specified exclude list. fn remove_from_exclude_list( &mut self, - key: &HasherOutT, + file_key: &HasherOutT, exclude_type: ExcludeType, ) -> Result<(), FileStorageError> { let exclude_column = get_exclude_type_db_column(exclude_type); let mut transaction = DBTransaction::new(); - transaction.delete(exclude_column, key.as_ref()); + transaction.delete(exclude_column, file_key.as_ref()); self.storage.db.write(transaction).map_err(|e| { error!(target: LOG_TARGET, "Failed to write to DB: {}", e); FileStorageError::FailedToWriteToStorage })?; - info!("Key removed to the exclude list : {:?}", key); + info!("Key removed from the exclude list: {:?}", file_key); Ok(()) } } @@ -924,10 +1009,27 @@ mod tests { use sp_runtime::AccountId32; use sp_trie::LayoutV1; + fn stored_chunks_count( + trie: &RocksDbFileDataTrie, InMemory>, + ) -> Result { + let db = trie.as_hash_db(); + let trie = TrieDBBuilder::>::new(&db, &trie.root).build(); + + let count = trie + .iter() + .map_err(|e| { + error!(target: LOG_TARGET, "Failed to construct Trie iterator: {}", e); + FileStorageError::FailedToConstructTrieIter + })?
+ .count(); + + Ok(count as u64) + } + #[test] fn file_trie_create_empty_works() { let storage = StorageDb { - db: Arc::new(kvdb_memorydb::create(3)), + db: Arc::new(kvdb_memorydb::create(NUMBER_OF_COLUMNS)), _marker: Default::default(), }; @@ -950,7 +1052,7 @@ mod tests { #[test] fn file_trie_write_chunk_works() { let storage = StorageDb { - db: Arc::new(kvdb_memorydb::create(3)), + db: Arc::new(kvdb_memorydb::create(NUMBER_OF_COLUMNS)), _marker: Default::default(), }; @@ -969,7 +1071,7 @@ mod tests { #[test] fn file_trie_get_chunk_works() { let storage = StorageDb { - db: Arc::new(kvdb_memorydb::create(3)), + db: Arc::new(kvdb_memorydb::create(NUMBER_OF_COLUMNS)), _marker: Default::default(), }; @@ -985,27 +1087,28 @@ mod tests { #[test] fn file_trie_stored_chunks_count_works() { let storage = StorageDb { - db: Arc::new(kvdb_memorydb::create(3)), + db: Arc::new(kvdb_memorydb::create(NUMBER_OF_COLUMNS)), _marker: Default::default(), }; let chunk_ids = vec![ChunkId::new(0u64), ChunkId::new(1u64)]; let chunks = vec![Chunk::from([0u8; 1024]), Chunk::from([1u8; 1024])]; + let mut file_trie = RocksDbFileDataTrie::, InMemory>::new(storage); file_trie.write_chunk(&chunk_ids[0], &chunks[0]).unwrap(); - assert_eq!(file_trie.stored_chunks_count().unwrap(), 1); + assert_eq!(stored_chunks_count(&file_trie).unwrap(), 1); assert!(file_trie.get_chunk(&chunk_ids[0]).is_ok()); file_trie.write_chunk(&chunk_ids[1], &chunks[1]).unwrap(); - assert_eq!(file_trie.stored_chunks_count().unwrap(), 2); + assert_eq!(stored_chunks_count(&file_trie).unwrap(), 2); assert!(file_trie.get_chunk(&chunk_ids[1]).is_ok()); } #[test] fn file_trie_generate_proof_works() { let storage = StorageDb { - db: Arc::new(kvdb_memorydb::create(3)), + db: Arc::new(kvdb_memorydb::create(NUMBER_OF_COLUMNS)), _marker: Default::default(), }; @@ -1020,15 +1123,15 @@ mod tests { let mut file_trie = RocksDbFileDataTrie::, InMemory>::new(storage); file_trie.write_chunk(&chunk_ids[0], &chunks[0]).unwrap(); - assert_eq!(file_trie.stored_chunks_count().unwrap(), 1); + assert_eq!(stored_chunks_count(&file_trie).unwrap(), 1); assert!(file_trie.get_chunk(&chunk_ids[0]).is_ok()); file_trie.write_chunk(&chunk_ids[1], &chunks[1]).unwrap(); - assert_eq!(file_trie.stored_chunks_count().unwrap(), 2); + assert_eq!(stored_chunks_count(&file_trie).unwrap(), 2); assert!(file_trie.get_chunk(&chunk_ids[1]).is_ok()); file_trie.write_chunk(&chunk_ids[2], &chunks[2]).unwrap(); - assert_eq!(file_trie.stored_chunks_count().unwrap(), 3); + assert_eq!(stored_chunks_count(&file_trie).unwrap(), 3); assert!(file_trie.get_chunk(&chunk_ids[2]).is_ok()); let file_proof = file_trie.generate_proof(&chunk_ids).unwrap(); @@ -1042,7 +1145,7 @@ mod tests { #[test] fn file_trie_delete_works() { let storage = StorageDb { - db: Arc::new(kvdb_memorydb::create(3)), + db: Arc::new(kvdb_memorydb::create(NUMBER_OF_COLUMNS)), _marker: Default::default(), }; @@ -1057,15 +1160,15 @@ mod tests { let mut file_trie = RocksDbFileDataTrie::, InMemory>::new(storage); file_trie.write_chunk(&chunk_ids[0], &chunks[0]).unwrap(); - assert_eq!(file_trie.stored_chunks_count().unwrap(), 1); + assert_eq!(stored_chunks_count(&file_trie).unwrap(), 1); assert!(file_trie.get_chunk(&chunk_ids[0]).is_ok()); file_trie.write_chunk(&chunk_ids[1], &chunks[1]).unwrap(); - assert_eq!(file_trie.stored_chunks_count().unwrap(), 2); + assert_eq!(stored_chunks_count(&file_trie).unwrap(), 2); assert!(file_trie.get_chunk(&chunk_ids[1]).is_ok()); file_trie.write_chunk(&chunk_ids[2], &chunks[2]).unwrap(); - 
assert_eq!(file_trie.stored_chunks_count().unwrap(), 3); + assert_eq!(stored_chunks_count(&file_trie).unwrap(), 3); assert!(file_trie.get_chunk(&chunk_ids[2]).is_ok()); file_trie.delete().unwrap(); @@ -1073,7 +1176,7 @@ mod tests { assert!(file_trie.get_chunk(&chunk_ids[1]).is_err()); assert!(file_trie.get_chunk(&chunk_ids[2]).is_err()); - assert_eq!(file_trie.stored_chunks_count().unwrap(), 0); + assert_eq!(stored_chunks_count(&file_trie).unwrap(), 0); } #[test] @@ -1085,12 +1188,12 @@ mod tests { ]; let storage = StorageDb { - db: Arc::new(kvdb_memorydb::create(3)), + db: Arc::new(kvdb_memorydb::create(NUMBER_OF_COLUMNS)), _marker: Default::default(), }; let user_storage = StorageDb { - db: Arc::new(kvdb_memorydb::create(3)), + db: Arc::new(kvdb_memorydb::create(NUMBER_OF_COLUMNS)), _marker: Default::default(), }; @@ -1146,7 +1249,7 @@ mod tests { #[test] fn file_storage_insert_file_works() { let storage = StorageDb { - db: Arc::new(kvdb_memorydb::create(3)), + db: Arc::new(kvdb_memorydb::create(NUMBER_OF_COLUMNS)), _marker: Default::default(), }; @@ -1166,15 +1269,15 @@ mod tests { RocksDbFileDataTrie::, InMemory>::new(storage.clone()); file_trie.write_chunk(&chunk_ids[0], &chunks[0]).unwrap(); - assert_eq!(file_trie.stored_chunks_count().unwrap(), 1); + assert_eq!(stored_chunks_count(&file_trie).unwrap(), 1); assert!(file_trie.get_chunk(&chunk_ids[0]).is_ok()); file_trie.write_chunk(&chunk_ids[1], &chunks[1]).unwrap(); - assert_eq!(file_trie.stored_chunks_count().unwrap(), 2); + assert_eq!(stored_chunks_count(&file_trie).unwrap(), 2); assert!(file_trie.get_chunk(&chunk_ids[1]).is_ok()); file_trie.write_chunk(&chunk_ids[2], &chunks[2]).unwrap(); - assert_eq!(file_trie.stored_chunks_count().unwrap(), 3); + assert_eq!(stored_chunks_count(&file_trie).unwrap(), 3); assert!(file_trie.get_chunk(&chunk_ids[2]).is_ok()); let file_metadata = FileMetadata { @@ -1200,7 +1303,7 @@ mod tests { #[test] fn file_storage_delete_file_works() { let storage = StorageDb { - db: Arc::new(kvdb_memorydb::create(3)), + db: Arc::new(kvdb_memorydb::create(NUMBER_OF_COLUMNS)), _marker: Default::default(), }; @@ -1219,15 +1322,15 @@ mod tests { let mut file_trie = RocksDbFileDataTrie::, InMemory>::new(storage.clone()); file_trie.write_chunk(&chunk_ids[0], &chunks[0]).unwrap(); - assert_eq!(file_trie.stored_chunks_count().unwrap(), 1); + assert_eq!(stored_chunks_count(&file_trie).unwrap(), 1); assert!(file_trie.get_chunk(&chunk_ids[0]).is_ok()); file_trie.write_chunk(&chunk_ids[1], &chunks[1]).unwrap(); - assert_eq!(file_trie.stored_chunks_count().unwrap(), 2); + assert_eq!(stored_chunks_count(&file_trie).unwrap(), 2); assert!(file_trie.get_chunk(&chunk_ids[1]).is_ok()); file_trie.write_chunk(&chunk_ids[2], &chunks[2]).unwrap(); - assert_eq!(file_trie.stored_chunks_count().unwrap(), 3); + assert_eq!(stored_chunks_count(&file_trie).unwrap(), 3); assert!(file_trie.get_chunk(&chunk_ids[2]).is_ok()); let file_metadata = FileMetadata { @@ -1265,12 +1368,12 @@ mod tests { ]; let storage = StorageDb { - db: Arc::new(kvdb_memorydb::create(3)), + db: Arc::new(kvdb_memorydb::create(NUMBER_OF_COLUMNS)), _marker: Default::default(), }; let user_storage = StorageDb { - db: Arc::new(kvdb_memorydb::create(3)), + db: Arc::new(kvdb_memorydb::create(NUMBER_OF_COLUMNS)), _marker: Default::default(), }; @@ -1355,7 +1458,7 @@ mod tests { #[test] fn delete_files_with_prefix_works() { let storage = StorageDb { - db: Arc::new(kvdb_memorydb::create(4)), + db: Arc::new(kvdb_memorydb::create(NUMBER_OF_COLUMNS)), _marker: Default::default(), 
}; diff --git a/client/file-manager/src/traits.rs b/client/file-manager/src/traits.rs index 47d818c1e..32e959e14 100644 --- a/client/file-manager/src/traits.rs +++ b/client/file-manager/src/traits.rs @@ -15,10 +15,14 @@ pub enum FileStorageWriteError { FailedToGetFileChunk, /// File metadata fingerprint does not match the stored file fingerprint. FingerprintAndStoredFileMismatch, + /// Failed to construct file trie. + FailedToContructFileTrie, /// Failed to construct iterator for trie. FailedToConstructTrieIter, /// Failed to commit changes in overlay to disk. FailedToPersistChanges, + /// Failed to delete root. + FailedToDeleteRoot, /// Failed to delete chunk. FailedToDeleteChunk, /// Failed to convert raw bytes into [`FileMetadata`]. @@ -33,6 +37,8 @@ pub enum FileStorageWriteError { FailedToParsePartialRoot, /// Failed to get chunks count in storage. FailedToGetStoredChunksCount, + /// Reached chunk count limit (overflow) + ChunkCountOverflow, } #[derive(Debug)] @@ -120,9 +126,6 @@ pub trait FileDataTrie { /// Get the root of the trie. fn get_root(&self) -> &HasherOutT; - /// Get the number of stored chunks in the trie. - fn stored_chunks_count(&self) -> Result; - /// Generate proof for a chunk of a file. Returns error if the chunk does not exist. fn generate_proof(&self, chunk_ids: &Vec) -> Result; diff --git a/node/src/tasks/bsp_upload_file.rs b/node/src/tasks/bsp_upload_file.rs index 35694b7e6..b8817dadb 100644 --- a/node/src/tasks/bsp_upload_file.rs +++ b/node/src/tasks/bsp_upload_file.rs @@ -912,13 +912,15 @@ where FileStorageWriteError::FailedToGetFileChunk | FileStorageWriteError::FailedToInsertFileChunk | FileStorageWriteError::FailedToDeleteChunk + | FileStorageWriteError::FailedToDeleteRoot | FileStorageWriteError::FailedToPersistChanges | FileStorageWriteError::FailedToParseFileMetadata | FileStorageWriteError::FailedToParseFingerprint | FileStorageWriteError::FailedToReadStorage | FileStorageWriteError::FailedToUpdatePartialRoot | FileStorageWriteError::FailedToParsePartialRoot - | FileStorageWriteError::FailedToGetStoredChunksCount => { + | FileStorageWriteError::FailedToGetStoredChunksCount + | FileStorageWriteError::ChunkCountOverflow => { return Err(anyhow::anyhow!(format!( "Internal trie read/write error {:?}:{:?}", event.file_key, chunk.key @@ -930,7 +932,8 @@ where event.file_key ))); } - FileStorageWriteError::FailedToConstructTrieIter => { + FileStorageWriteError::FailedToConstructTrieIter + | FileStorageWriteError::FailedToContructFileTrie => { return Err(anyhow::anyhow!(format!( "This is a bug! 
Failed to construct trie iter for key {:?}.", event.file_key diff --git a/node/src/tasks/msp_upload_file.rs b/node/src/tasks/msp_upload_file.rs index f82f074de..128450b07 100644 --- a/node/src/tasks/msp_upload_file.rs +++ b/node/src/tasks/msp_upload_file.rs @@ -764,13 +764,15 @@ where FileStorageWriteError::FailedToGetFileChunk | FileStorageWriteError::FailedToInsertFileChunk | FileStorageWriteError::FailedToDeleteChunk + | FileStorageWriteError::FailedToDeleteRoot | FileStorageWriteError::FailedToPersistChanges | FileStorageWriteError::FailedToParseFileMetadata | FileStorageWriteError::FailedToParseFingerprint | FileStorageWriteError::FailedToReadStorage | FileStorageWriteError::FailedToUpdatePartialRoot | FileStorageWriteError::FailedToParsePartialRoot - | FileStorageWriteError::FailedToGetStoredChunksCount => { + | FileStorageWriteError::FailedToGetStoredChunksCount + | FileStorageWriteError::ChunkCountOverflow => { self.handle_rejected_storage_request( &event.file_key.into(), bucket_id, @@ -794,7 +796,8 @@ where event.file_key ))); } - FileStorageWriteError::FailedToConstructTrieIter => { + FileStorageWriteError::FailedToConstructTrieIter + | FileStorageWriteError::FailedToContructFileTrie => { self.handle_rejected_storage_request( &event.file_key.into(), bucket_id, diff --git a/pallets/file-system/src/benchmarking.rs b/pallets/file-system/src/benchmarking.rs index b0a194175..420d83b05 100644 --- a/pallets/file-system/src/benchmarking.rs +++ b/pallets/file-system/src/benchmarking.rs @@ -684,7 +684,7 @@ mod benchmarks { .map(|file_key| { let reject_reason = RejectedStorageRequestReason::ReachedMaximumCapacity; RejectedStorageRequest { - file_key: file_key.clone(), + file_key: *file_key, reason: reject_reason, } }) @@ -2448,7 +2448,7 @@ mod benchmarks { /*********** Call the function to benchmark: ***********/ #[block] { - Pallet::::process_expired_storage_request(file_key.clone(), &mut WeightMeter::new()); + Pallet::::process_expired_storage_request(file_key, &mut WeightMeter::new()); } /*********** Post-benchmark checks: ***********/ @@ -2570,7 +2570,7 @@ mod benchmarks { /*********** Call the function to benchmark: ***********/ #[block] { - Pallet::::process_expired_storage_request(file_key.clone(), &mut WeightMeter::new()); + Pallet::::process_expired_storage_request(file_key, &mut WeightMeter::new()); } /*********** Post-benchmark checks: ***********/ @@ -2716,8 +2716,8 @@ mod benchmarks { ValuePropId = ::ValuePropId, >, { - let msp_hash = if msp_id.is_some() { - msp_id.unwrap() + let msp_hash = if let Some(msp_id) = msp_id { + msp_id } else { T::Hashing::hash_of(&msp) }; @@ -2779,8 +2779,8 @@ mod benchmarks { T: crate::Config>, { // Derive the BSP ID from the hash of its account - let bsp_id = if bsp_id.is_some() { - bsp_id.unwrap() + let bsp_id = if let Some(bsp_id) = bsp_id { + bsp_id } else { T::Hashing::hash_of(&bsp_account) };