diff --git a/client/file-manager/src/rocksdb.rs b/client/file-manager/src/rocksdb.rs
index 3254d8b91..4249ff11f 100644
--- a/client/file-manager/src/rocksdb.rs
+++ b/client/file-manager/src/rocksdb.rs
@@ -586,6 +586,22 @@ where
             return Ok(FileStorageWriteOutcome::FileIncomplete);
         }

+        let current_fingerprint = file_trie.get_root().as_ref().try_into().map_err(|_| {
+            error!(target: LOG_TARGET, "Failed to convert root to fingerprint");
+            FileStorageWriteError::FailedToParseFingerprint
+        })?;
+
+        // Verify that the final root matches the expected fingerprint
+        if metadata.fingerprint != current_fingerprint {
+            error!(
+                target: LOG_TARGET,
+                "Fingerprint mismatch. Expected: {:?}, got: {:?}",
+                metadata.fingerprint,
+                file_trie.get_root()
+            );
+            return Err(FileStorageWriteError::FingerprintAndStoredFileMismatch);
+        }
+
         Ok(FileStorageWriteOutcome::FileComplete)
     }

@@ -918,7 +934,7 @@ where
 mod tests {
     use super::*;
     use kvdb_memorydb::InMemory;
-    use shc_common::types::Fingerprint;
+    use shc_common::types::{Fingerprint, FILE_CHUNK_SIZE};
     use sp_core::H256;
     use sp_runtime::traits::BlakeTwo256;
     use sp_runtime::AccountId32;
@@ -1079,9 +1095,9 @@ mod tests {
     #[test]
     fn file_storage_write_chunk_works() {
         let chunks = vec![
-            Chunk::from([5u8; 32]),
-            Chunk::from([6u8; 32]),
-            Chunk::from([7u8; 32]),
+            Chunk::from([5u8; FILE_CHUNK_SIZE as usize]),
+            Chunk::from([6u8; FILE_CHUNK_SIZE as usize]),
+            Chunk::from([7u8; FILE_CHUNK_SIZE as usize]),
         ];

         let storage = StorageDb {
@@ -1089,55 +1105,43 @@ mod tests {
             _marker: Default::default(),
         };

-        let user_storage = StorageDb {
-            db: Arc::new(kvdb_memorydb::create(3)),
-            _marker: Default::default(),
-        };
-
-        let mut user_file_trie =
-            RocksDbFileDataTrie::<LayoutV1<BlakeTwo256>, InMemory>::new(user_storage.clone());
-
-        for (id, chunk) in chunks.iter().enumerate() {
-            user_file_trie
-                .write_chunk(&ChunkId::new(id as u64), chunk)
-                .unwrap();
-        }
-
-        let fingerprint = Fingerprint::from(user_file_trie.get_root().as_ref());
-
         let chunk_ids: Vec<ChunkId> = chunks
             .iter()
             .enumerate()
             .map(|(id, _)| ChunkId::new(id as u64))
             .collect();

+        // Create a file trie to get the expected fingerprint
+        let mut file_trie =
+            RocksDbFileDataTrie::<LayoutV1<BlakeTwo256>, InMemory>::new(storage.clone());
+
+        for (chunk_id, chunk) in chunk_ids.iter().zip(chunks.iter()) {
+            file_trie.write_chunk(chunk_id, chunk).unwrap();
+        }
+
         let file_metadata = FileMetadata {
-            file_size: 32u64 * chunks.len() as u64,
-            fingerprint,
+            file_size: FILE_CHUNK_SIZE * chunks.len() as u64,
+            fingerprint: file_trie.get_root().as_ref().into(),
             owner: <AccountId32 as AsRef<[u8]>>::as_ref(&AccountId32::new([0u8; 32])).to_vec(),
             location: "location".to_string().into_bytes(),
             bucket_id: [1u8; 32].to_vec(),
         };
-
         let key = file_metadata.file_key::<BlakeTwo256>();

         let mut file_storage = RocksDbFileStorage::<LayoutV1<BlakeTwo256>, InMemory>::new(storage);
-        file_storage.insert_file(key, file_metadata).unwrap();

-        file_storage
-            .write_chunk(&key, &chunk_ids[0], &chunks[0])
-            .unwrap();
-        assert!(file_storage.get_chunk(&key, &chunk_ids[0]).is_ok());
-
-        file_storage
-            .write_chunk(&key, &chunk_ids[1], &chunks[1])
-            .unwrap();
-        assert!(file_storage.get_chunk(&key, &chunk_ids[1]).is_ok());
+        // Insert file metadata first
+        file_storage.insert_file(key, file_metadata).unwrap();

-        file_storage
-            .write_chunk(&key, &chunk_ids[2], &chunks[2])
-            .unwrap();
-        assert!(file_storage.get_chunk(&key, &chunk_ids[2]).is_ok());
+        // Write chunks one by one and verify
+        for (chunk_id, chunk) in chunk_ids.iter().zip(chunks.iter()) {
+            let result = file_storage.write_chunk(&key, chunk_id, chunk);
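+            // Each write should succeed; the last chunk also exercises the
+            // root-vs-fingerprint verification added above.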
+            assert!(result.is_ok());
+            assert!(file_storage.get_chunk(&key, chunk_id).is_ok());
+        }

+        // Verify final state
+        assert!(file_storage.get_metadata(&key).is_ok());
         assert!(file_storage.get_chunk(&key, &chunk_ids[0]).is_ok());
         assert!(file_storage.get_chunk(&key, &chunk_ids[1]).is_ok());
         assert!(file_storage.get_chunk(&key, &chunk_ids[2]).is_ok());
diff --git a/node/src/tasks/bsp_upload_file.rs b/node/src/tasks/bsp_upload_file.rs
index 35694b7e6..86dfea9eb 100644
--- a/node/src/tasks/bsp_upload_file.rs
+++ b/node/src/tasks/bsp_upload_file.rs
@@ -839,6 +839,23 @@ where
         let file_key = event.file_key.into();
         let mut write_file_storage = self.storage_hub_handler.file_storage.write().await;

+        // Get the file metadata to verify the fingerprint
+        let file_metadata = write_file_storage
+            .get_metadata(&file_key)
+            .map_err(|e| anyhow!("Failed to get file metadata: {:?}", e))?
+            .ok_or_else(|| anyhow!("File metadata not found"))?;
+
+        // Verify that the fingerprint in the proof matches the expected file fingerprint
+        let expected_fingerprint = file_metadata.fingerprint;
+        if event.file_key_proof.file_metadata.fingerprint != expected_fingerprint {
+            error!(
+                target: LOG_TARGET,
+                "Fingerprint mismatch for file {:?}. Expected: {:?}, got: {:?}",
+                file_key, expected_fingerprint, event.file_key_proof.file_metadata.fingerprint
+            );
+            return Err(anyhow!("Fingerprint mismatch"));
+        }
+
         // Verify and extract chunks from proof
         let proven = match event
             .file_key_proof
diff --git a/node/src/tasks/msp_move_bucket.rs b/node/src/tasks/msp_move_bucket.rs
index e175e82c6..becaae286 100644
--- a/node/src/tasks/msp_move_bucket.rs
+++ b/node/src/tasks/msp_move_bucket.rs
@@ -469,17 +469,28 @@ where
         let file_key_proof =
             match FileKeyProof::decode(&mut download_request.file_key_proof.as_ref()) {
-                Ok(proof) => proof,
+                Ok(file_key_proof) => file_key_proof,
                 Err(error) => {
                     error!(
                         target: LOG_TARGET,
-                        "Failed to decode file key proof: {:?}",
-                        error
+                        "Failed to decode file key proof for chunk {:?} of file {:?}: {:?}",
+                        chunk, file_key, error
                     );
                     continue;
                 }
             };

+        // Verify that the fingerprint in the proof matches the expected file fingerprint
+        let expected_fingerprint = file_metadata.fingerprint;
+        if file_key_proof.file_metadata.fingerprint != expected_fingerprint {
+            error!(
+                target: LOG_TARGET,
+                "Fingerprint mismatch for file {:?}. Expected: {:?}, got: {:?}",
+                file_key, expected_fingerprint, file_key_proof.file_metadata.fingerprint
+            );
+            continue;
+        }
+
         let proven = match file_key_proof.proven::<StorageProofsMerkleTrieLayout>() {
             Ok(data) => data,
             Err(error) => {
diff --git a/node/src/tasks/msp_upload_file.rs b/node/src/tasks/msp_upload_file.rs
index f82f074de..0cb91a9e5 100644
--- a/node/src/tasks/msp_upload_file.rs
+++ b/node/src/tasks/msp_upload_file.rs
@@ -656,12 +656,13 @@ where
         &mut self,
         event: RemoteUploadRequest,
     ) -> anyhow::Result<bool> {
+        let file_key = event.file_key.into();
         let bucket_id = match self
             .storage_hub_handler
             .file_storage
             .read()
             .await
-            .get_metadata(&event.file_key.into())
+            .get_metadata(&file_key)
         {
             Ok(metadata) => match metadata {
                 Some(metadata) => H256(metadata.bucket_id.try_into().unwrap()),
@@ -678,6 +679,27 @@ where
             }
         };

+        // Get the file metadata to verify the fingerprint
+        let file_metadata = {
+            let read_file_storage = self.storage_hub_handler.file_storage.read().await;
+            read_file_storage
+                .get_metadata(&file_key)
+                .map_err(|e| anyhow!("Failed to get file metadata: {:?}", e))?
+                .ok_or_else(|| anyhow!("File metadata not found"))?
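+            // The read lock is released at the end of this block; only the
+            // extracted metadata is needed for the fingerprint check below.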
+        };
+
+        // Verify that the fingerprint in the proof matches the expected file fingerprint
+        let expected_fingerprint = file_metadata.fingerprint;
+        if event.file_key_proof.file_metadata.fingerprint != expected_fingerprint {
+            error!(
+                target: LOG_TARGET,
+                "Fingerprint mismatch for file {:?}. Expected: {:?}, got: {:?}",
+                file_key, expected_fingerprint, event.file_key_proof.file_metadata.fingerprint
+            );
+            return Err(anyhow!("Fingerprint mismatch"));
+        }
+
+        // Verify and extract chunks from proof
         let proven = match event
             .file_key_proof
             .proven::<StorageProofsMerkleTrieLayout>()
@@ -711,15 +733,19 @@ where
         // Handle invalid proof case
         let proven = match proven {
             Ok(proven) => proven,
-            Err(e) => {
-                warn!(target: LOG_TARGET, "{}", e);
+            Err(error) => {
+                error!(
+                    target: LOG_TARGET,
+                    "Failed to verify proof for file {:?}: {:?}",
+                    file_key, error
+                );
                 self.handle_rejected_storage_request(
-                    &event.file_key.into(),
+                    &file_key,
                     bucket_id,
                     RejectedStorageRequestReason::ReceivedInvalidProof,
                 )
                 .await?;
-                return Err(e);
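+                // The detailed proof error was already logged above, so a
+                // generic error is returned to the caller here.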
+                return Err(anyhow!("Failed to verify proof"));
             }
         };

@@ -728,8 +754,7 @@ where

         // Process each proven chunk in the batch
         for chunk in proven {
-            let write_result =
-                write_file_storage.write_chunk(&event.file_key.into(), &chunk.key, &chunk.data);
+            let write_result = write_file_storage.write_chunk(&file_key, &chunk.key, &chunk.data);

             match write_result {
                 Ok(outcome) => match outcome {
@@ -751,15 +776,15 @@ where
                     }
                     FileStorageWriteError::FileDoesNotExist => {
                         self.handle_rejected_storage_request(
-                            &event.file_key.into(),
+                            &file_key,
                             bucket_id,
                             RejectedStorageRequestReason::InternalError,
                         )
                         .await?;

                         return Err(anyhow::anyhow!(format!(
-                            "File does not exist for key {:?}. Maybe we forgot to unregister before deleting?",
-                            event.file_key
-                        )));
+                            "File does not exist for key {:?}. Maybe we forgot to unregister before deleting?",
+                            file_key
+                        )));
                     }
                     FileStorageWriteError::FailedToGetFileChunk
                     | FileStorageWriteError::FailedToInsertFileChunk
@@ -772,38 +797,38 @@ where
                     | FileStorageWriteError::FailedToParsePartialRoot
                     | FileStorageWriteError::FailedToGetStoredChunksCount => {
                         self.handle_rejected_storage_request(
-                            &event.file_key.into(),
+                            &file_key,
                             bucket_id,
                             RejectedStorageRequestReason::InternalError,
                         )
                         .await?;

                         return Err(anyhow::anyhow!(format!(
                             "Internal trie read/write error {:?}:{:?}",
-                            event.file_key, chunk.key
+                            file_key, chunk.key
                         )));
                     }
                     FileStorageWriteError::FingerprintAndStoredFileMismatch => {
                         self.handle_rejected_storage_request(
-                            &event.file_key.into(),
+                            &file_key,
                             bucket_id,
                             RejectedStorageRequestReason::InternalError,
                         )
                         .await?;

                         return Err(anyhow::anyhow!(format!(
-                            "Invariant broken! This is a bug! Fingerprint and stored file mismatch for key {:?}.",
-                            event.file_key
-                        )));
+                            "Invariant broken! This is a bug! Fingerprint and stored file mismatch for key {:?}.",
+                            file_key
+                        )));
                     }
                     FileStorageWriteError::FailedToConstructTrieIter => {
                         self.handle_rejected_storage_request(
-                            &event.file_key.into(),
+                            &file_key,
                             bucket_id,
                             RejectedStorageRequestReason::InternalError,
                         )
                         .await?;

                         return Err(anyhow::anyhow!(format!(
                             "This is a bug! Failed to construct trie iter for key {:?}.",
-                            event.file_key
+                            file_key
                         )));
                     }
                 },
@@ -813,18 +838,18 @@ where
         // If we haven't found the file to be complete during chunk processing,
         // check if it's complete now (in case this was the last batch)
         if !file_complete {
-            match write_file_storage.is_file_complete(&event.file_key.into()) {
+            match write_file_storage.is_file_complete(&file_key) {
                 Ok(is_complete) => file_complete = is_complete,
                 Err(e) => {
                     self.handle_rejected_storage_request(
-                        &event.file_key.into(),
+                        &file_key,
                         bucket_id,
                         RejectedStorageRequestReason::InternalError,
                     )
                     .await?;

                     let err_msg = format!(
                         "Failed to check if file is complete. The file key {:?} is in a bad state with error: {:?}",
-                        event.file_key, e
+                        file_key, e
                     );
                     error!(target: LOG_TARGET, "{}", err_msg);
                     return Err(anyhow::anyhow!(err_msg));