Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix: properly check fingerprint #358

Merged
merged 6 commits into from
Feb 14, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
78 changes: 41 additions & 37 deletions client/file-manager/src/rocksdb.rs
Original file line number Diff line number Diff line change
Expand Up @@ -586,6 +586,22 @@ where
return Ok(FileStorageWriteOutcome::FileIncomplete);
}

let current_fingerprint = file_trie.get_root().as_ref().try_into().map_err(|_| {
error!(target: LOG_TARGET, "Failed to convert root to fingerprint");
FileStorageWriteError::FailedToParseFingerprint
})?;

// Verify that the final root matches the expected fingerprint
if metadata.fingerprint != current_fingerprint {
error!(
target: LOG_TARGET,
"Fingerprint mismatch. Expected: {:?}, got: {:?}",
metadata.fingerprint,
file_trie.get_root()
);
return Err(FileStorageWriteError::FingerprintAndStoredFileMismatch);
}

Ok(FileStorageWriteOutcome::FileComplete)
}

Expand Down Expand Up @@ -918,7 +934,7 @@ where
mod tests {
use super::*;
use kvdb_memorydb::InMemory;
use shc_common::types::Fingerprint;
use shc_common::types::{Fingerprint, FILE_CHUNK_SIZE};
use sp_core::H256;
use sp_runtime::traits::BlakeTwo256;
use sp_runtime::AccountId32;
Expand Down Expand Up @@ -1079,65 +1095,53 @@ mod tests {
#[test]
fn file_storage_write_chunk_works() {
let chunks = vec![
Chunk::from([5u8; 32]),
Chunk::from([6u8; 32]),
Chunk::from([7u8; 32]),
Chunk::from([5u8; FILE_CHUNK_SIZE as usize]),
Chunk::from([6u8; FILE_CHUNK_SIZE as usize]),
Chunk::from([7u8; FILE_CHUNK_SIZE as usize]),
];

let storage = StorageDb {
db: Arc::new(kvdb_memorydb::create(3)),
_marker: Default::default(),
};

let user_storage = StorageDb {
db: Arc::new(kvdb_memorydb::create(3)),
_marker: Default::default(),
};

let mut user_file_trie =
RocksDbFileDataTrie::<LayoutV1<BlakeTwo256>, InMemory>::new(user_storage.clone());

for (id, chunk) in chunks.iter().enumerate() {
user_file_trie
.write_chunk(&ChunkId::new(id as u64), chunk)
.unwrap();
}

let fingerprint = Fingerprint::from(user_file_trie.get_root().as_ref());

let chunk_ids: Vec<ChunkId> = chunks
.iter()
.enumerate()
.map(|(id, _)| ChunkId::new(id as u64))
.collect();

// Create a file trie to get the expected fingerprint
let mut file_trie =
RocksDbFileDataTrie::<LayoutV1<BlakeTwo256>, InMemory>::new(storage.clone());

for (chunk_id, chunk) in chunk_ids.iter().zip(chunks.iter()) {
file_trie.write_chunk(chunk_id, chunk).unwrap();
}

let file_metadata = FileMetadata {
file_size: 32u64 * chunks.len() as u64,
fingerprint,
file_size: FILE_CHUNK_SIZE * chunks.len() as u64,
fingerprint: file_trie.get_root().as_ref().into(),
owner: <AccountId32 as AsRef<[u8]>>::as_ref(&AccountId32::new([0u8; 32])).to_vec(),
location: "location".to_string().into_bytes(),
bucket_id: [1u8; 32].to_vec(),
};
let key = file_metadata.file_key::<BlakeTwo256>();

let key = file_metadata.file_key::<BlakeTwo256>();
let mut file_storage = RocksDbFileStorage::<LayoutV1<BlakeTwo256>, InMemory>::new(storage);
file_storage.insert_file(key, file_metadata).unwrap();

file_storage
.write_chunk(&key, &chunk_ids[0], &chunks[0])
.unwrap();
assert!(file_storage.get_chunk(&key, &chunk_ids[0]).is_ok());

file_storage
.write_chunk(&key, &chunk_ids[1], &chunks[1])
.unwrap();
assert!(file_storage.get_chunk(&key, &chunk_ids[1]).is_ok());
// Insert file metadata first
file_storage.insert_file(key, file_metadata).unwrap();

file_storage
.write_chunk(&key, &chunk_ids[2], &chunks[2])
.unwrap();
assert!(file_storage.get_chunk(&key, &chunk_ids[2]).is_ok());
// Write chunks one by one and verify
for (chunk_id, chunk) in chunk_ids.iter().zip(chunks.iter()) {
let result = file_storage.write_chunk(&key, chunk_id, chunk);
assert!(result.is_ok());
assert!(file_storage.get_chunk(&key, chunk_id).is_ok());
}

// Verify final state
assert!(file_storage.get_metadata(&key).is_ok());
assert!(file_storage.get_chunk(&key, &chunk_ids[0]).is_ok());
assert!(file_storage.get_chunk(&key, &chunk_ids[1]).is_ok());
assert!(file_storage.get_chunk(&key, &chunk_ids[2]).is_ok());
Expand Down
17 changes: 17 additions & 0 deletions node/src/tasks/bsp_upload_file.rs
Original file line number Diff line number Diff line change
Expand Up @@ -839,6 +839,23 @@ where
let file_key = event.file_key.into();
let mut write_file_storage = self.storage_hub_handler.file_storage.write().await;

// Get the file metadata to verify the fingerprint
let file_metadata = write_file_storage
.get_metadata(&file_key)
.map_err(|e| anyhow!("Failed to get file metadata: {:?}", e))?
.ok_or_else(|| anyhow!("File metadata not found"))?;

// Verify that the fingerprint in the proof matches the expected file fingerprint
let expected_fingerprint = file_metadata.fingerprint;
if event.file_key_proof.file_metadata.fingerprint != expected_fingerprint {
error!(
target: LOG_TARGET,
"Fingerprint mismatch for file {:?}. Expected: {:?}, got: {:?}",
file_key, expected_fingerprint, event.file_key_proof.file_metadata.fingerprint
);
return Err(anyhow!("Fingerprint mismatch"));
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

From what I see, in cases like this where we return an error from handle_remote_upload_request_event, we don't unvolunteer from the file. Should we? Honestly, right now we're lowering the user's reputation, eventually blocking them as a peer. Which is not that bad imo.

}

// Verify and extract chunks from proof
let proven = match event
.file_key_proof
Expand Down
17 changes: 14 additions & 3 deletions node/src/tasks/msp_move_bucket.rs
Original file line number Diff line number Diff line change
Expand Up @@ -469,17 +469,28 @@ where

let file_key_proof =
match FileKeyProof::decode(&mut download_request.file_key_proof.as_ref()) {
Ok(proof) => proof,
Ok(file_key_proof) => file_key_proof,
Err(error) => {
error!(
target: LOG_TARGET,
"Failed to decode file key proof: {:?}",
error
"Failed to decode file key proof for chunk {:?} of file {:?}: {:?}",
chunk, file_key, error
);
continue;
}
};

// Verify that the fingerprint in the proof matches the expected file fingerprint
let expected_fingerprint = file_metadata.fingerprint;
if file_key_proof.file_metadata.fingerprint != expected_fingerprint {
error!(
target: LOG_TARGET,
"Fingerprint mismatch for file {:?}. Expected: {:?}, got: {:?}",
file_key, expected_fingerprint, file_key_proof.file_metadata.fingerprint
);
continue;
}

let proven = match file_key_proof.proven::<StorageProofsMerkleTrieLayout>() {
Ok(data) => data,
Err(error) => {
Expand Down
69 changes: 47 additions & 22 deletions node/src/tasks/msp_upload_file.rs
Original file line number Diff line number Diff line change
Expand Up @@ -656,12 +656,13 @@ where
&mut self,
event: RemoteUploadRequest,
) -> anyhow::Result<bool> {
let file_key = event.file_key.into();
let bucket_id = match self
.storage_hub_handler
.file_storage
.read()
.await
.get_metadata(&event.file_key.into())
.get_metadata(&file_key)
{
Ok(metadata) => match metadata {
Some(metadata) => H256(metadata.bucket_id.try_into().unwrap()),
Expand All @@ -678,6 +679,27 @@ where
}
};

// Get the file metadata to verify the fingerprint
let file_metadata = {
let read_file_storage = self.storage_hub_handler.file_storage.read().await;
read_file_storage
.get_metadata(&file_key)
.map_err(|e| anyhow!("Failed to get file metadata: {:?}", e))?
.ok_or_else(|| anyhow!("File metadata not found"))?
};

// Verify that the fingerprint in the proof matches the expected file fingerprint
let expected_fingerprint = file_metadata.fingerprint;
if event.file_key_proof.file_metadata.fingerprint != expected_fingerprint {
error!(
target: LOG_TARGET,
"Fingerprint mismatch for file {:?}. Expected: {:?}, got: {:?}",
file_key, expected_fingerprint, event.file_key_proof.file_metadata.fingerprint
);
return Err(anyhow!("Fingerprint mismatch"));
}

// Verify and extract chunks from proof
let proven = match event
.file_key_proof
.proven::<StorageProofsMerkleTrieLayout>()
Expand Down Expand Up @@ -711,15 +733,19 @@ where
// Handle invalid proof case
let proven = match proven {
Ok(proven) => proven,
Err(e) => {
warn!(target: LOG_TARGET, "{}", e);
Err(error) => {
error!(
target: LOG_TARGET,
"Failed to verify proof for file {:?}: {:?}",
file_key, error
);
self.handle_rejected_storage_request(
&event.file_key.into(),
&file_key,
bucket_id,
RejectedStorageRequestReason::ReceivedInvalidProof,
)
.await?;
return Err(e);
return Err(anyhow!("Failed to verify proof"));
}
};

Expand All @@ -728,8 +754,7 @@ where

// Process each proven chunk in the batch
for chunk in proven {
let write_result =
write_file_storage.write_chunk(&event.file_key.into(), &chunk.key, &chunk.data);
let write_result = write_file_storage.write_chunk(&file_key, &chunk.key, &chunk.data);

match write_result {
Ok(outcome) => match outcome {
Expand All @@ -751,15 +776,15 @@ where
}
FileStorageWriteError::FileDoesNotExist => {
self.handle_rejected_storage_request(
&event.file_key.into(),
&file_key,
bucket_id,
RejectedStorageRequestReason::InternalError,
)
.await?;
return Err(anyhow::anyhow!(format!(
"File does not exist for key {:?}. Maybe we forgot to unregister before deleting?",
event.file_key
)));
"File does not exist for key {:?}. Maybe we forgot to unregister before deleting?",
file_key
)));
}
FileStorageWriteError::FailedToGetFileChunk
| FileStorageWriteError::FailedToInsertFileChunk
Expand All @@ -772,38 +797,38 @@ where
| FileStorageWriteError::FailedToParsePartialRoot
| FileStorageWriteError::FailedToGetStoredChunksCount => {
self.handle_rejected_storage_request(
&event.file_key.into(),
&file_key,
bucket_id,
RejectedStorageRequestReason::InternalError,
)
.await?;
return Err(anyhow::anyhow!(format!(
"Internal trie read/write error {:?}:{:?}",
event.file_key, chunk.key
file_key, chunk.key
)));
}
FileStorageWriteError::FingerprintAndStoredFileMismatch => {
self.handle_rejected_storage_request(
&event.file_key.into(),
&file_key,
bucket_id,
RejectedStorageRequestReason::InternalError,
)
.await?;
return Err(anyhow::anyhow!(format!(
"Invariant broken! This is a bug! Fingerprint and stored file mismatch for key {:?}.",
event.file_key
)));
"Invariant broken! This is a bug! Fingerprint and stored file mismatch for key {:?}.",
file_key
)));
}
FileStorageWriteError::FailedToConstructTrieIter => {
self.handle_rejected_storage_request(
&event.file_key.into(),
&file_key,
bucket_id,
RejectedStorageRequestReason::InternalError,
)
.await?;
return Err(anyhow::anyhow!(format!(
"This is a bug! Failed to construct trie iter for key {:?}.",
event.file_key
file_key
)));
}
},
Expand All @@ -813,18 +838,18 @@ where
// If we haven't found the file to be complete during chunk processing,
// check if it's complete now (in case this was the last batch)
if !file_complete {
match write_file_storage.is_file_complete(&event.file_key.into()) {
match write_file_storage.is_file_complete(&file_key) {
Ok(is_complete) => file_complete = is_complete,
Err(e) => {
self.handle_rejected_storage_request(
&event.file_key.into(),
&file_key,
bucket_id,
RejectedStorageRequestReason::InternalError,
)
.await?;
let err_msg = format!(
"Failed to check if file is complete. The file key {:?} is in a bad state with error: {:?}",
event.file_key, e
file_key, e
);
error!(target: LOG_TARGET, "{}", err_msg);
return Err(anyhow::anyhow!(err_msg));
Expand Down
Loading