adjust clean reset logic (#1668)
* adjust clean reset logic

* fix tests

* properly unit test protectors

* fix and bump 0.15.6

* fmt
brenzi authored Dec 23, 2024
1 parent 3e9ebb0 commit b0d4a8d
Showing 11 changed files with 190 additions and 40 deletions.
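In short: with this change a clean reset (--clean-reset) purges the Integritee, TargetA and TargetB light-client databases and, together with the shards, the sidechain block database as separate steps, and each step is skipped if a corresponding marker file exists in the worker's data directory. A minimal operator-side sketch of protecting one of the stores across a clean reset (the data-dir path is an assumption; the marker file names are taken from the diff below):

use std::fs::File;
use std::path::Path;

fn main() -> std::io::Result<()> {
    // Assumed data dir; substitute the worker's actual data directory.
    let data_dir = Path::new("/opt/integritee/worker-data");

    // An empty marker file is enough: the purge functions only check that the
    // file exists, not what it contains.
    File::create(data_dir.join("integritee_lcdb.protect"))?;

    // Other recognized markers: shards.protect, target_a_lcdb.protect,
    // target_b_lcdb.protect.
    Ok(())
}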
4 changes: 2 additions & 2 deletions Cargo.lock
@@ -2557,7 +2557,7 @@ dependencies = [

[[package]]
name = "integritee-cli"
version = "0.15.5"
version = "0.15.6"
dependencies = [
"array-bytes 6.1.0",
"base58",
@@ -2610,7 +2610,7 @@ dependencies = [

[[package]]
name = "integritee-service"
version = "0.15.5"
version = "0.15.6"
dependencies = [
"anyhow",
"async-trait",
2 changes: 1 addition & 1 deletion cli/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "integritee-cli"
version = "0.15.5"
version = "0.15.6"
authors = ["Integritee AG <[email protected]>"]
edition = "2021"

2 changes: 1 addition & 1 deletion core-primitives/settings/src/lib.rs
@@ -37,7 +37,7 @@ pub mod files {
pub static SHIELDING_KEY_FILE: &str = "enclave-shielding-pubkey.json";
pub static SIGNING_KEY_FILE: &str = "enclave-signing-pubkey.bin";
/// sidechain database path
pub static SIDECHAIN_STORAGE_PATH: &str = "sidechain_db";
pub static SIDECHAIN_BLOCKS_DB_STORAGE_PATH: &str = "sidechain_db";
pub static SIDECHAIN_PURGE_INTERVAL: u64 = 7200; // purge sidechain every .. s
pub static SIDECHAIN_PURGE_LIMIT: u64 = 100; // keep the last.. sidechainblocks when purging

2 changes: 1 addition & 1 deletion enclave-runtime/Cargo.lock
@@ -771,7 +771,7 @@ dependencies = [

[[package]]
name = "enclave-runtime"
version = "0.15.5"
version = "0.15.6"
dependencies = [
"array-bytes 6.2.2",
"cid",
2 changes: 1 addition & 1 deletion enclave-runtime/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "enclave-runtime"
version = "0.15.5"
version = "0.15.6"
authors = ["Integritee AG <[email protected]>"]
edition = "2021"

2 changes: 1 addition & 1 deletion service/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "integritee-service"
version = "0.15.5"
version = "0.15.6"
authors = ["Integritee AG <[email protected]>"]
build = "build.rs"
edition = "2021"
6 changes: 6 additions & 0 deletions service/src/config.rs
@@ -202,6 +202,12 @@ impl Config {
pub fn try_parse_untrusted_http_server_port(&self) -> Option<u16> {
self.untrusted_http_port.parse::<u16>().ok()
}

pub fn with_test_data_dir(&self) -> Self {
let mut new = self.clone();
new.data_dir.push("test");
new
}
}

impl From<&ArgMatches<'_>> for Config {
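A self-contained sketch of the clone-and-push pattern used by the new with_test_data_dir helper; DemoConfig is a hypothetical stand-in, not the real service Config:

use std::path::PathBuf;

#[derive(Clone)]
struct DemoConfig {
    data_dir: PathBuf,
}

impl DemoConfig {
    // Same shape as Config::with_test_data_dir above: clone the config and
    // append a "test" sub-directory so test runs never touch the real data dir.
    fn with_test_data_dir(&self) -> Self {
        let mut new = self.clone();
        new.data_dir.push("test");
        new
    }
}

fn main() {
    let cfg = DemoConfig { data_dir: PathBuf::from("/opt/worker-data") };
    assert_eq!(cfg.with_test_data_dir().data_dir, PathBuf::from("/opt/worker-data/test"));
}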
19 changes: 15 additions & 4 deletions service/src/main_impl.rs
@@ -85,6 +85,7 @@ use ita_parentchain_interface::{
};
use itc_parentchain::primitives::ParentchainId;
use itp_node_api::api_client::ChainApi;
use itp_settings::files::SHARDS_PATH;
use itp_types::parentchain::{AccountId, Balance, Index};
use sp_core::crypto::{AccountId32, Ss58Codec};
use sp_keyring::AccountKeyring;
@@ -148,7 +149,10 @@ pub(crate) fn main() {

let clean_reset = matches.is_present("clean-reset");
if clean_reset {
crate::setup::purge_files_from_dir(config.data_dir()).unwrap();
println!("[+] Performing a clean reset of the worker");
setup::purge_integritee_lcdb_unless_protected(config.data_dir()).unwrap();
setup::purge_target_a_lcdb_unless_protected(config.data_dir()).unwrap();
setup::purge_target_b_lcdb_unless_protected(config.data_dir()).unwrap();
}

// build the entire dependency tree
@@ -227,11 +231,18 @@ pub(crate) fn main() {
};

if let Some(run_config) = config.run_config() {
let shard = extract_shard(run_config.shard(), enclave.as_ref());

println!("Worker Config: {:?}", config);

if clean_reset {
let shard = extract_shard(run_config.shard(), enclave.as_ref());

let mut shard_path = PathBuf::from(config.data_dir());
shard_path.push(SHARDS_PATH);
shard_path.push(shard.encode().to_base58());
println!("Worker Shard Path: {:?}", shard_path);
if clean_reset || std::fs::metadata(shard_path).is_err() {
// we default to purge here because we don't want to leave behind blocks
// for deprecated shards in the sidechain_db
setup::purge_shards_unless_protected(config.data_dir()).unwrap();
setup::initialize_shard_and_keys(enclave.as_ref(), &shard).unwrap();
}

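To summarize the flow introduced above: the light-client databases are purged on every clean reset, while the shards and the sidechain block database are also purged when the directory of the configured shard does not exist yet, so blocks of deprecated shards are not left behind. A small sketch of that condition (the function name is an assumption, not part of the commit):

use std::path::Path;

// clean_reset comes from the --clean-reset flag; shard_path is
// <data-dir>/shards/<base58 shard id>, as built in main_impl.rs above.
fn should_purge_shards_and_sidechain_db(clean_reset: bool, shard_path: &Path) -> bool {
    clean_reset || std::fs::metadata(shard_path).is_err()
}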
174 changes: 150 additions & 24 deletions service/src/setup.rs
@@ -18,15 +18,17 @@

use crate::error::{Error, ServiceResult};
use itp_settings::files::{
INTEGRITEE_PARENTCHAIN_LIGHT_CLIENT_DB_PATH, SHARDS_PATH, SIDECHAIN_STORAGE_PATH,
INTEGRITEE_PARENTCHAIN_LIGHT_CLIENT_DB_PATH, SHARDS_PATH, SIDECHAIN_BLOCKS_DB_STORAGE_PATH,
TARGET_A_PARENTCHAIN_LIGHT_CLIENT_DB_PATH, TARGET_B_PARENTCHAIN_LIGHT_CLIENT_DB_PATH,
};
use std::{fs, path::Path};

#[cfg(feature = "link-binary")]
pub(crate) use needs_enclave::{
generate_shielding_key_file, generate_signing_key_file, init_shard, initialize_shard_and_keys,
};
use std::{
fs,
path::{Path, PathBuf},
};

#[cfg(feature = "link-binary")]
mod needs_enclave {
@@ -35,8 +37,8 @@ mod needs_enclave {
use itp_enclave_api::{enclave_base::EnclaveBase, Enclave};
use itp_settings::files::{
INTEGRITEE_PARENTCHAIN_LIGHT_CLIENT_DB_PATH, SHARDS_PATH, SHIELDING_KEY_FILE,
SIDECHAIN_STORAGE_PATH, SIGNING_KEY_FILE, TARGET_A_PARENTCHAIN_LIGHT_CLIENT_DB_PATH,
TARGET_B_PARENTCHAIN_LIGHT_CLIENT_DB_PATH,
SIDECHAIN_BLOCKS_DB_STORAGE_PATH, SIGNING_KEY_FILE,
TARGET_A_PARENTCHAIN_LIGHT_CLIENT_DB_PATH, TARGET_B_PARENTCHAIN_LIGHT_CLIENT_DB_PATH,
};
use itp_types::ShardIdentifier;
use log::*;
@@ -47,7 +49,7 @@ mod needs_enclave {
enclave: &Enclave,
shard_identifier: &ShardIdentifier,
) -> ServiceResult<()> {
println!("[+] Initialize the shard");
println!("[+] Initialize the shard: {:?}", shard_identifier);
init_shard(enclave, shard_identifier);

let pubkey = enclave.get_ecc_signing_pubkey().unwrap();
@@ -103,25 +105,53 @@
}
}

/// Purge all worker files from `dir`.
pub(crate) fn purge_files_from_dir(dir: &Path) -> ServiceResult<()> {
println!("[+] Performing a clean reset of the worker");

println!("[+] Purge all files from previous runs");
purge_files(dir)?;

/// Purge all worker files in a given path.
pub(crate) fn purge_shards_unless_protected(root_directory: &Path) -> ServiceResult<()> {
let mut protectfile = PathBuf::from(root_directory);
protectfile.push("shards.protect");
if fs::metadata(protectfile.clone()).is_ok() {
println!(" all shards and sidechain db are protected by {:?}", protectfile);
} else {
println!("[+] Purge all shards and sidechain blocks from previous runs");
remove_dir_if_it_exists(root_directory, SHARDS_PATH)?;
remove_dir_if_it_exists(root_directory, SIDECHAIN_BLOCKS_DB_STORAGE_PATH)?;
}
Ok(())
}

/// Purge all worker files in a given path.
fn purge_files(root_directory: &Path) -> ServiceResult<()> {
remove_dir_if_it_exists(root_directory, SHARDS_PATH)?;
remove_dir_if_it_exists(root_directory, SIDECHAIN_STORAGE_PATH)?;
pub(crate) fn purge_integritee_lcdb_unless_protected(root_directory: &Path) -> ServiceResult<()> {
let mut protectfile = PathBuf::from(root_directory);
protectfile.push("integritee_lcdb.protect");
if fs::metadata(protectfile.clone()).is_ok() {
println!(" Integritee light-client dB is protected by {:?}", protectfile);
} else {
println!("[+] Purge Integritee light-client db from previous runs");
remove_dir_if_it_exists(root_directory, INTEGRITEE_PARENTCHAIN_LIGHT_CLIENT_DB_PATH)?;
}
Ok(())
}

remove_dir_if_it_exists(root_directory, INTEGRITEE_PARENTCHAIN_LIGHT_CLIENT_DB_PATH)?;
remove_dir_if_it_exists(root_directory, TARGET_A_PARENTCHAIN_LIGHT_CLIENT_DB_PATH)?;
remove_dir_if_it_exists(root_directory, TARGET_B_PARENTCHAIN_LIGHT_CLIENT_DB_PATH)?;
pub(crate) fn purge_target_a_lcdb_unless_protected(root_directory: &Path) -> ServiceResult<()> {
let mut protectfile = PathBuf::from(root_directory);
protectfile.push("target_a_lcdb.protect");
if fs::metadata(protectfile.clone()).is_ok() {
println!(" TargetA light-client dB is protected by {:?}", protectfile);
} else {
println!("[+] Purge TargetA light-client db from previous runs");
remove_dir_if_it_exists(root_directory, TARGET_A_PARENTCHAIN_LIGHT_CLIENT_DB_PATH)?;
}
Ok(())
}

pub(crate) fn purge_target_b_lcdb_unless_protected(root_directory: &Path) -> ServiceResult<()> {
let mut protectfile = PathBuf::from(root_directory);
protectfile.push("target_b_lcdb.protect");
if fs::metadata(protectfile.clone()).is_ok() {
println!(" TargetB light-client dB is protected by {:?}", protectfile);
} else {
println!("[+] Purge TargetB light-client db from previous runs");
remove_dir_if_it_exists(root_directory, TARGET_B_PARENTCHAIN_LIGHT_CLIENT_DB_PATH)?;
}
Ok(())
}

Expand Down Expand Up @@ -150,7 +180,7 @@ mod tests {
fs::File::create(&shards_path.join("state_1.bin")).unwrap();
fs::File::create(&shards_path.join("state_2.bin")).unwrap();

let sidechain_db_path = root_directory.join(SIDECHAIN_STORAGE_PATH);
let sidechain_db_path = root_directory.join(SIDECHAIN_BLOCKS_DB_STORAGE_PATH);
fs::create_dir_all(&sidechain_db_path).unwrap();
fs::File::create(&sidechain_db_path.join("sidechain_db_1.bin")).unwrap();
fs::File::create(&sidechain_db_path.join("sidechain_db_2.bin")).unwrap();
@@ -163,12 +193,14 @@ mod tests {
fs::create_dir_all(&root_directory.join(TARGET_B_PARENTCHAIN_LIGHT_CLIENT_DB_PATH))
.unwrap();

purge_files(&root_directory).unwrap();

purge_shards_unless_protected(&root_directory).unwrap();
assert!(!shards_path.exists());
assert!(!sidechain_db_path.exists());
purge_integritee_lcdb_unless_protected(&root_directory).unwrap();
assert!(!root_directory.join(INTEGRITEE_PARENTCHAIN_LIGHT_CLIENT_DB_PATH).exists());
purge_target_a_lcdb_unless_protected(&root_directory).unwrap();
assert!(!root_directory.join(TARGET_A_PARENTCHAIN_LIGHT_CLIENT_DB_PATH).exists());
purge_target_b_lcdb_unless_protected(&root_directory).unwrap();
assert!(!root_directory.join(TARGET_B_PARENTCHAIN_LIGHT_CLIENT_DB_PATH).exists());
}

@@ -179,9 +211,103 @@ mod tests {
));
let root_directory = test_directory_handle.path();

assert!(purge_files(&root_directory).is_ok());
assert!(purge_shards_unless_protected(&root_directory).is_ok());
assert!(purge_integritee_lcdb_unless_protected(&root_directory).is_ok());
assert!(purge_target_a_lcdb_unless_protected(&root_directory).is_ok());
assert!(purge_target_b_lcdb_unless_protected(&root_directory).is_ok());
}

#[test]
fn purge_shards_protect_file_respected() {
let test_directory_handle = TestDirectoryHandle::new(PathBuf::from("test_protect_shard"));
let root_directory = test_directory_handle.path();

let shards_path = root_directory.join(SHARDS_PATH);
fs::create_dir_all(&shards_path).unwrap();
fs::File::create(&shards_path.join("state_1.bin")).unwrap();
fs::File::create(&shards_path.join("state_2.bin")).unwrap();

let sidechain_db_path = root_directory.join(SIDECHAIN_BLOCKS_DB_STORAGE_PATH);
fs::create_dir_all(&sidechain_db_path).unwrap();
fs::File::create(&sidechain_db_path.join("sidechain_db_1.bin")).unwrap();
fs::File::create(&sidechain_db_path.join("sidechain_db_2.bin")).unwrap();
fs::File::create(&sidechain_db_path.join("sidechain_db_3.bin")).unwrap();

let protector_path = root_directory.join("shards.protect");
fs::File::create(&protector_path).unwrap();

purge_shards_unless_protected(&root_directory).unwrap();
assert!(shards_path.exists());
assert!(sidechain_db_path.exists());

fs::remove_file(&protector_path).unwrap();
while protector_path.exists() {
std::thread::sleep(std::time::Duration::from_millis(100));
}
purge_shards_unless_protected(&root_directory).unwrap();
assert!(!shards_path.exists());
assert!(!sidechain_db_path.exists());
}

#[test]
fn purge_integritee_lcdb_protect_file_respected() {
let test_directory_handle =
TestDirectoryHandle::new(PathBuf::from("test_protect_integritee_lcdb"));
let root_directory = test_directory_handle.path();

let lcdb_path = root_directory.join(INTEGRITEE_PARENTCHAIN_LIGHT_CLIENT_DB_PATH);
fs::create_dir_all(&lcdb_path).unwrap();

let protector_path = root_directory.join("integritee_lcdb.protect");
fs::File::create(&protector_path).unwrap();

purge_integritee_lcdb_unless_protected(&root_directory).unwrap();
assert!(lcdb_path.exists());

fs::remove_file(&protector_path).unwrap();
purge_integritee_lcdb_unless_protected(&root_directory).unwrap();
assert!(!lcdb_path.exists());
}

#[test]
fn purge_target_a_lcdb_protect_file_respected() {
let test_directory_handle =
TestDirectoryHandle::new(PathBuf::from("test_protect_target_a_lcdb"));
let root_directory = test_directory_handle.path();

let lcdb_path = root_directory.join(TARGET_A_PARENTCHAIN_LIGHT_CLIENT_DB_PATH);
fs::create_dir_all(&lcdb_path).unwrap();

let protector_path = root_directory.join("target_a_lcdb.protect");
fs::File::create(&protector_path).unwrap();

purge_target_a_lcdb_unless_protected(&root_directory).unwrap();
assert!(lcdb_path.exists());

fs::remove_file(&protector_path).unwrap();
purge_target_a_lcdb_unless_protected(&root_directory).unwrap();
assert!(!lcdb_path.exists());
}

#[test]
fn purge_target_b_lcdb_protect_file_respected() {
let test_directory_handle =
TestDirectoryHandle::new(PathBuf::from("test_protect_target_b_lcdb"));
let root_directory = test_directory_handle.path();

let lcdb_path = root_directory.join(TARGET_B_PARENTCHAIN_LIGHT_CLIENT_DB_PATH);
fs::create_dir_all(&lcdb_path).unwrap();

let protector_path = root_directory.join("target_b_lcdb.protect");
fs::File::create(&protector_path).unwrap();

purge_target_b_lcdb_unless_protected(&root_directory).unwrap();
assert!(lcdb_path.exists());

fs::remove_file(&protector_path).unwrap();
purge_target_b_lcdb_unless_protected(&root_directory).unwrap();
assert!(!lcdb_path.exists());
}
/// Directory handle to automatically initialize a directory
/// and upon dropping the reference, removing it again.
struct TestDirectoryHandle {
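The four purge_*_unless_protected functions above share one shape: look for a marker file in the data directory and skip the purge if it exists. A hypothetical refactoring into a single parameterized helper could look roughly like this (not part of the commit; keeping four explicit functions lets each protector be unit-tested independently, as the tests above do):

use std::{fs, io, path::Path};

fn purge_unless_protected(root: &Path, protect_file: &str, purge_dirs: &[&str]) -> io::Result<()> {
    let marker = root.join(protect_file);
    if marker.exists() {
        println!("  {:?} are protected by {:?}", purge_dirs, marker);
        return Ok(());
    }
    for dir in purge_dirs {
        let path = root.join(dir);
        if path.exists() {
            // same effect as the crate's remove_dir_if_it_exists helper
            fs::remove_dir_all(&path)?;
        }
    }
    Ok(())
}

For the shards case this would be called with both SHARDS_PATH and SIDECHAIN_BLOCKS_DB_STORAGE_PATH, matching purge_shards_unless_protected.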
13 changes: 10 additions & 3 deletions service/src/tests/mod.rs
@@ -34,15 +34,22 @@ pub fn run_enclave_tests(matches: &ArgMatches) {
use itp_enclave_api::enclave_test::EnclaveTest;

println!("*** Starting Test enclave");
let config = Config::from(matches);
setup::purge_files_from_dir(config.data_dir()).unwrap();
let mut config = Config::from(matches).with_test_data_dir();
println!(" creating temporary working dir for tests: {:?}", config.data_dir());
std::fs::create_dir_all(config.data_dir()).unwrap();
setup::purge_shards_unless_protected(config.data_dir()).unwrap();
setup::purge_integritee_lcdb_unless_protected(config.data_dir()).unwrap();
setup::purge_target_a_lcdb_unless_protected(config.data_dir()).unwrap();
setup::purge_target_b_lcdb_unless_protected(config.data_dir()).unwrap();

let enclave = enclave_init(&config).unwrap();

if matches.is_present("all") || matches.is_present("unit") {
println!("Running unit Tests");
enclave.test_main_entrance().unwrap();
println!("[+] unit_test ended!");
}

// clean up test directory
std::fs::remove_dir_all(config.data_dir()).unwrap();
println!("[+] All tests ended!");
}
4 changes: 2 additions & 2 deletions sidechain/storage/src/storage.rs
@@ -17,7 +17,7 @@

use super::{db::SidechainDB, Error, Result};
use codec::{Decode, Encode};
use itp_settings::files::SIDECHAIN_STORAGE_PATH;
use itp_settings::files::SIDECHAIN_BLOCKS_DB_STORAGE_PATH;
use its_primitives::{
traits::{Block as BlockTrait, Header as HeaderTrait, SignedBlock as SignedBlockT},
types::{BlockHash, BlockNumber},
@@ -63,7 +63,7 @@ impl<SignedBlock: SignedBlockT> SidechainStorage<SignedBlock> {
/// Loads existing shards and their last blocks in memory for better performance.
pub fn load_from_base_path(base_path: PathBuf) -> Result<SidechainStorage<SignedBlock>> {
// load db
let db = SidechainDB::open_default(base_path.join(SIDECHAIN_STORAGE_PATH))?;
let db = SidechainDB::open_default(base_path.join(SIDECHAIN_BLOCKS_DB_STORAGE_PATH))?;
let mut storage = SidechainStorage { db, shards: vec![], last_blocks: HashMap::new() };
storage.shards = storage.load_shards_from_db()?;
// get last block of each shard
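Note that SIDECHAIN_BLOCKS_DB_STORAGE_PATH is only a rename of SIDECHAIN_STORAGE_PATH; its value stays "sidechain_db", so the on-disk location of existing databases is unchanged. A minimal sketch of the path resolution (constant value copied from core-primitives/settings above):

use std::path::{Path, PathBuf};

pub static SIDECHAIN_BLOCKS_DB_STORAGE_PATH: &str = "sidechain_db";

// Mirrors SidechainStorage::load_from_base_path: the block DB lives directly
// under the worker's base path, e.g. <data-dir>/sidechain_db.
fn sidechain_db_path(base_path: &Path) -> PathBuf {
    base_path.join(SIDECHAIN_BLOCKS_DB_STORAGE_PATH)
}

fn main() {
    assert_eq!(
        sidechain_db_path(Path::new("/opt/worker-data")),
        PathBuf::from("/opt/worker-data/sidechain_db")
    );
}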
