Integration test working
Kimahriman committed Oct 22, 2023
1 parent bee51b4 commit 96143c9
Showing 5 changed files with 330 additions and 28 deletions.
rust/minidfs/src/main/java/Main.java: 30 additions & 14 deletions
@@ -22,6 +22,7 @@
 import org.apache.hadoop.security.token.Token;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.*;
+import static org.apache.hadoop.fs.viewfs.Constants.*;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.*;
 
@@ -57,13 +58,7 @@ public static void main(String args[]) throws Exception {
             conf.set(DFSConfigKeys.IGNORE_SECURE_PORTS_FOR_TESTING_KEY, "true");
         }
 
-        MiniDFSNNTopology nnTopology = null;
-        if (flags.contains("ha")) {
-            nnTopology = MiniDFSNNTopology.simpleHATopology(3);
-            conf.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX + ".minidfs-ns", "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
-            conf.set(DFSConfigKeys.DFS_NAMENODE_STATE_CONTEXT_ENABLED_KEY, "true");
-            conf.set(DFSConfigKeys.DFS_HA_TAILEDITS_INPROGRESS_KEY, "true");
-        }
+        MiniDFSNNTopology nnTopology = generateTopology(flags, conf);
 
         int numDataNodes = 1;
         if (flags.contains("ec")) {
@@ -79,17 +74,15 @@ public static void main(String args[]) throws Exception {
             .numDataNodes(numDataNodes)
             .build();
 
-        if (flags.contains("ha")) {
-            hdfsConf.set(FS_DEFAULT_NAME_KEY, "hdfs://minidfs-ns");
-        } else {
-            hdfsConf.set(FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:9000");
-        }
-
         hdfsConf.writeXml(new FileOutputStream("target/test/core-site.xml"));
         dfs.waitActive();
 
         int activeNamenode = 0;
-        if (flags.contains("ha")) {
+        if (flags.contains("viewfs")) {
+            // Each name services has two namenodes
+            dfs.transitionToActive(0);
+            dfs.transitionToActive(2);
+        } else if (flags.contains("ha")) {
             activeNamenode = 2;
             // dfs.transitionToObserver(1);
             dfs.transitionToActive(activeNamenode);
@@ -139,4 +132,27 @@ public static void main(String args[]) throws Exception {
             kdc.stop();
         }
     }
+
+    public static MiniDFSNNTopology generateTopology(Set<String> flags, Configuration conf) {
+        MiniDFSNNTopology nnTopology = null;
+        if (flags.contains("viewfs")) {
+            nnTopology = MiniDFSNNTopology.simpleHAFederatedTopology(2);
+            conf.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX + ".ns0", "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
+            conf.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX + ".ns1", "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
+            conf.set(DFSConfigKeys.DFS_NAMENODE_STATE_CONTEXT_ENABLED_KEY, "true");
+            conf.set(DFSConfigKeys.DFS_HA_TAILEDITS_INPROGRESS_KEY, "true");
+            conf.set(CONFIG_VIEWFS_PREFIX + ".minidfs-viewfs.link./mount1", "hdfs://ns0/nested");
+            conf.set(CONFIG_VIEWFS_PREFIX + ".minidfs-viewfs.linkFallback", "hdfs://ns1/nested");
+            conf.set(FS_DEFAULT_NAME_KEY, "viewfs://minidfs-viewfs");
+        } else if (flags.contains("ha")) {
+            nnTopology = MiniDFSNNTopology.simpleHATopology(3);
+            conf.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX + ".minidfs-ns", "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
+            conf.set(DFSConfigKeys.DFS_NAMENODE_STATE_CONTEXT_ENABLED_KEY, "true");
+            conf.set(DFSConfigKeys.DFS_HA_TAILEDITS_INPROGRESS_KEY, "true");
+            conf.set(FS_DEFAULT_NAME_KEY, "hdfs://minidfs-ns");
+        } else {
+            conf.set(FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:9000");
+        }
+        return nnTopology;
+    }
 }
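
Taken on its own, the viewfs branch of generateTopology gives the client a two-name-service federated HA cluster with a mount table named minidfs-viewfs: /mount1 links to hdfs://ns0/nested, everything else falls back to hdfs://ns1/nested, and the default filesystem is viewfs://minidfs-viewfs. As a rough Rust-side sketch of what lands in target/test/core-site.xml, expanded to the literal key names the Hadoop constants above usually resolve to (this listing is illustrative only, not code from this commit):

// Hypothetical listing of the client-visible keys written by generateTopology
// when the "viewfs" flag is set; the literal key names assume the standard values
// behind PROXY_PROVIDER_KEY_PREFIX, CONFIG_VIEWFS_PREFIX, and FS_DEFAULT_NAME_KEY.
fn viewfs_client_keys() -> Vec<(&'static str, &'static str)> {
    vec![
        // Failover proxy providers for the two federated name services
        (
            "dfs.client.failover.proxy.provider.ns0",
            "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider",
        ),
        (
            "dfs.client.failover.proxy.provider.ns1",
            "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider",
        ),
        // Mount table for the minidfs-viewfs cluster: one explicit link plus a fallback
        ("fs.viewfs.mounttable.minidfs-viewfs.link./mount1", "hdfs://ns0/nested"),
        ("fs.viewfs.mounttable.minidfs-viewfs.linkFallback", "hdfs://ns1/nested"),
        ("fs.defaultFS", "viewfs://minidfs-viewfs"),
    ]
}

Note that the hdfs://ns0 and hdfs://ns1 targets carry no host:port; resolving them relies on the proxy-provider entries for each name service rather than a fixed namenode address.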
rust/src/common/config.rs: 52 additions & 11 deletions
@@ -130,22 +130,63 @@ mod test {
 
     #[test]
     fn test_mount_table_config() {
+        let mounts = [
+            ("clusterX", "/view1", "/hdfs1"),
+            ("clusterX", "/view2", "/hdfs2"),
+            ("clusterY", "/view3", "/hdfs3"),
+        ];
+
+        let fallbacks = [("clusterX", "/hdfs4"), ("clusterY", "/hdfs5")];
+
         let config = Configuration {
-            map: [(
-                format!("{}.clusterX.link./view", VIEWFS_MOUNTTABLE_PREFIX),
-                "hdfs://127.0.0.1:9000/hdfs".to_string(),
-            )]
-            .into(),
+            map: mounts
+                .iter()
+                .map(|(cluster, viewfs_path, hdfs_path)| {
+                    (
+                        format!(
+                            "{}.{}.link.{}",
+                            VIEWFS_MOUNTTABLE_PREFIX, cluster, viewfs_path
+                        ),
+                        format!("hdfs://127.0.0.1:9000{}", hdfs_path),
+                    )
+                })
+                .chain(fallbacks.iter().map(|(cluster, hdfs_path)| {
+                    (
+                        format!("{}.{}.linkFallback", VIEWFS_MOUNTTABLE_PREFIX, cluster),
+                        format!("hdfs://127.0.0.1:9000{}", hdfs_path),
+                    )
+                }))
+                .collect(),
         };
 
+        let mut mount_table = config.get_mount_table("clusterX");
+        mount_table.sort();
         assert_eq!(
-            config.get_mount_table("clusterX"),
-            vec![(
-                Some("/view".to_string()),
-                "hdfs://127.0.0.1:9000/hdfs".to_string()
-            )]
+            vec![
+                (None, "hdfs://127.0.0.1:9000/hdfs4".to_string()),
+                (
+                    Some("/view1".to_string()),
+                    "hdfs://127.0.0.1:9000/hdfs1".to_string()
+                ),
+                (
+                    Some("/view2".to_string()),
+                    "hdfs://127.0.0.1:9000/hdfs2".to_string()
+                )
+            ],
+            mount_table
         );
 
-        assert_eq!(config.get_mount_table("clusterY"), Vec::new());
+        let mut mount_table = config.get_mount_table("clusterY");
+        mount_table.sort();
+        assert_eq!(
+            mount_table,
+            vec![
+                (None, "hdfs://127.0.0.1:9000/hdfs5".to_string()),
+                (
+                    Some("/view3".to_string()),
+                    "hdfs://127.0.0.1:9000/hdfs3".to_string()
+                )
+            ]
+        );
     }
 }
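
The reworked test pins down what get_mount_table returns: a Vec<(Option<String>, String)> in which Some(prefix) entries are explicit links and the None entry is the linkFallback target. A hedged sketch of how a caller might resolve a viewfs path against such a table follows; resolve_viewfs_path is a hypothetical helper, not part of this crate, and it assumes Hadoop-style longest-prefix-wins resolution:

// Hypothetical: turn a viewfs path into a concrete HDFS URI using the
// (Option<mount prefix>, target URI) pairs returned by Configuration::get_mount_table.
// The longest matching Some(prefix) wins; the None entry acts as the fallback mount.
fn resolve_viewfs_path(path: &str, mount_table: &[(Option<String>, String)]) -> Option<String> {
    let mut best: Option<(&str, &str)> = None; // (matched prefix, target)
    let mut fallback: Option<&str> = None;
    for (prefix, target) in mount_table {
        match prefix {
            Some(p) if path.starts_with(p.as_str()) => {
                // Keep the longest matching link prefix
                if best.map_or(true, |(b, _)| p.len() > b.len()) {
                    best = Some((p.as_str(), target.as_str()));
                }
            }
            None => fallback = Some(target.as_str()),
            _ => {}
        }
    }
    match best {
        Some((p, target)) => Some(format!("{}{}", target, &path[p.len()..])),
        None => fallback.map(|target| format!("{}{}", target, path)),
    }
}

With the clusterX table from the test above, "/view1/file" would map to hdfs://127.0.0.1:9000/hdfs1/file, while an unmatched path such as "/other/file" would fall back to hdfs://127.0.0.1:9000/hdfs4/other/file.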
rust/src/minidfs.rs: 5 additions & 1 deletion
@@ -13,6 +13,7 @@ pub enum DfsFeatures {
     TOKEN,
     PRIVACY,
     HA,
+    VIEWFS,
     EC,
 }
 
@@ -21,6 +22,7 @@ impl DfsFeatures {
         match self {
             DfsFeatures::EC => "ec",
             DfsFeatures::HA => "ha",
+            DfsFeatures::VIEWFS => "viewfs",
             DfsFeatures::PRIVACY => "privacy",
             DfsFeatures::SECURITY => "security",
             DfsFeatures::TOKEN => "token",
@@ -116,7 +118,9 @@ impl MiniDfs {
             }
         }
 
-        let url = if features.contains(&DfsFeatures::HA) {
+        let url = if features.contains(&DfsFeatures::VIEWFS) {
+            "viewfs://minidfs-viewfs"
+        } else if features.contains(&DfsFeatures::HA) {
             "hdfs://minidfs-ns"
         } else {
             "hdfs://127.0.0.1:9000"
rust/tests/common/mod.rs: 0 additions & 2 deletions
@@ -31,8 +31,6 @@ pub fn setup(features: &HashSet<DfsFeatures>) -> MiniDfs {
             file.path().to_str().unwrap(),
             &format!("{}/testfile", dfs.url),
         ])
-        .stdout(Stdio::null())
-        .stderr(Stdio::null())
         .status()
         .unwrap();
     assert!(status.success());
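
With the minidfs pieces above in place, an integration test can presumably request the viewfs topology through the same setup helper; a minimal sketch (illustrative only, not a test from this commit, assuming setup and DfsFeatures are imported from the crate's test support code):

use std::collections::HashSet;

#[test]
fn test_viewfs_setup() {
    // setup() passes the feature set as flags to the Java MiniDFS runner and copies a
    // test file under dfs.url, which is "viewfs://minidfs-viewfs" for this feature set.
    let dfs = setup(&HashSet::from([DfsFeatures::VIEWFS]));
    assert_eq!(dfs.url, "viewfs://minidfs-viewfs");

    // A client pointed at dfs.url would see /mount1 resolve to ns0 and all other
    // paths fall back to ns1, per the mount table written by Main.java.
}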