diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3HttpServer.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3HttpServer.java
index ad410d4d388f3..b627699abbf84 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3HttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3HttpServer.java
@@ -52,7 +52,7 @@ void start() throws IOException {
NfsConfigKeys.NFS_HTTPS_ADDRESS_DEFAULT);
InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
- HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
+ HttpServer2.Builder builder = DFSUtil.getHttpServerTemplate(conf,
httpAddr, httpsAddr, "nfs3",
NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY,
NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHttpServer.java
index 229b47d7d9e3c..3068aa599b6b7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHttpServer.java
@@ -82,7 +82,7 @@ protected void serviceInit(Configuration configuration) throws Exception {
protected void serviceStart() throws Exception {
// Build and start server
String webApp = "router";
- HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(
+ HttpServer2.Builder builder = DFSUtil.getHttpServerTemplate(
this.conf, this.httpAddress, this.httpsAddress, webApp,
RBFConfigKeys.DFS_ROUTER_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
RBFConfigKeys.DFS_ROUTER_KEYTAB_FILE_KEY);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 13150360767a9..5128e5054db22 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -283,6 +283,9 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
+
@@ -428,6 +431,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
src/main/webapps/hdfs/robots.txtsrc/main/webapps/journal/robots.txtsrc/main/webapps/secondary/robots.txt
+ src/main/webapps/balancer/robots.txtsrc/contrib/**src/site/resources/images/*src/main/webapps/static/bootstrap-3.4.1/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index dd3193fdadff2..fc08dd87571ba 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -811,6 +811,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_BALANCER_ADDRESS_DEFAULT= "0.0.0.0:0";
public static final String DFS_BALANCER_KEYTAB_FILE_KEY = "dfs.balancer.keytab.file";
public static final String DFS_BALANCER_KERBEROS_PRINCIPAL_KEY = "dfs.balancer.kerberos.principal";
+ public static final String DFS_BALANCER_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY =
+ "dfs.balancer.kerberos.internal.spnego.principal";
public static final String DFS_BALANCER_BLOCK_MOVE_TIMEOUT = "dfs.balancer.block-move.timeout";
public static final int DFS_BALANCER_BLOCK_MOVE_TIMEOUT_DEFAULT = 0;
public static final String DFS_BALANCER_MAX_NO_MOVE_INTERVAL_KEY = "dfs.balancer.max-no-move-interval";
@@ -821,6 +823,19 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final long DFS_BALANCER_SERVICE_INTERVAL_DEFAULT = TimeUnit.MINUTES.toMillis(5); //5 mins
public static final String DFS_BALANCER_SERVICE_RETRIES_ON_EXCEPTION = "dfs.balancer.service.retries.on.exception";
public static final int DFS_BALANCER_SERVICE_RETRIES_ON_EXCEPTION_DEFAULT = 5;
+ public static final String DFS_BALANCER_HTTPSERVER_ENABLED_KEY =
+ "dfs.balancer.httpserver.enabled";
+ public static final Boolean DFS_BALANCER_HTTPSERVER_ENABLED_DEFAULT = false;
+ public static final String DFS_BALANCER_HTTP_ADDRESS_KEY = "dfs.balancer.http-address";
+ public static final int DFS_BALANCER_HTTP_PORT_DEFAULT = 8590;
+ public static final String DFS_BALANCER_HTTP_BIND_HOST_KEY = "dfs.balancer.http-bind-host";
+ public static final String DFS_BALANCER_HTTP_ADDRESS_DEFAULT =
+ "0.0.0.0:" + DFS_BALANCER_HTTP_PORT_DEFAULT;
+ public static final String DFS_BALANCER_HTTPS_ADDRESS_KEY = "dfs.balancer.https-address";
+ public static final int DFS_BALANCER_HTTPS_PORT_DEFAULT = 8591;
+ public static final String DFS_BALANCER_HTTPS_BIND_HOST_KEY = "dfs.balancer.https-bind-host";
+ public static final String DFS_BALANCER_HTTPS_ADDRESS_DEFAULT =
+ "0.0.0.0:" + DFS_BALANCER_HTTPS_PORT_DEFAULT;
public static final String DFS_MOVER_MOVEDWINWIDTH_KEY = "dfs.mover.movedWinWidth";
public static final long DFS_MOVER_MOVEDWINWIDTH_DEFAULT = 5400*1000L;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 3ecb20bc6a223..69c9cbd6f074a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -1736,11 +1736,11 @@ public static Configuration loadSslConfiguration(Configuration conf) {
}
/**
- * Return a HttpServer.Builder that the journalnode / namenode / secondary
+ * Return a HttpServer.Builder that the journalnode / namenode / router / balancer / secondary
* namenode can use to initialize their HTTP / HTTPS server.
*
*/
- public static HttpServer2.Builder httpServerTemplateForNNAndJN(
+ public static HttpServer2.Builder getHttpServerTemplate(
Configuration conf, final InetSocketAddress httpAddr,
final InetSocketAddress httpsAddr, String name, String spnegoUserNameKey,
String spnegoKeytabFileKey) throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java
index 4f51fe8b506e0..0d875a9c271bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java
@@ -73,7 +73,7 @@ void start() throws IOException {
}
}
- HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
+ HttpServer2.Builder builder = DFSUtil.getHttpServerTemplate(conf,
httpAddr, httpsAddr, "journal",
DFSConfigKeys.DFS_JOURNALNODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
index ff0d8f2c36867..d7b506075d4ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
@@ -38,6 +38,8 @@
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
+import javax.management.ObjectName;
+
import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.source.JvmMetrics;
@@ -66,6 +68,7 @@
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
@@ -76,6 +79,7 @@
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.util.Preconditions;
+import org.apache.hadoop.util.VersionInfo;
/**
The balancer is a tool that balances disk space usage on an HDFS cluster
* when some datanodes become full or when new empty nodes join the cluster.
@@ -180,7 +184,7 @@
*/
@InterfaceAudience.Private
-public class Balancer {
+public class Balancer implements BalancerMXBean {
static final Logger LOG = LoggerFactory.getLogger(Balancer.class);
static final Path BALANCER_ID_PATH = new Path("/system/balancer.id");
@@ -241,6 +245,7 @@ public class Balancer {
private final boolean sortTopNodes;
private final int limitOverUtilizedNum;
private final BalancerMetrics metrics;
+ private ObjectName balancerInfoBeanName;
// all data node lists
private final Collection overUtilized = new LinkedList();
@@ -377,6 +382,8 @@ static int getFailedTimesSinceLastSuccessfulBalance() {
DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
this.metrics = BalancerMetrics.create(this);
+
+ registerBalancerMXBean();
}
private static long getCapacity(DatanodeStorageReport report, StorageType t) {
@@ -680,6 +687,13 @@ private boolean matchStorageGroups(StorageGroup left, StorageGroup right,
left.getDatanodeInfo(), right.getDatanodeInfo());
}
+ /**
+ * Register BalancerMXBean.
+ */
+ private void registerBalancerMXBean() {
+ balancerInfoBeanName = MBeans.register("Balancer", "BalancerInfo", this);
+ }
+
/* reset all fields in a balancer preparing for the next iteration */
void resetData(Configuration conf) {
this.overUtilized.clear();
@@ -689,12 +703,32 @@ void resetData(Configuration conf) {
this.policy.reset();
this.dispatcher.reset(conf);
DefaultMetricsSystem.removeSourceName(metrics.getName());
+ if (balancerInfoBeanName != null) {
+ MBeans.unregister(balancerInfoBeanName);
+ balancerInfoBeanName = null;
+ }
}
NameNodeConnector getNnc() {
return nnc;
}
+ @Override
+ public String getVersion() {
+ return VersionInfo.getVersion() + ", r" + VersionInfo.getRevision();
+ }
+
+ @Override
+ public String getSoftwareVersion() {
+ return VersionInfo.getVersion();
+ }
+
+ @Override
+ public String getCompileInfo() {
+ return VersionInfo.getDate() + " by " + VersionInfo.getUser() + " from "
+ + VersionInfo.getBranch();
+ }
+
static class Result {
private final ExitStatus exitStatus;
private final long bytesLeftToMove;
@@ -860,6 +894,7 @@ static private int doBalance(Collection namenodes,
+ " NameNode");
List connectors = Collections.emptyList();
+ BalancerHttpServer balancerHttpServer = startBalancerHttpServer(conf);
try {
connectors = NameNodeConnector.newNameNodeConnectors(namenodes, nsIds,
Balancer.class.getSimpleName(), BALANCER_ID_PATH, conf,
@@ -872,6 +907,9 @@ static private int doBalance(Collection namenodes,
if (p.getBlockPools().size() == 0
|| p.getBlockPools().contains(nnc.getBlockpoolID())) {
final Balancer b = new Balancer(nnc, p, conf);
+ if (balancerHttpServer != null) {
+ balancerHttpServer.setBalancerAttribute(b);
+ }
final Result r = b.runOneIteration();
r.print(iteration, nnc, System.out);
@@ -898,6 +936,9 @@ static private int doBalance(Collection namenodes,
for(NameNodeConnector nnc : connectors) {
IOUtils.cleanupWithLogger(LOG, nnc);
}
+ if (balancerHttpServer != null) {
+ balancerHttpServer.stop();
+ }
}
return ExitStatus.SUCCESS.getExitCode();
}
@@ -969,6 +1010,18 @@ static void stop() {
serviceRunning = false;
}
+ private static BalancerHttpServer startBalancerHttpServer(Configuration conf) throws IOException {
+ boolean httpServerEnabled = conf.getBoolean(DFSConfigKeys.DFS_BALANCER_HTTPSERVER_ENABLED_KEY,
+ DFSConfigKeys.DFS_BALANCER_HTTPSERVER_ENABLED_DEFAULT);
+ if (httpServerEnabled) {
+ BalancerHttpServer balancerHttpServer = new BalancerHttpServer(conf);
+ balancerHttpServer.start();
+ return balancerHttpServer;
+ } else {
+ return null;
+ }
+ }
+
private static void checkKeytabAndInit(Configuration conf)
throws IOException {
if (conf.getBoolean(DFSConfigKeys.DFS_BALANCER_KEYTAB_ENABLED_KEY,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancerHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancerHttpServer.java
new file mode 100644
index 0000000000000..c376ecabd5dc4
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancerHttpServer.java
@@ -0,0 +1,114 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.balancer;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.server.common.JspHelper;
+import org.apache.hadoop.http.HttpConfig;
+import org.apache.hadoop.http.HttpServer2;
+import org.apache.hadoop.net.NetUtils;
+
+public class BalancerHttpServer {
+
+ private static final String BALANCER_ATTRIBUTE_KEY = "current.balancer";
+
+ private final Configuration conf;
+ private InetSocketAddress httpAddress;
+ private InetSocketAddress httpsAddress;
+ private HttpServer2 httpServer;
+
+ public BalancerHttpServer(Configuration conf) {
+ this.conf = conf;
+ }
+
+ public void start() throws IOException {
+ String webApp = "balancer";
+ // Get HTTP address
+ httpAddress = conf.getSocketAddr(DFSConfigKeys.DFS_BALANCER_HTTP_BIND_HOST_KEY,
+ DFSConfigKeys.DFS_BALANCER_HTTP_ADDRESS_KEY,
+ DFSConfigKeys.DFS_BALANCER_HTTP_ADDRESS_DEFAULT,
+ DFSConfigKeys.DFS_BALANCER_HTTP_PORT_DEFAULT);
+
+ // Get HTTPs address
+ httpsAddress = conf.getSocketAddr(DFSConfigKeys.DFS_BALANCER_HTTPS_BIND_HOST_KEY,
+ DFSConfigKeys.DFS_BALANCER_HTTPS_ADDRESS_KEY,
+ DFSConfigKeys.DFS_BALANCER_HTTPS_ADDRESS_DEFAULT,
+ DFSConfigKeys.DFS_BALANCER_HTTPS_PORT_DEFAULT);
+
+ HttpServer2.Builder builder =
+ DFSUtil.getHttpServerTemplate(conf, httpAddress, httpsAddress, webApp,
+ DFSConfigKeys.DFS_BALANCER_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
+ DFSConfigKeys.DFS_BALANCER_KEYTAB_FILE_KEY);
+
+ final boolean xFrameEnabled = conf.getBoolean(DFSConfigKeys.DFS_XFRAME_OPTION_ENABLED,
+ DFSConfigKeys.DFS_XFRAME_OPTION_ENABLED_DEFAULT);
+
+ final String xFrameOptionValue = conf.getTrimmed(DFSConfigKeys.DFS_XFRAME_OPTION_VALUE,
+ DFSConfigKeys.DFS_XFRAME_OPTION_VALUE_DEFAULT);
+
+ builder.configureXFrame(xFrameEnabled).setXFrameOption(xFrameOptionValue);
+
+ httpServer = builder.build();
+ httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
+ httpServer.start();
+
+ HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
+ int connIdx = 0;
+ if (policy.isHttpEnabled()) {
+ httpAddress = httpServer.getConnectorAddress(connIdx++);
+ if (httpAddress != null) {
+ conf.set(DFSConfigKeys.DFS_BALANCER_HTTP_ADDRESS_KEY,
+ NetUtils.getHostPortString(httpAddress));
+ }
+ }
+ if (policy.isHttpsEnabled()) {
+ httpsAddress = httpServer.getConnectorAddress(connIdx);
+ if (httpsAddress != null) {
+ conf.set(DFSConfigKeys.DFS_BALANCER_HTTPS_ADDRESS_KEY,
+ NetUtils.getHostPortString(httpsAddress));
+ }
+ }
+ }
+
+ public void setBalancerAttribute(Balancer balancer) {
+ httpServer.setAttribute(BALANCER_ATTRIBUTE_KEY, balancer);
+ }
+
+ public void stop() throws IOException {
+ if (httpServer != null) {
+ try {
+ httpServer.stop();
+ } catch (Exception e) {
+ throw new IOException(e);
+ }
+ }
+ }
+
+ public InetSocketAddress getHttpAddress() {
+ return httpAddress;
+ }
+
+ public InetSocketAddress getHttpsAddress() {
+ return httpsAddress;
+ }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancerMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancerMXBean.java
new file mode 100644
index 0000000000000..e732bdd24cf4d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancerMXBean.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.balancer;
+
+public interface BalancerMXBean {
+
+ /**
+ * Gets the version of Hadoop.
+ *
+ * @return the version of Hadoop
+ */
+ String getVersion();
+
+ /**
+ * Get the version of software running on the Balancer.
+ *
+ * @return a string representing the version.
+ */
+ String getSoftwareVersion();
+
+ /**
+ * Get the compilation information which contains date, user and branch.
+ *
+ * @return the compilation information, as a string.
+ */
+ String getCompileInfo();
+
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/package-info.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/package-info.java
new file mode 100644
index 0000000000000..5040eea2b95c2
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/package-info.java
@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * The Balancer is a tool for balancing the data across
+ * the storage devices of an HDFS cluster.
+ * Over time, the data in the HDFS storage can become skewed.
+ * The Balancer moves data blocks between DataNodes to balance data distribution.
+ */
+package org.apache.hadoop.hdfs.server.balancer;
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
index 0f4f020ab7812..af527df28bf7c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
@@ -149,7 +149,7 @@ void start() throws IOException {
}
}
- HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
+ HttpServer2.Builder builder = DFSUtil.getHttpServerTemplate(conf,
httpAddr, httpsAddr, "hdfs",
DFSConfigKeys.DFS_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index e95200b35aaa5..5f83a9c8d50ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -474,7 +474,7 @@ public void startInfoServer() throws IOException {
DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_DEFAULT);
InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
- HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
+ HttpServer2.Builder builder = DFSUtil.getHttpServerTemplate(conf,
httpAddr, httpsAddr, "secondary", DFSConfigKeys.
DFS_SECONDARY_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 2ab25f8329ce6..6c0ce707ac724 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4114,6 +4114,60 @@
+
+ dfs.balancer.kerberos.internal.spnego.principal
+
+
+ Kerberos SPNEGO principal name used by the balancer.
+
+
+
+
+ dfs.balancer.httpserver.enabled
+ false
+
+ If true, the Balancer will start an HTTP server.
+
+
+
+
+ dfs.balancer.http-address
+ 0.0.0.0:8590
+
+ The Balancer HTTP server address and port.
+
+
+
+
+ dfs.balancer.http-bind-host
+
+
+ The actual address the HTTP server will bind to. If this optional address
+ is set, it overrides only the hostname portion of
+ dfs.balancer.http-address. This is useful for making the Balancer
+ HTTP server listen on all interfaces by setting it to 0.0.0.0.
+
+
+
+
+ dfs.balancer.https-address
+ 0.0.0.0:8591
+
+ The balancer HTTPS server address and port.
+
+
+
+
+ dfs.balancer.https-bind-host
+
+
+ The actual address the HTTPS server will bind to. If this optional address
+ is set, it overrides only the hostname portion of
+ dfs.balancer.https-address. This is useful for making the Balancer
+ HTTPS server listen on all interfaces by setting it to 0.0.0.0.
+
+
+
dfs.http.client.retry.policy.enabledfalse
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/balancer/balancer.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/balancer/balancer.html
new file mode 100644
index 0000000000000..fd8957c560cbb
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/balancer/balancer.html
@@ -0,0 +1,114 @@
+
+
+
+
+
+
+
+ Balancer Information
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/balancer/balancer.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/balancer/balancer.js
new file mode 100644
index 0000000000000..ca43f42fc1caa
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/balancer/balancer.js
@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+(function () {
+ "use strict";
+
+ var data = {};
+
+ dust.loadSource(dust.compile($('#tmpl-balancer').html(), 'balancer'));
+
+ var BEANS = [
+ {"name": "mBeanServerInfo", "url": "/jmx?qry=JMImplementation:type=MBeanServerDelegate"},
+ {"name": "balancer", "url": "/jmx?qry=Hadoop:service=Balancer,name=BalancerInfo"},
+ {"name": "balancers", "url": "/jmx?qry=Hadoop:service=Balancer,name=Balancer-*"}
+ ];
+
+ load_json(BEANS, guard_with_startup_progress(function (d) {
+ for (var k in d) {
+ data[k] = k === 'balancers' ? workaround(d[k].beans) : d[k].beans[0];
+ }
+
+ extractMetrics();
+
+ render();
+ }), function (url, jqxhr, text, err) {
+ show_err_msg('
Failed to retrieve data from ' + url + ', cause: ' + err + '
');
+ });
+
+ function guard_with_startup_progress(fn) {
+ return function () {
+ try {
+ fn.apply(this, arguments);
+ } catch (err) {
+ if (err instanceof TypeError) {
+ show_err_msg('Balancer error: ' + err);
+ }
+ }
+ };
+ }
+
+ function workaround(balancers) {
+ for (var balancer of balancers) {
+ var str = balancer['modelerType'];
+ var index = str.indexOf("-");
+ balancer['BlockPoolID'] = str.substring(index + 1);
+ }
+ return balancers;
+ }
+
+ function extractMetrics() {
+ var str = data['mBeanServerInfo']['MBeanServerId'];
+ var index = str.indexOf("_");
+ if (index !== -1) {
+ data['balancer']['HostName'] = str.substring(0, index);
+ data['balancer']['BalancerStartedTimeInMillis'] = str.substring(index + 1);
+ } else {
+ data['balancer']['HostName'] = "invalid data";
+ data['balancer']['BalancerStartedTimeInMillis'] = "invalid data";
+ }
+ }
+
+ var HELPERS = {
+ 'helper_date_tostring': function (chunk, ctx, bodies, params) {
+ var value = dust.helpers.tap(params.value, chunk, ctx);
+ return chunk.write('' + moment(Number(value)).format('ddd MMM DD HH:mm:ss ZZ YYYY'));
+ }
+ };
+
+ function render() {
+ var base = dust.makeBase(HELPERS);
+ dust.render('balancer', base.push(data), function (err, out) {
+ $('#tab-overview').html(out);
+ $('#tab-overview').addClass('active');
+ });
+ }
+
+ function show_err_msg() {
+ $('#alert-panel-body').html("Failed to load balancer information");
+ $('#alert-panel').show();
+ }
+})();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/balancer/index.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/balancer/index.html
new file mode 100644
index 0000000000000..cd7b52866d425
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/balancer/index.html
@@ -0,0 +1,24 @@
+
+
+
+
+
+ Hadoop Administration
+
+
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/balancer/robots.txt b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/balancer/robots.txt
new file mode 100644
index 0000000000000..1f53798bb4fe3
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/balancer/robots.txt
@@ -0,0 +1,2 @@
+User-agent: *
+Disallow: /
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerHttpServer.java
new file mode 100644
index 0000000000000..2ba70a665b38a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerHttpServer.java
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.balancer;
+
+import java.io.File;
+import java.net.InetSocketAddress;
+import java.net.URL;
+import java.net.URLConnection;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.web.URLConnectionFactory;
+import org.apache.hadoop.http.HttpConfig;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
+import org.apache.hadoop.test.GenericTestUtils;
+
+import static org.assertj.core.api.AssertionsForClassTypes.assertThat;
+
+public class TestBalancerHttpServer {
+ private static final String BASEDIR =
+ GenericTestUtils.getTempPath(TestBalancerHttpServer.class.getSimpleName());
+ private static String keystoresDir;
+ private static String sslConfDir;
+ private static Configuration conf;
+ private static URLConnectionFactory connectionFactory;
+
+ @BeforeClass
+ public static void setUp() throws Exception {
+ conf = new Configuration();
+ conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTP_ONLY.name());
+ conf.set(DFSConfigKeys.DFS_BALANCER_HTTP_ADDRESS_KEY, "localhost:0");
+ conf.set(DFSConfigKeys.DFS_BALANCER_HTTPS_ADDRESS_KEY, "localhost:0");
+ File base = new File(BASEDIR);
+ FileUtil.fullyDelete(base);
+ base.mkdirs();
+ keystoresDir = new File(BASEDIR).getAbsolutePath();
+ sslConfDir = KeyStoreTestUtil.getClasspathDir(TestBalancerHttpServer.class);
+ KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
+ connectionFactory = URLConnectionFactory.newDefaultURLConnectionFactory(conf);
+ }
+
+ @AfterClass
+ public static void tearDown() throws Exception {
+ FileUtil.fullyDelete(new File(BASEDIR));
+ KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
+ }
+
+ @Test
+ public void testHttpServer() throws Exception {
+ BalancerHttpServer server = null;
+ try {
+ server = new BalancerHttpServer(conf);
+ server.start();
+ assertThat(checkConnection("http", server.getHttpAddress())).isTrue();
+ assertThat(checkConnection("https", server.getHttpsAddress())).isFalse();
+ } finally {
+ if (server != null) {
+ server.stop();
+ }
+ }
+ }
+
+ private boolean checkConnection(String scheme, InetSocketAddress address) {
+ if (address == null) {
+ return false;
+ }
+ try {
+ URL url = new URL(scheme + "://" + NetUtils.getHostPortString(address));
+ URLConnection conn = connectionFactory.openConnection(url);
+ conn.setConnectTimeout(5 * 1000);
+ conn.setReadTimeout(5 * 1000);
+ conn.connect();
+ conn.getContent();
+ return true;
+ } catch (Exception e) {
+ return false;
+ }
+ }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerService.java
index eb0a8ef860a61..6256e858e3c90 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerService.java
@@ -33,10 +33,16 @@
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.MetricsAsserts;
import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.VersionInfo;
+
import org.junit.Test;
+import java.lang.management.ManagementFactory;
import java.util.concurrent.TimeUnit;
+import javax.management.MBeanServer;
+import javax.management.ObjectName;
+
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@@ -210,4 +216,49 @@ public void testBalancerServiceOnError() throws Exception {
}
}
}
+
+ @Test(timeout = 60000)
+ public void testBalancerServiceMetrics() throws Exception {
+ Configuration conf = new HdfsConfiguration();
+ conf.setTimeDuration(DFSConfigKeys.DFS_BALANCER_SERVICE_INTERVAL_KEY, 5, TimeUnit.SECONDS);
+ TestBalancer.initConf(conf);
+ try {
+ setupCluster(conf);
+ TestBalancerWithHANameNodes.waitStoragesNoStale(cluster, client, 0);
+ long totalCapacity = addOneDataNode(conf); // make cluster imbalanced
+
+ Thread balancerThread = newBalancerService(conf, new String[] {"-asService"});
+ balancerThread.start();
+
+ MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+ GenericTestUtils.waitFor(() -> {
+ try {
+ ObjectName mxbeanName = new ObjectName("Hadoop:service=Balancer,name=BalancerInfo");
+ String version = (String) mbs.getAttribute(mxbeanName, "Version");
+ return version.contains(VersionInfo.getVersion()) &&
+ version.contains(VersionInfo.getRevision());
+ } catch (Exception e) {
+ return false;
+ }
+ }, 100, 10000);
+
+ TestBalancer.waitForBalancer(totalUsedSpace, totalCapacity, client, cluster,
+ BalancerParameters.DEFAULT);
+
+ cluster.triggerHeartbeats();
+ cluster.triggerBlockReports();
+
+ // add another empty datanode, wait for cluster become balance again
+ totalCapacity = addOneDataNode(conf);
+ TestBalancer.waitForBalancer(totalUsedSpace, totalCapacity, client, cluster,
+ BalancerParameters.DEFAULT);
+
+ Balancer.stop();
+ balancerThread.join();
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
}