diff --git a/ambari-project/pom.xml b/ambari-project/pom.xml
index 4fecc0d686e..7bb01b5360c 100644
--- a/ambari-project/pom.xml
+++ b/ambari-project/pom.xml
@@ -206,6 +206,12 @@
mockito-core
1.10.19
+
+ org.hamcrest
+ hamcrest-all
+ test
+ 1.3
+
org.powermock
powermock-module-junit4
diff --git a/ambari-server/pom.xml b/ambari-server/pom.xml
index eab8bb43b72..64c3a63798f 100644
--- a/ambari-server/pom.xml
+++ b/ambari-server/pom.xml
@@ -1247,6 +1247,11 @@
org.eclipse.persistence
eclipselink
+
+ org.hamcrest
+ hamcrest-all
+ test
+
org.mockito
mockito-core
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/ClassifyNameNodeException.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/ClassifyNameNodeException.java
new file mode 100644
index 00000000000..3a3f0e6ee15
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/ClassifyNameNodeException.java
@@ -0,0 +1,26 @@
+/*
+ *
+ * * Licensed to the Apache Software Foundation (ASF) under one
+ * * or more contributor license agreements. See the NOTICE file
+ * * distributed with this work for additional information
+ * * regarding copyright ownership. The ASF licenses this file
+ * * to you under the Apache License, Version 2.0 (the
+ * * "License"); you may not use this file except in compliance
+ * * with the License. You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ *
+ */
+package org.apache.ambari.server.stack;
+
+public class ClassifyNameNodeException extends RuntimeException {
+ public ClassifyNameNodeException(NameService nameService) {
+ super("Could not classify some of the NameNodes in namespace: " + nameService.nameServiceId);
+ }
+}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/HostsType.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/HostsType.java
index bc29ad5ffcb..530c9ee25f3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/HostsType.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/HostsType.java
@@ -18,9 +18,17 @@
package org.apache.ambari.server.stack;
+import static java.util.Arrays.asList;
+import static java.util.Collections.emptyList;
+import static java.util.Collections.singletonList;
+import static java.util.stream.Collectors.toCollection;
+import static java.util.stream.Collectors.toList;
+
import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;
+import java.util.Set;
+import java.util.stream.Stream;
import org.apache.ambari.server.state.ServiceComponentHost;
@@ -29,16 +37,10 @@
* also have master/secondary designators.
*/
public class HostsType {
-
- /**
- * The master host, if any.
- */
- public String master = null;
-
/**
- * The secondary host, if any.
+ * List of HA hosts (master - secondaries pairs), if any.
*/
- public String secondary = null;
+ private final List highAvailabilityHosts;
/**
* Ordered collection of hosts. This represents all hosts where an upgrade
@@ -47,10 +49,10 @@ public class HostsType {
* That is to say, a downgrade only occurs where the current version is not
* the target version.
*/
- public LinkedHashSet hosts = new LinkedHashSet<>();
+ private LinkedHashSet hosts;
/**
- * Unhealthy hosts are those which are explicitely put into maintenance mode.
+ * Unhealthy hosts are those which are explicitly put into maintenance mode.
* If there is a host which is not heartbeating (or is generally unhealthy)
* but not in maintenance mode, then the prerequisite upgrade checks will let
* the administrator know that it must be put into maintenance mode before an
@@ -58,4 +60,134 @@ public class HostsType {
*/
public List unhealthy = new ArrayList<>();
+ /**
+ * @return true if master components list is not empty
+ */
+ public boolean hasMasters() {
+ return !getMasters().isEmpty();
+ }
+
+ public List getHighAvailabilityHosts() {
+ return highAvailabilityHosts;
+ }
+
+ /**
+ * Order the hosts so that for each HA host the secondaries come first.
+ * For example: [sec1, sec2, master1, sec3, sec4, master2]
+ */
+ public void arrangeHostSecondariesFirst() {
+ this.hosts = getHighAvailabilityHosts().stream()
+ .flatMap(each -> Stream.concat(each.getSecondaries().stream(), Stream.of(each.getMaster())))
+ .collect(toCollection(LinkedHashSet::new));
+ }
+
+ /**
+ * @return true if both master and secondary components lists are not empty
+ */
+ public boolean hasMastersAndSecondaries() {
+ return !getMasters().isEmpty() && !getSecondaries().isEmpty();
+ }
+
+ /**
+ * A master and secondary host(s). In HA mode there is one master and one secondary host,
+ * in federated mode there can be more than one secondaries.
+ */
+ public static class HighAvailabilityHosts {
+ private final String master;
+ private final List secondaries;
+
+ public HighAvailabilityHosts(String master, List secondaries) {
+ if (master == null) {
+ throw new IllegalArgumentException("Master host is missing");
+ }
+ this.master = master;
+ this.secondaries = secondaries;
+ }
+
+ public String getMaster() {
+ return master;
+ }
+
+ public List getSecondaries() {
+ return secondaries;
+ }
+ }
+
+ /**
+ * Creates an instance from the optional master and secondary components and with the given host set
+ */
+ public static HostsType from(String master, String secondary, LinkedHashSet hosts) {
+ return master == null
+ ? normal(hosts)
+ : new HostsType(singletonList(new HighAvailabilityHosts(master, secondary != null ? singletonList(secondary) : emptyList())), hosts);
+
+ }
+
+ /**
+ * Create an instance with exactly one high availability host (master-secondary pair) and with the given host set
+ */
+ public static HostsType highAvailability(String master, String secondary, LinkedHashSet hosts) {
+ return new HostsType(singletonList(new HighAvailabilityHosts(master, singletonList(secondary))), hosts);
+ }
+
+ /**
+ * Create an instance with an arbitrary chosen high availability host.
+ */
+ public static HostsType guessHighAvailability(LinkedHashSet hosts) {
+ if (hosts.isEmpty()) {
+ throw new IllegalArgumentException("Cannot guess HA, empty hosts.");
+ }
+ String master = hosts.iterator().next();
+ List secondaries = hosts.stream().skip(1).collect(toList());
+ return new HostsType(singletonList(new HighAvailabilityHosts(master, secondaries)), hosts);
+ }
+
+ /**
+ * Create an instance with multiple high availability hosts.
+ */
+ public static HostsType federated(List highAvailabilityHosts, LinkedHashSet hosts) {
+ return new HostsType(highAvailabilityHosts, hosts);
+ }
+
+ /**
+ * Create an instance without high availability hosts.
+ */
+ public static HostsType normal(LinkedHashSet hosts) {
+ return new HostsType(emptyList(), hosts);
+ }
+
+ /**
+ * Create an instance without high availability hosts.
+ */
+ public static HostsType normal(String... hosts) {
+ return new HostsType(emptyList(), new LinkedHashSet<>(asList(hosts)));
+ }
+
+ /**
+ * Create an instance with a single (non high availability) host.
+ */
+ public static HostsType single(String host) {
+ return HostsType.normal(host);
+ }
+
+ private HostsType(List highAvailabilityHosts, LinkedHashSet hosts) {
+ this.highAvailabilityHosts = highAvailabilityHosts;
+ this.hosts = hosts;
+ }
+
+ public LinkedHashSet getMasters() {
+ return highAvailabilityHosts.stream().map(each -> each.getMaster()).collect(toCollection(LinkedHashSet::new));
+ }
+
+ public LinkedHashSet getSecondaries() {
+ return highAvailabilityHosts.stream().flatMap(each -> each.getSecondaries().stream()).collect(toCollection(LinkedHashSet::new));
+ }
+
+ public Set getHosts() {
+ return hosts;
+ }
+
+ public void setHosts(LinkedHashSet hosts) {
+ this.hosts = hosts;
+ }
}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/MasterHostResolver.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/MasterHostResolver.java
index 99211cbaca7..1b5fbb905b8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/MasterHostResolver.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/MasterHostResolver.java
@@ -23,11 +23,11 @@
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.HashMap;
-import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.stream.Collectors;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
@@ -62,7 +62,15 @@ public enum Service {
HDFS,
HBASE,
YARN,
- OTHER
+ OTHER;
+
+ public static Service fromString(String serviceName) {
+ try {
+ return valueOf(serviceName.toUpperCase());
+ } catch (Exception ignore) {
+ return OTHER;
+ }
+ }
}
/**
@@ -104,58 +112,43 @@ public Cluster getCluster() {
* @return The hostname that is the master of the service and component if successful, null otherwise.
*/
public HostsType getMasterAndHosts(String serviceName, String componentName) {
-
if (serviceName == null || componentName == null) {
return null;
}
-
- Set componentHosts = m_cluster.getHosts(serviceName, componentName);
- if (0 == componentHosts.size()) {
+ LinkedHashSet componentHosts = new LinkedHashSet<>(m_cluster.getHosts(serviceName, componentName));
+ if (componentHosts.isEmpty()) {
return null;
}
- HostsType hostsType = new HostsType();
- hostsType.hosts.addAll(componentHosts);
+ HostsType hostsType = HostsType.normal(componentHosts);
- Service s = Service.OTHER;
try {
- s = Service.valueOf(serviceName.toUpperCase());
- } catch (Exception e) {
- // !!! nothing to do
- }
-
- try {
- switch (s) {
+ switch (Service.fromString(serviceName)) {
case HDFS:
- if (componentName.equalsIgnoreCase("NAMENODE")) {
- if (componentHosts.size() != 2) {
- return filterHosts(hostsType, serviceName, componentName);
- }
-
- Map pair = getNameNodePair(componentHosts);
- if (pair != null) {
- hostsType.master = pair.containsKey(Status.ACTIVE) ? pair.get(Status.ACTIVE) : null;
- hostsType.secondary = pair.containsKey(Status.STANDBY) ? pair.get(Status.STANDBY) : null;
- } else {
- // !!! we KNOW we have 2 componentHosts if we're here.
- Iterator iterator = componentHosts.iterator();
- hostsType.master = iterator.next();
- hostsType.secondary = iterator.next();
-
- LOG.warn("Could not determine the active/standby states from NameNodes {}. " +
- "Using {} as active and {} as standby.",
- StringUtils.join(componentHosts, ','), hostsType.master, hostsType.secondary);
+ if (componentName.equalsIgnoreCase("NAMENODE") && componentHosts.size() >= 2) {
+ try {
+ hostsType = HostsType.federated(nameSpaces(componentHosts), componentHosts);
+ } catch (ClassifyNameNodeException | IllegalArgumentException e) {
+ if (componentHosts.size() == 2) { // in HA mode guess if cannot determine active/standby
+ hostsType = HostsType.guessHighAvailability(componentHosts);
+ LOG.warn(
+ "Could not determine the active/standby states from NameNodes {}. Using {} as active and {} as standbys.",
+ componentHosts, hostsType.getMasters(), hostsType.getSecondaries());
+ } else {
+ // XXX fallback to HostsType.normal unsure how to handle this
+ LOG.warn("Could not determine the active/standby states of federated NameNode from NameNodes {}.", componentHosts);
+ }
}
}
break;
case YARN:
if (componentName.equalsIgnoreCase("RESOURCEMANAGER")) {
- resolveResourceManagers(getCluster(), hostsType);
+ hostsType = resolveResourceManagers(getCluster(), componentHosts);
}
break;
case HBASE:
if (componentName.equalsIgnoreCase("HBASE_MASTER")) {
- resolveHBaseMasters(getCluster(), hostsType);
+ hostsType = resolveHBaseMasters(getCluster(), componentHosts);
}
break;
default:
@@ -164,10 +157,7 @@ public HostsType getMasterAndHosts(String serviceName, String componentName) {
} catch (Exception err) {
LOG.error("Unable to get master and hosts for Component " + componentName + ". Error: " + err.getMessage(), err);
}
-
- hostsType = filterHosts(hostsType, serviceName, componentName);
-
- return hostsType;
+ return filterHosts(hostsType, serviceName, componentName);
}
/**
@@ -197,7 +187,7 @@ private HostsType filterHosts(HostsType hostsType, String service, String compon
List unhealthyHosts = new ArrayList<>();
LinkedHashSet upgradeHosts = new LinkedHashSet<>();
- for (String hostName : hostsType.hosts) {
+ for (String hostName : hostsType.getHosts()) {
ServiceComponentHost sch = sc.getServiceComponentHost(hostName);
Host host = sch.getHost();
MaintenanceState maintenanceState = host.getMaintenanceState(sch.getClusterId());
@@ -234,7 +224,7 @@ private HostsType filterHosts(HostsType hostsType, String service, String compon
}
hostsType.unhealthy = unhealthyHosts;
- hostsType.hosts = upgradeHosts;
+ hostsType.setHosts(upgradeHosts);
return hostsType;
} catch (AmbariException e) {
@@ -268,94 +258,79 @@ public boolean isNameNodeHA() throws AmbariException {
}
/**
- * Get mapping of the HDFS Namenodes from the state ("active" or "standby") to the hostname.
- * @return Returns a map from the state ("active" or "standby" to the hostname with that state if exactly
- * one active and one standby host were found, otherwise, return null.
- * The hostnames are returned in lowercase.
+ * Get the NameNode NameSpaces (master->secondaries hosts).
+ * In each NameSpace there should be exactly 1 master and at least one secondary host.
*/
- private Map getNameNodePair(Set componentHosts) throws AmbariException {
- Map stateToHost = new HashMap<>();
- Cluster cluster = getCluster();
+ private List nameSpaces(Set componentHosts) {
+ return NameService.fromConfig(m_configHelper, getCluster()).stream()
+ .map(each -> findMasterAndSecondaries(each, componentHosts))
+ .collect(Collectors.toList());
- String nameService = m_configHelper.getValueFromDesiredConfigurations(cluster, ConfigHelper.HDFS_SITE, "dfs.internal.nameservices");
- if (nameService == null || nameService.isEmpty()) {
- return null;
- }
+ }
- String nnUniqueIDstring = m_configHelper.getValueFromDesiredConfigurations(cluster, ConfigHelper.HDFS_SITE, "dfs.ha.namenodes." + nameService);
- if (nnUniqueIDstring == null || nnUniqueIDstring.isEmpty()) {
- return null;
+ /**
+ * Find the master and secondary namenode(s) based on JMX NameNodeStatus.
+ */
+ private HostsType.HighAvailabilityHosts findMasterAndSecondaries(NameService nameService, Set componentHosts) throws ClassifyNameNodeException {
+ String master = null;
+ List secondaries = new ArrayList<>();
+ for (NameService.NameNode nameNode : nameService.getNameNodes()) {
+ checkForDualNetworkCards(componentHosts, nameNode);
+ String state = queryJmxBeanValue(nameNode.getHost(), nameNode.getPort(), "Hadoop:service=NameNode,name=NameNodeStatus", "State", true, nameNode.isEncrypted());
+ if (Status.ACTIVE.toString().equalsIgnoreCase(state)) {
+ master = nameNode.getHost();
+ } else if (Status.STANDBY.toString().equalsIgnoreCase(state)) {
+ secondaries.add(nameNode.getHost());
+ } else {
+ LOG.error(String.format("Could not retrieve state for NameNode %s from property %s by querying JMX.", nameNode.getHost(), nameNode.getPropertyName()));
+ }
}
-
- String[] nnUniqueIDs = nnUniqueIDstring.split(",");
- if (nnUniqueIDs == null || nnUniqueIDs.length != 2) {
- return null;
+ if (masterAndSecondariesAreFound(componentHosts, master, secondaries)) {
+ return new HostsType.HighAvailabilityHosts(master, secondaries);
}
+ throw new ClassifyNameNodeException(nameService);
+ }
- String policy = m_configHelper.getValueFromDesiredConfigurations(cluster, ConfigHelper.HDFS_SITE, "dfs.http.policy");
- boolean encrypted = (policy != null && policy.equalsIgnoreCase(ConfigHelper.HTTPS_ONLY));
-
- String namenodeFragment = "dfs.namenode." + (encrypted ? "https-address" : "http-address") + ".{0}.{1}";
-
- for (String nnUniqueID : nnUniqueIDs) {
- String key = MessageFormat.format(namenodeFragment, nameService, nnUniqueID);
- String value = m_configHelper.getValueFromDesiredConfigurations(cluster, ConfigHelper.HDFS_SITE, key);
-
- try {
- HostAndPort hp = HTTPUtils.getHostAndPortFromProperty(value);
- if (hp == null) {
- throw new MalformedURLException("Could not parse host and port from " + value);
- }
-
- if (!componentHosts.contains(hp.host)){
- //This may happen when NN HA is configured on dual network card machines with public/private FQDNs.
- LOG.error(
- MessageFormat.format(
- "Hadoop NameNode HA configuration {0} contains host {1} that does not exist in the NameNode hosts list {3}",
- key, hp.host, componentHosts.toString()));
- }
- String state = queryJmxBeanValue(hp.host, hp.port, "Hadoop:service=NameNode,name=NameNodeStatus", "State", true, encrypted);
-
- if (null != state && (state.equalsIgnoreCase(Status.ACTIVE.toString()) || state.equalsIgnoreCase(Status.STANDBY.toString()))) {
- Status status = Status.valueOf(state.toUpperCase());
- stateToHost.put(status, hp.host.toLowerCase());
- } else {
- LOG.error(String.format("Could not retrieve state for NameNode %s from property %s by querying JMX.", hp.host, key));
- }
- } catch (MalformedURLException e) {
- LOG.error(e.getMessage());
- }
+ private static void checkForDualNetworkCards(Set componentHosts, NameService.NameNode nameNode) {
+ if (!componentHosts.contains(nameNode.getHost())) {
+ //This may happen when NN HA is configured on dual network card machines with public/private FQDNs.
+ LOG.error(
+ MessageFormat.format(
+ "Hadoop NameNode HA configuration {0} contains host {1} that does not exist in the NameNode hosts list {2}",
+ nameNode.getPropertyName(), nameNode.getHost(), componentHosts.toString()));
}
+ }
+
+ private static boolean masterAndSecondariesAreFound(Set componentHosts, String master, List secondaries) {
+ return master != null && secondaries.size() + 1 == componentHosts.size() && !secondaries.contains(master);
+ }
- if (stateToHost.containsKey(Status.ACTIVE) && stateToHost.containsKey(Status.STANDBY) && !stateToHost.get(Status.ACTIVE).equalsIgnoreCase(stateToHost.get(Status.STANDBY))) {
- return stateToHost;
+ private HostAndPort parseHostPort(Cluster cluster, String propertyName, String configType) throws MalformedURLException {
+ String propertyValue = m_configHelper.getValueFromDesiredConfigurations(cluster, configType, propertyName);
+ HostAndPort hp = HTTPUtils.getHostAndPortFromProperty(propertyValue);
+ if (hp == null) {
+ throw new MalformedURLException("Could not parse host and port from " + propertyValue);
}
- return null;
+ return hp;
}
/**
* Resolve the name of the Resource Manager master and convert the hostname to lowercase.
- * @param cluster Cluster
- * @param hostType RM hosts
- * @throws MalformedURLException
*/
- private void resolveResourceManagers(Cluster cluster, HostsType hostType) throws MalformedURLException {
- LinkedHashSet orderedHosts = new LinkedHashSet<>(hostType.hosts);
+ private HostsType resolveResourceManagers(Cluster cluster, Set hosts) throws MalformedURLException {
+ String master = null;
+ LinkedHashSet orderedHosts = new LinkedHashSet<>(hosts);
// IMPORTANT, for RM, only the master returns jmx
- String rmWebAppAddress = m_configHelper.getValueFromDesiredConfigurations(cluster, ConfigHelper.YARN_SITE, "yarn.resourcemanager.webapp.address");
- HostAndPort hp = HTTPUtils.getHostAndPortFromProperty(rmWebAppAddress);
- if (hp == null) {
- throw new MalformedURLException("Could not parse host and port from " + rmWebAppAddress);
- }
+ HostAndPort hp = parseHostPort(cluster, "yarn.resourcemanager.webapp.address", ConfigHelper.YARN_SITE);
- for (String hostname : hostType.hosts) {
+ for (String hostname : hosts) {
String value = queryJmxBeanValue(hostname, hp.port,
"Hadoop:service=ResourceManager,name=RMNMInfo", "modelerType", true);
if (null != value) {
- if (null == hostType.master) {
- hostType.master = hostname.toLowerCase();
+ if (master == null) {
+ master = hostname.toLowerCase();
}
// Quick and dirty to make sure the master is last in the list
@@ -364,16 +339,15 @@ private void resolveResourceManagers(Cluster cluster, HostsType hostType) throws
}
}
- hostType.hosts = orderedHosts;
+ return HostsType.from(master, null, orderedHosts);
}
/**
* Resolve the HBASE master and convert the hostname to lowercase.
- * @param cluster Cluster
- * @param hostsType HBASE master host.
- * @throws AmbariException
*/
- private void resolveHBaseMasters(Cluster cluster, HostsType hostsType) throws AmbariException {
+ private HostsType resolveHBaseMasters(Cluster cluster, Set hosts) throws AmbariException {
+ String master = null;
+ String secondary = null;
String hbaseMasterInfoPortProperty = "hbase.master.info.port";
String hbaseMasterInfoPortValue = m_configHelper.getValueFromDesiredConfigurations(cluster, ConfigHelper.HBASE_SITE, hbaseMasterInfoPortProperty);
@@ -382,19 +356,20 @@ private void resolveHBaseMasters(Cluster cluster, HostsType hostsType) throws Am
}
final int hbaseMasterInfoPort = Integer.parseInt(hbaseMasterInfoPortValue);
- for (String hostname : hostsType.hosts) {
+ for (String hostname : hosts) {
String value = queryJmxBeanValue(hostname, hbaseMasterInfoPort,
"Hadoop:service=HBase,name=Master,sub=Server", "tag.isActiveMaster", false);
if (null != value) {
Boolean bool = Boolean.valueOf(value);
if (bool.booleanValue()) {
- hostsType.master = hostname.toLowerCase();
+ master = hostname.toLowerCase();
} else {
- hostsType.secondary = hostname.toLowerCase();
+ secondary = hostname.toLowerCase();
}
}
}
+ return HostsType.from(master, secondary, new LinkedHashSet<>(hosts));
}
protected String queryJmxBeanValue(String hostname, int port, String beanName, String attributeName,
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/NameService.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/NameService.java
new file mode 100644
index 00000000000..313d3bd01b5
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/NameService.java
@@ -0,0 +1,157 @@
+/*
+ *
+ * * Licensed to the Apache Software Foundation (ASF) under one
+ * * or more contributor license agreements. See the NOTICE file
+ * * distributed with this work for additional information
+ * * regarding copyright ownership. The ASF licenses this file
+ * * to you under the Apache License, Version 2.0 (the
+ * * "License"); you may not use this file except in compliance
+ * * with the License. You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ *
+ */
+
+package org.apache.ambari.server.stack;
+
+import static org.apache.commons.lang.StringUtils.isBlank;
+
+import java.text.MessageFormat;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.stream.Collectors;
+
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.ConfigHelper;
+import org.apache.ambari.server.utils.HTTPUtils;
+import org.apache.ambari.server.utils.HostAndPort;
+import org.apache.commons.lang.builder.ToStringBuilder;
+
+/**
+ * I represent a nameServiceId that belongs to HDFS. Multiple namenodes may belong to the same nameServiceId.
+ * Each NameNode has either an http or https address.
+ */
+public class NameService {
+
+ public static class NameNode {
+ private final String uniqueId;
+ private final String address;
+ private final boolean encrypted;
+ private final String propertyName;
+
+ public static NameNode fromConfig(String nameServiceId, String nnUniqueId, ConfigHelper config, Cluster cluster) {
+ String namenodeFragment = "dfs.namenode." + (isEncrypted(config, cluster) ? "https-address" : "http-address") + ".{0}.{1}";
+ String propertyName = MessageFormat.format(namenodeFragment, nameServiceId, nnUniqueId);
+ return new NameNode(
+ nnUniqueId,
+ config.getValueFromDesiredConfigurations(cluster, ConfigHelper.HDFS_SITE, propertyName),
+ isEncrypted(config, cluster),
+ propertyName);
+ }
+
+ NameNode(String uniqueId, String address, boolean encrypted, String propertyName) {
+ this.uniqueId = uniqueId;
+ this.address = address;
+ this.encrypted = encrypted;
+ this.propertyName = propertyName;
+ }
+
+ public String getHost() {
+ return getAddress().host.toLowerCase();
+ }
+
+ public int getPort() {
+ return getAddress().port;
+ }
+
+ private HostAndPort getAddress() {
+ HostAndPort hp = HTTPUtils.getHostAndPortFromProperty(address);
+ if (hp == null) {
+ throw new IllegalArgumentException("Could not parse host and port from " + address);
+ }
+ return hp;
+ }
+
+ private static boolean isEncrypted(ConfigHelper config, Cluster cluster) {
+ String policy = config.getValueFromDesiredConfigurations(cluster, ConfigHelper.HDFS_SITE, "dfs.http.policy");
+ return (policy != null && policy.equalsIgnoreCase(ConfigHelper.HTTPS_ONLY));
+ }
+
+ public boolean isEncrypted() {
+ return encrypted;
+ }
+
+ public String getPropertyName() {
+ return propertyName;
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringBuilder(this)
+ .append("uniqueId", uniqueId)
+ .append("address", address)
+ .append("encrypted", encrypted)
+ .append("propertyName", propertyName)
+ .toString();
+ }
+ }
+
+ public final String nameServiceId;
+ /**
+ * NameNodes in this nameservice.
+ * The minimum number of NameNodes for HA is two, but more can be configured.
+ */
+ private final List nameNodes;
+
+ public static List fromConfig(ConfigHelper config, Cluster cluster) {
+ return nameServiceIds(config, cluster).stream()
+ .map(nameServiceId -> nameService(nameServiceId, config, cluster))
+ .collect(Collectors.toList());
+ }
+
+ private static List nameServiceIds(ConfigHelper config, Cluster cluster) {
+ return separateByComma(config, cluster, "dfs.internal.nameservices");
+ }
+
+ private static NameService nameService(String nameServiceId, ConfigHelper config, Cluster cluster) {
+ List namenodes = nnUniqueIds(nameServiceId, config, cluster).stream()
+ .map(nnUniquId -> NameNode.fromConfig(nameServiceId, nnUniquId, config, cluster))
+ .collect(Collectors.toList());
+ return new NameService(nameServiceId, namenodes);
+ }
+
+ private static List nnUniqueIds(String nameServiceId, ConfigHelper config, Cluster cluster) {
+ return separateByComma(config, cluster, "dfs.ha.namenodes." + nameServiceId);
+ }
+
+ private static List separateByComma(ConfigHelper config, Cluster cluster, String propertyName) {
+ String propertyValue = config.getValueFromDesiredConfigurations(cluster, ConfigHelper.HDFS_SITE, propertyName);
+ return isBlank(propertyValue)
+ ? Collections.emptyList()
+ : Arrays.asList(propertyValue.split(","));
+ }
+
+ private NameService(String nameServiceId, List nameNodes) {
+ this.nameServiceId = nameServiceId;
+ this.nameNodes = nameNodes;
+ }
+
+ public List getNameNodes() {
+ return nameNodes;
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringBuilder(this)
+ .append("nameServiceId", nameServiceId)
+ .append("nameNodes", getNameNodes())
+ .toString();
+ }
+}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
index 8f9d8e1b0b4..0425189cd17 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
@@ -219,8 +219,6 @@ static Placeholder find(String pattern) {
* {@code Direction} of the upgrade
* @param upgradeType
* The {@code UpgradeType}
- * @param targetStackName
- * The destination target stack name.
* @param preferredUpgradePackName
* For unit test, need to prefer an upgrade pack since multiple
* matches can be found.
@@ -440,49 +438,26 @@ public List createSequence(UpgradePack upgradePack,
// Non-NameNode HA: Upgrade first the SECONDARY, then the primary NAMENODE
switch (upgradePack.getType()) {
case ROLLING:
- if (!hostsType.hosts.isEmpty() && hostsType.master != null && hostsType.secondary != null) {
+ if (!hostsType.getHosts().isEmpty() && hostsType.hasMastersAndSecondaries()) {
// The order is important, first do the standby, then the active namenode.
- LinkedHashSet order = new LinkedHashSet<>();
-
- order.add(hostsType.secondary);
- order.add(hostsType.master);
-
- // Override the hosts with the ordered collection
- hostsType.hosts = order;
-
+ hostsType.arrangeHostSecondariesFirst();
builder.add(context, hostsType, service.serviceName,
svc.isClientOnlyService(), pc, null);
} else {
LOG.warn("Could not orchestrate NameNode. Hosts could not be resolved: hosts={}, active={}, standby={}",
- StringUtils.join(hostsType.hosts, ','), hostsType.master, hostsType.secondary);
+ StringUtils.join(hostsType.getHosts(), ','), hostsType.getMasters(), hostsType.getSecondaries());
}
break;
case NON_ROLLING:
boolean isNameNodeHA = mhr.isNameNodeHA();
- if (isNameNodeHA && hostsType.master != null && hostsType.secondary != null) {
+ if (isNameNodeHA && hostsType.hasMastersAndSecondaries()) {
// This could be any order, but the NameNodes have to know what role they are going to take.
// So need to make 2 stages, and add different parameters to each one.
+ builder.add(context, HostsType.normal(hostsType.getMasters()), service.serviceName,
+ svc.isClientOnlyService(), pc, nameNodeRole("active"));
- HostsType ht1 = new HostsType();
- LinkedHashSet h1Hosts = new LinkedHashSet<>();
- h1Hosts.add(hostsType.master);
- ht1.hosts = h1Hosts;
- Map h1Params = new HashMap<>();
- h1Params.put("desired_namenode_role", "active");
-
- HostsType ht2 = new HostsType();
- LinkedHashSet h2Hosts = new LinkedHashSet<>();
- h2Hosts.add(hostsType.secondary);
- ht2.hosts = h2Hosts;
- Map h2Params = new HashMap<>();
- h2Params.put("desired_namenode_role", "standby");
-
-
- builder.add(context, ht1, service.serviceName,
- svc.isClientOnlyService(), pc, h1Params);
-
- builder.add(context, ht2, service.serviceName,
- svc.isClientOnlyService(), pc, h2Params);
+ builder.add(context, HostsType.normal(hostsType.getSecondaries()), service.serviceName,
+ svc.isClientOnlyService(), pc, nameNodeRole("standby"));
} else {
// If no NameNode HA, then don't need to change hostsType.hosts since there should be exactly one.
builder.add(context, hostsType, service.serviceName,
@@ -552,6 +527,12 @@ public List createSequence(UpgradePack upgradePack,
return groups;
}
+ private static Map nameNodeRole(String value) {
+ Map params = new HashMap<>();
+ params.put("desired_namenode_role", value);
+ return params;
+ }
+
/**
* Merges two service check groups when they have been orchestrated back-to-back.
* @param newHolder the "new" group holder, which was orchestrated after the "old" one
@@ -665,7 +646,7 @@ private String tokenReplace(UpgradeContext ctx, String source, String service, S
HostsType hostsType = mhr.getMasterAndHosts(service, component);
if (null != hostsType) {
- value = StringUtils.join(hostsType.hosts, ", ");
+ value = StringUtils.join(hostsType.getHosts(), ", ");
}
}
break;
@@ -675,7 +656,7 @@ private String tokenReplace(UpgradeContext ctx, String source, String service, S
HostsType hostsType = mhr.getMasterAndHosts(service, component);
if (null != hostsType) {
- value = hostsType.master;
+ value = StringUtils.join(hostsType.getMasters(), ", ");
}
}
break;
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java
index 55c6ee7e008..2ff186d3f90 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java
@@ -252,10 +252,10 @@ private StageWrapper getServerActionStageWrapper(UpgradeContext ctx, ExecuteStag
if (StringUtils.isNotEmpty(service) && StringUtils.isNotEmpty(component)) {
HostsType hosts = ctx.getResolver().getMasterAndHosts(service, component);
- if (null == hosts || hosts.hosts.isEmpty()) {
+ if (null == hosts || hosts.getHosts().isEmpty()) {
return null;
} else {
- realHosts = new LinkedHashSet<>(hosts.hosts);
+ realHosts = new LinkedHashSet<>(hosts.getHosts());
}
}
@@ -301,19 +301,19 @@ private StageWrapper getExecuteStageWrapper(UpgradeContext ctx, ExecuteStage exe
if (hosts != null) {
- Set realHosts = new LinkedHashSet<>(hosts.hosts);
- if (ExecuteHostType.MASTER == et.hosts && null != hosts.master) {
- realHosts = Collections.singleton(hosts.master);
+ Set realHosts = new LinkedHashSet<>(hosts.getHosts());
+ if (ExecuteHostType.MASTER == et.hosts && hosts.hasMasters()) {
+ realHosts = hosts.getMasters();
}
// Pick a random host.
- if (ExecuteHostType.ANY == et.hosts && !hosts.hosts.isEmpty()) {
- realHosts = Collections.singleton(hosts.hosts.iterator().next());
+ if (ExecuteHostType.ANY == et.hosts && !hosts.getHosts().isEmpty()) {
+ realHosts = Collections.singleton(hosts.getHosts().iterator().next());
}
// Pick the first host sorted alphabetically (case insensitive)
- if (ExecuteHostType.FIRST == et.hosts && !hosts.hosts.isEmpty()) {
- List sortedHosts = new ArrayList<>(hosts.hosts);
+ if (ExecuteHostType.FIRST == et.hosts && !hosts.getHosts().isEmpty()) {
+ List sortedHosts = new ArrayList<>(hosts.getHosts());
Collections.sort(sortedHosts, String.CASE_INSENSITIVE_ORDER);
realHosts = Collections.singleton(sortedHosts.get(0));
}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ColocatedGrouping.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ColocatedGrouping.java
index f8daf828ab6..589e7660d39 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ColocatedGrouping.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ColocatedGrouping.java
@@ -90,13 +90,12 @@ public void add(UpgradeContext context, HostsType hostsType, String service,
boolean clientOnly, ProcessingComponent pc, Map params) {
int count = Double.valueOf(Math.ceil(
- (double) m_batch.percent / 100 * hostsType.hosts.size())).intValue();
+ (double) m_batch.percent / 100 * hostsType.getHosts().size())).intValue();
int i = 0;
- for (String host : hostsType.hosts) {
+ for (String host : hostsType.getHosts()) {
// This class required inserting a single host into the collection
- HostsType singleHostsType = new HostsType();
- singleHostsType.hosts.add(host);
+ HostsType singleHostsType = HostsType.single(host);
Map> targetMap = ((i++) < count) ? initialBatch : finalBatches;
List targetList = targetMap.get(host);
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Grouping.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Grouping.java
index a2257d9046a..32487177ab1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Grouping.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Grouping.java
@@ -155,7 +155,7 @@ public void add(UpgradeContext context, HostsType hostsType, String service,
// Add the processing component
Task t = resolveTask(context, pc);
if (null != t) {
- TaskWrapper tw = new TaskWrapper(service, pc.name, hostsType.hosts, params, Collections.singletonList(t));
+ TaskWrapper tw = new TaskWrapper(service, pc.name, hostsType.getHosts(), params, Collections.singletonList(t));
addTasksToStageInBatches(Collections.singletonList(tw), t.getActionVerb(), context, service, pc, params);
}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/HostOrderGrouping.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/HostOrderGrouping.java
index acf0639f935..eb79222f91d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/HostOrderGrouping.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/HostOrderGrouping.java
@@ -173,7 +173,7 @@ private List buildHosts(UpgradeContext upgradeContext, List getTaskList(String service, String component, Ho
for (Task t : tasks) {
// only add the server-side task if there are actual hosts for the service/component
- if (t.getType().isServerAction() && CollectionUtils.isNotEmpty(hostsType.hosts)) {
+ if (t.getType().isServerAction() && CollectionUtils.isNotEmpty(hostsType.getHosts())) {
collection.add(new TaskWrapper(service, component, Collections.singleton(ambariServerHostname), params, t));
continue;
}
@@ -68,8 +68,8 @@ public static List getTaskList(String service, String component, Ho
if (t.getType().equals(Task.Type.EXECUTE)) {
ExecuteTask et = (ExecuteTask) t;
if (et.hosts == ExecuteHostType.MASTER) {
- if (hostsType.master != null) {
- collection.add(new TaskWrapper(service, component, Collections.singleton(hostsType.master), params, t));
+ if (hostsType.hasMasters()) {
+ collection.add(new TaskWrapper(service, component, hostsType.getMasters(), params, t));
continue;
} else {
LOG.error(MessageFormat.format("Found an Execute task for {0} and {1} meant to run on a master but could not find any masters to run on. Skipping this task.", service, component));
@@ -78,8 +78,8 @@ public static List getTaskList(String service, String component, Ho
}
// Pick a random host.
if (et.hosts == ExecuteHostType.ANY) {
- if (hostsType.hosts != null && !hostsType.hosts.isEmpty()) {
- collection.add(new TaskWrapper(service, component, Collections.singleton(hostsType.hosts.iterator().next()), params, t));
+ if (!hostsType.getHosts().isEmpty()) {
+ collection.add(new TaskWrapper(service, component, Collections.singleton(hostsType.getHosts().iterator().next()), params, t));
continue;
} else {
LOG.error(MessageFormat.format("Found an Execute task for {0} and {1} meant to run on any host but could not find host to run on. Skipping this task.", service, component));
@@ -89,8 +89,8 @@ public static List getTaskList(String service, String component, Ho
// Pick the first host sorted alphabetically (case insensitive).
if (et.hosts == ExecuteHostType.FIRST) {
- if (hostsType.hosts != null && !hostsType.hosts.isEmpty()) {
- List sortedHosts = new ArrayList<>(hostsType.hosts);
+ if (!hostsType.getHosts().isEmpty()) {
+ List sortedHosts = new ArrayList<>(hostsType.getHosts());
Collections.sort(sortedHosts, String.CASE_INSENSITIVE_ORDER);
collection.add(new TaskWrapper(service, component, Collections.singleton(sortedHosts.get(0)), params, t));
continue;
@@ -102,7 +102,7 @@ public static List getTaskList(String service, String component, Ho
// Otherwise, meant to run on ALL hosts.
}
- collection.add(new TaskWrapper(service, component, hostsType.hosts, params, t));
+ collection.add(new TaskWrapper(service, component, hostsType.getHosts(), params, t));
}
return collection;
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/HostsTypeTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/HostsTypeTest.java
new file mode 100644
index 00000000000..af5f8996f20
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/HostsTypeTest.java
@@ -0,0 +1,82 @@
+/*
+ *
+ * * Licensed to the Apache Software Foundation (ASF) under one
+ * * or more contributor license agreements. See the NOTICE file
+ * * distributed with this work for additional information
+ * * regarding copyright ownership. The ASF licenses this file
+ * * to you under the Apache License, Version 2.0 (the
+ * * "License"); you may not use this file except in compliance
+ * * with the License. You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ *
+ */
+
+package org.apache.ambari.server.stack;
+
+import static com.google.common.collect.Sets.newHashSet;
+import static com.google.common.collect.Sets.newLinkedHashSet;
+import static java.util.Arrays.asList;
+import static java.util.Collections.emptyList;
+import static java.util.Collections.emptySet;
+import static java.util.Collections.singleton;
+import static org.hamcrest.collection.IsCollectionWithSize.hasSize;
+import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertThat;
+
+import java.util.LinkedHashSet;
+
+import org.junit.Test;
+
+
+public class HostsTypeTest {
+ @Test
+ public void testGuessMasterFrom1() {
+ HostsType hosts = HostsType.guessHighAvailability(newLinkedHashSet(asList("c6401")));
+ assertThat(hosts.getMasters(), is(singleton("c6401")));
+ assertThat(hosts.getSecondaries(), hasSize(0));
+ }
+
+ @Test
+ public void testGuessMasterFrom3() {
+ HostsType hosts = HostsType.guessHighAvailability(newLinkedHashSet(asList("c6401", "c6402", "c6403")));
+ assertThat(hosts.getMasters(), is(singleton("c6401")));
+ assertThat(hosts.getSecondaries(), is(newLinkedHashSet(asList("c6402", "c6403"))));
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void testGuessMasterFromEmptyList() {
+ HostsType.guessHighAvailability(new LinkedHashSet<>(emptySet()));
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void testMasterIsMandatory() {
+ new HostsType.HighAvailabilityHosts(null, emptyList());
+ }
+
+ @Test
+ public void testFederatedMastersAndSecondaries() {
+ HostsType federated = HostsType.federated(asList(
+ new HostsType.HighAvailabilityHosts("master1", asList("sec1", "sec2")),
+ new HostsType.HighAvailabilityHosts("master2", asList("sec3", "sec4"))),
+ new LinkedHashSet<>(emptySet()));
+ assertThat(federated.getMasters(), is(newHashSet("master1", "master2")));
+ assertThat(federated.getSecondaries(), is(newHashSet("sec1", "sec2", "sec3", "sec4")));
+ }
+
+ @Test
+ public void testArrangeHosts() {
+ HostsType federated = HostsType.federated(asList(
+ new HostsType.HighAvailabilityHosts("master1", asList("sec1", "sec2")),
+ new HostsType.HighAvailabilityHosts("master2", asList("sec3", "sec4"))),
+ new LinkedHashSet<>(emptySet()));
+ federated.arrangeHostSecondariesFirst();
+ assertThat(federated.getHosts(), is(newLinkedHashSet(asList("sec1", "sec2", "master1", "sec3", "sec4", "master2"))));
+ }
+}
\ No newline at end of file
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/NameServiceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/NameServiceTest.java
new file mode 100644
index 00000000000..4b6160ba596
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/NameServiceTest.java
@@ -0,0 +1,136 @@
+/*
+ *
+ * * Licensed to the Apache Software Foundation (ASF) under one
+ * * or more contributor license agreements. See the NOTICE file
+ * * distributed with this work for additional information
+ * * regarding copyright ownership. The ASF licenses this file
+ * * to you under the Apache License, Version 2.0 (the
+ * * "License"); you may not use this file except in compliance
+ * * with the License. You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ *
+ */
+
+package org.apache.ambari.server.stack;
+
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.hamcrest.Matchers.hasProperty;
+import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.core.AllOf.allOf;
+import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertThat;
+import static org.hamcrest.core.IsCollectionContaining.hasItems;
+
+import java.util.List;
+
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.ConfigHelper;
+import org.easymock.EasyMock;
+import org.easymock.EasyMockSupport;
+import org.hamcrest.Matcher;
+import org.junit.Test;
+
+public class NameServiceTest extends EasyMockSupport {
+ private ConfigHelper config = EasyMock.createNiceMock(ConfigHelper.class);
+ private Cluster cluster = mock(Cluster.class);
+
+ @Test
+ public void testParseSingleNameService() {
+ defineHdfsProperty("dfs.internal.nameservices", "ns1");
+ defineHdfsProperty("dfs.ha.namenodes.ns1", "nn1");
+ defineHdfsProperty("dfs.namenode.http-address.ns1.nn1", "c6401:1234");
+ replay(config);
+ List nameServices = NameService.fromConfig(config, cluster);
+ assertThat(nameServices, hasSize(1));
+ assertThat(nameServices.get(0).nameServiceId, is("ns1"));
+ assertThat(nameServices.get(0).getNameNodes(), hasOnlyItems(allOf(
+ hasHost("c6401"),
+ hasPort(1234),
+ hasPropertyName("dfs.namenode.http-address.ns1.nn1"))));
+ }
+
+ @Test
+ public void testParseSingleNameServiceWhenHttpsEnabled() {
+ defineHdfsProperty("dfs.internal.nameservices", "ns1");
+ defineHdfsProperty("dfs.ha.namenodes.ns1", "nn1");
+ defineHdfsProperty("dfs.namenode.https-address.ns1.nn1", "c6401:4567");
+ defineHdfsProperty("dfs.http.policy", ConfigHelper.HTTPS_ONLY);
+ replay(config);
+ List nameServices = NameService.fromConfig(config, cluster);
+ assertThat(
+ nameServices.get(0).getNameNodes(),
+ hasOnlyItems(allOf(hasPort(4567), hasPropertyName("dfs.namenode.https-address.ns1.nn1"))));
+ }
+
+ @Test
+ public void testParseFederatedNameService() {
+ defineHdfsProperty("dfs.internal.nameservices", "ns1,ns2");
+ defineHdfsProperty("dfs.ha.namenodes.ns1", "nn1,nn2");
+ defineHdfsProperty("dfs.ha.namenodes.ns2", "nn3,nn4");
+ defineHdfsProperty("dfs.namenode.http-address.ns1.nn1", "c6401:1234");
+ defineHdfsProperty("dfs.namenode.http-address.ns1.nn2", "c6402:1234");
+ defineHdfsProperty("dfs.namenode.http-address.ns2.nn3", "c6403:1234");
+ defineHdfsProperty("dfs.namenode.http-address.ns2.nn4", "c6404:1234");
+ replay(config);
+ assertThat(NameService.fromConfig(config, cluster), hasOnlyItems(
+ hasNameNodes(hasOnlyItems(hasHost("c6401"), hasHost("c6402"))),
+ hasNameNodes(hasOnlyItems(hasHost("c6403"), hasHost("c6404")))
+ ));
+ }
+
+ @Test
+ public void testEmptyWhenNameServiceIdIsMissingFromConfig() {
+ defineHdfsProperty("dfs.internal.nameservices", null);
+ replay(config);
+ assertThat(NameService.fromConfig(config, cluster), hasSize(0));
+ }
+
+ @Test
+ public void testEmptyNameNodesWhenNs1IsMissingFromConfig() {
+ defineHdfsProperty("dfs.internal.nameservices", "ns1");
+ defineHdfsProperty("dfs.ha.namenodes.ns1", null);
+ replay(config);
+ assertThat(NameService.fromConfig(config, cluster).get(0).getNameNodes(), hasSize(0));
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void testExceptionWhenNameNodeAddressIsMissingFromConfig() {
+ defineHdfsProperty("dfs.internal.nameservices", "ns1");
+ defineHdfsProperty("dfs.ha.namenodes.ns1", "nn1");
+ defineHdfsProperty("dfs.namenode.http-address.ns1.nn1", null);
+ replay(config);
+ NameService.fromConfig(config, cluster).get(0).getNameNodes().get(0).getHost();
+ }
+
+ private Matcher hasOnlyItems(Matcher... matchers) {
+ return allOf(hasSize(matchers.length), hasItems(matchers));
+ }
+
+ private Matcher hasNameNodes(Matcher matcher) {
+ return hasProperty("nameNodes", matcher);
+ }
+
+ private Matcher hasHost(String host) {
+ return hasProperty("host", is(host));
+ }
+
+ private Matcher