+ Map<String, List<String>> headers = new HashMap<>();
+
+ String base64 = Base64.encodeBase64String(
+ String.format("%s:%s", username, password).getBytes(Charset.forName("UTF8")));
+
+ headers.put("Content-Type", Arrays.asList("application/json"));
+ headers.put("Accept", Arrays.asList("application/json"));
+ headers.put("Authorization", Arrays.asList(String.format("Basic %s", base64)));
+
+ return headers;
+ }
+
+ /**
+ * Finds the property value. If not found, then the failure reason for the check
+ * is filled in and processing should not continue.
+ *
+ * @param type the type of property to find
+ * @param key the key in configs matching the type
+ * @param check the check for loading failure reasons
+ * @param request the request for loading failure reasons
+ * @return the property value, or {@code null} if the property doesn't exist
+ * @throws AmbariException if the property cannot be retrieved from the cluster configuration
+ */
+ private String checkEmpty(String type, String key, PrerequisiteCheck check,
+ PrereqCheckRequest request) throws AmbariException {
+
+ String value = getProperty(request, type, key);
+ if (null == value) {
+ String reason = getFailReason(KEY_RANGER_CONFIG_MISSING, check, request);
+ reason = String.format(reason, type, key);
+ check.setFailReason(reason);
+ check.getFailedOn().add("RANGER");
+ check.setStatus(PrereqCheckStatus.WARNING);
+ }
+ return value;
+ }
+
+
+}
\ No newline at end of file
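Aside: the header-building code above composes a standard HTTP Basic Authorization value from the user's credentials. A minimal, self-contained sketch of the same composition using the JDK's java.util.Base64 (the patch itself uses commons-codec); the credentials are hypothetical and for illustration only:

    import java.nio.charset.StandardCharsets;
    import java.util.Base64;

    public class BasicAuthHeaderSketch {
        public static void main(String[] args) {
            String username = "admin";   // hypothetical
            String password = "secret";  // hypothetical

            // Same composition as above: "user:pass" -> Base64 -> "Basic <token>".
            String token = Base64.getEncoder().encodeToString(
                (username + ":" + password).getBytes(StandardCharsets.UTF_8));

            System.out.println("Basic " + token); // Basic YWRtaW46c2VjcmV0
        }
    }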
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/RangerSSLConfigCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/RangerSSLConfigCheck.java
new file mode 100644
index 00000000000..540fd3e7fd6
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/RangerSSLConfigCheck.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.checks;
+
+
+import java.util.Set;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.controller.PrereqCheckRequest;
+import org.apache.ambari.server.state.stack.PrereqCheckStatus;
+import org.apache.ambari.server.state.stack.PrerequisiteCheck;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Sets;
+import com.google.inject.Singleton;
+
+
+/**
+ * This service check applies mainly to 2.6 stacks and encourages the user
+ * to move the certificate, keystore and truststore from the default conf dir to
+ * an external directory that is not touched during RU/EU upgrades and downgrades.
+ */
+@Singleton
+@UpgradeCheck(group = UpgradeCheckGroup.INFORMATIONAL_WARNING)
+public class RangerSSLConfigCheck extends AbstractCheckDescriptor {
+
+ private static final Logger LOG = LoggerFactory.getLogger(RangerSSLConfigCheck.class);
+ private static final String serviceName = "RANGER";
+
+
+ /**
+ * Constructor
+ */
+ public RangerSSLConfigCheck() {
+ super(CheckDescription.RANGER_SSL_CONFIG_CHECK);
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public Set<String> getApplicableServices() {
+ return Sets.newHashSet(serviceName);
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void perform(PrerequisiteCheck prerequisiteCheck, PrereqCheckRequest request) throws AmbariException {
+ String isRangerHTTPEnabled = getProperty(request, "ranger-admin-site", "ranger.service.http.enabled");
+ String isRangerSSLEnabled = getProperty(request, "ranger-admin-site", "ranger.service.https.attrib.ssl.enabled");
+ String rangerSSLKeystoreFile = getProperty(request, "ranger-admin-site", "ranger.https.attrib.keystore.file");
+
+ if (("false").equalsIgnoreCase(isRangerHTTPEnabled) && ("true").equalsIgnoreCase(isRangerSSLEnabled) && rangerSSLKeystoreFile.contains("/etc/ranger/admin/conf") ) {
+ LOG.info("Ranger is SSL enabled, need to show Configuration changes warning before upragade proceeds.");
+ prerequisiteCheck.getFailedOn().add(serviceName);
+ prerequisiteCheck.setStatus(PrereqCheckStatus.WARNING);
+ prerequisiteCheck.setFailReason(getFailReason(prerequisiteCheck, request));
+ } else {
+ LOG.info("Ranger is not SSL enabled, no need to show Configuration changes warning before upragade proceeds.");
+ }
+
+ }
+}
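For illustration, the warning condition in RangerSSLConfigCheck.perform() boils down to: HTTP disabled, SSL enabled, and the keystore still under the default conf directory that upgrades overwrite. A standalone sketch with hypothetical ranger-admin-site values:

    public class RangerSslWarningConditionSketch {
        public static void main(String[] args) {
            // Hypothetical ranger-admin-site values, for illustration only.
            String httpEnabled = "false";
            String sslEnabled = "true";
            String keystoreFile = "/etc/ranger/admin/conf/ranger-admin-keystore.jks";

            // Mirrors the condition above: warn only when SSL is on and the
            // keystore still lives in the default conf directory.
            boolean warn = "false".equalsIgnoreCase(httpEnabled)
                && "true".equalsIgnoreCase(sslEnabled)
                && keystoreFile != null
                && keystoreFile.contains("/etc/ranger/admin/conf");

            System.out.println(warn ? "WARNING" : "PASS"); // WARNING
        }
    }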
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/SecondaryNamenodeDeletedCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/SecondaryNamenodeDeletedCheck.java
new file mode 100644
index 00000000000..349b2609516
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/SecondaryNamenodeDeletedCheck.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.checks;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.ServiceComponentNotFoundException;
+import org.apache.ambari.server.controller.PrereqCheckRequest;
+import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
+import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
+import org.apache.ambari.server.stack.MasterHostResolver;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceComponent;
+import org.apache.ambari.server.state.stack.PrereqCheckStatus;
+import org.apache.ambari.server.state.stack.PrerequisiteCheck;
+
+import com.google.common.collect.Sets;
+import com.google.inject.Inject;
+import com.google.inject.Singleton;
+
+/**
+ * Checks that the Secondary NameNode is not present on any of the hosts.
+ */
+@Singleton
+@UpgradeCheck(group = UpgradeCheckGroup.NAMENODE_HA, order = 16.0f)
+public class SecondaryNamenodeDeletedCheck extends AbstractCheckDescriptor {
+ private static final String HDFS_SERVICE_NAME = MasterHostResolver.Service.HDFS.name();
+
+ @Inject
+ HostComponentStateDAO hostComponentStateDao;
+ /**
+ * Constructor.
+ */
+ public SecondaryNamenodeDeletedCheck() {
+ super(CheckDescription.SECONDARY_NAMENODE_MUST_BE_DELETED);
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public Set<String> getApplicableServices() {
+ return Sets.newHashSet(HDFS_SERVICE_NAME);
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public List<CheckQualification> getQualifications() {
+ return Arrays.asList(
+ new PriorCheckQualification(CheckDescription.SERVICES_NAMENODE_HA));
+ }
+
+ // TODO AMBARI-12698, there are 2 ways to filter the prechecks.
+ // 1. Explicitly mention them in each upgrade pack, which is more flexible, but requires adding the name of checks
+ // to perform in each upgrade pack.
+ // 2. Make each upgrade check class call a function before perform() that will determine if the check is appropriate
+ // given the type of upgrade. The PrereqCheckRequest object has a field for the type of upgrade.
+ @Override
+ public void perform(PrerequisiteCheck prerequisiteCheck, PrereqCheckRequest request) throws AmbariException {
+ Set<String> hosts = new HashSet<>();
+ final String SECONDARY_NAMENODE = "SECONDARY_NAMENODE";
+
+ final String clusterName = request.getClusterName();
+ final Cluster cluster = clustersProvider.get().getCluster(clusterName);
+ try {
+ ServiceComponent serviceComponent = cluster.getService(HDFS_SERVICE_NAME).getServiceComponent(SECONDARY_NAMENODE);
+ if (serviceComponent != null) {
+ hosts = serviceComponent.getServiceComponentHosts().keySet();
+ }
+ } catch (ServiceComponentNotFoundException err) {
+ // This exception can be ignored; the component may simply not exist, and this is only a best-effort lookup.
+ ;
+ }
+
+ // Try another method to find references to SECONDARY_NAMENODE
+ if (hosts.isEmpty()) {
+ List<HostComponentStateEntity> allHostComponents = hostComponentStateDao.findAll();
+ for(HostComponentStateEntity hc : allHostComponents) {
+ Service s = cluster.getService(hc.getServiceId());
+ if (s.getServiceType().equalsIgnoreCase(HDFS_SERVICE_NAME) && hc.getComponentName().equalsIgnoreCase(SECONDARY_NAMENODE)) {
+ hosts.add(hc.getHostName());
+ }
+ }
+ }
+
+ if (!hosts.isEmpty()) {
+ String foundHost = hosts.toArray(new String[hosts.size()])[0];
+ prerequisiteCheck.getFailedOn().add(HDFS_SERVICE_NAME);
+ prerequisiteCheck.setStatus(PrereqCheckStatus.FAIL);
+ String failReason = getFailReason(prerequisiteCheck, request);
+ prerequisiteCheck.setFailReason(String.format(failReason, foundHost));
+ }
+ }
+}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesMapReduceDistributedCacheCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesMapReduceDistributedCacheCheck.java
new file mode 100644
index 00000000000..e24e669863a
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesMapReduceDistributedCacheCheck.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.checks;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.controller.PrereqCheckRequest;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.DesiredConfig;
+import org.apache.ambari.server.state.stack.PrereqCheckStatus;
+import org.apache.ambari.server.state.stack.PrerequisiteCheck;
+import org.apache.ambari.server.state.stack.UpgradePack.PrerequisiteCheckConfig;
+import org.apache.commons.lang.StringUtils;
+
+import com.google.common.collect.Sets;
+import com.google.inject.Singleton;
+
+/**
+ * Checks that MR jobs reference hadoop libraries from the distributed cache.
+ */
+@Singleton
+@UpgradeCheck(group = UpgradeCheckGroup.NAMENODE_HA, order = 17.1f)
+public class ServicesMapReduceDistributedCacheCheck extends AbstractCheckDescriptor {
+
+ static final String KEY_APP_CLASSPATH = "app_classpath";
+ static final String KEY_FRAMEWORK_PATH = "framework_path";
+ static final String KEY_NOT_DFS = "not_dfs";
+ static final String DFS_PROTOCOLS_REGEX_PROPERTY_NAME = "dfs-protocols-regex";
+ static final String DFS_PROTOCOLS_REGEX_DEFAULT = "^([^:]*dfs|wasb|ecs):.*";
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public Set<String> getApplicableServices() {
+ return Sets.newHashSet("YARN");
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public List<CheckQualification> getQualifications() {
+ return Arrays.asList(
+ new PriorCheckQualification(CheckDescription.SERVICES_NAMENODE_HA));
+ }
+
+ /**
+ * Constructor.
+ */
+ public ServicesMapReduceDistributedCacheCheck() {
+ super(CheckDescription.SERVICES_MR_DISTRIBUTED_CACHE);
+ }
+
+ @Override
+ public void perform(PrerequisiteCheck prerequisiteCheck, PrereqCheckRequest request) throws AmbariException {
+ String dfsProtocolsRegex = DFS_PROTOCOLS_REGEX_DEFAULT;
+ PrerequisiteCheckConfig prerequisiteCheckConfig = request.getPrerequisiteCheckConfig();
+ Map<String, String> checkProperties = null;
+ if(prerequisiteCheckConfig != null) {
+ checkProperties = prerequisiteCheckConfig.getCheckProperties(this.getClass().getName());
+ }
+ if(checkProperties != null && checkProperties.containsKey(DFS_PROTOCOLS_REGEX_PROPERTY_NAME)) {
+ dfsProtocolsRegex = checkProperties.get(DFS_PROTOCOLS_REGEX_PROPERTY_NAME);
+ }
+
+ final String clusterName = request.getClusterName();
+ final Cluster cluster = clustersProvider.get().getCluster(clusterName);
+ final String mrConfigType = "mapred-site";
+ final String coreSiteConfigType = "core-site";
+ final Map<String, DesiredConfig> desiredConfigs = cluster.getDesiredConfigs();
+
+ final DesiredConfig mrDesiredConfig = desiredConfigs.get(mrConfigType);
+ final DesiredConfig coreSiteDesiredConfig = desiredConfigs.get(coreSiteConfigType);
+ final Config mrConfig = cluster.getConfig(mrConfigType, mrDesiredConfig.getTag());
+ final Config coreSiteConfig = cluster.getConfig(coreSiteConfigType, coreSiteDesiredConfig.getTag());
+ final String applicationClasspath = mrConfig.getProperties().get("mapreduce.application.classpath");
+ final String frameworkPath = mrConfig.getProperties().get("mapreduce.application.framework.path");
+ final String defaultFS = coreSiteConfig.getProperties().get("fs.defaultFS");
+
+ List<String> errorMessages = new ArrayList<>();
+ if (applicationClasspath == null || applicationClasspath.isEmpty()) {
+ errorMessages.add(getFailReason(KEY_APP_CLASSPATH, prerequisiteCheck, request));
+ }
+
+ if (frameworkPath == null || frameworkPath.isEmpty()) {
+ errorMessages.add(getFailReason(KEY_FRAMEWORK_PATH, prerequisiteCheck, request));
+ }
+
+ if (!errorMessages.isEmpty()) {
+ prerequisiteCheck.getFailedOn().add("MAPREDUCE2");
+ prerequisiteCheck.setStatus(PrereqCheckStatus.FAIL);
+ prerequisiteCheck.setFailReason(StringUtils.join(errorMessages, " "));
+ return;
+ }
+
+ if (!frameworkPath.matches(dfsProtocolsRegex) && (defaultFS == null || !defaultFS.matches(dfsProtocolsRegex))) {
+ prerequisiteCheck.getFailedOn().add("MAPREDUCE2");
+ prerequisiteCheck.setStatus(PrereqCheckStatus.FAIL);
+ prerequisiteCheck.setFailReason(getFailReason(KEY_NOT_DFS, prerequisiteCheck, request));
+ }
+ }
+}
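For reference, the default dfs-protocols-regex above accepts any URI scheme ending in "dfs" plus wasb and ecs; the framework path (or, failing that, fs.defaultFS) must match it. A small sketch showing which hypothetical URIs match:

    import java.util.regex.Pattern;

    public class DfsProtocolsRegexSketch {
        public static void main(String[] args) {
            Pattern dfsProtocols = Pattern.compile("^([^:]*dfs|wasb|ecs):.*");

            // Schemes ending in "dfs" (hdfs, ...) plus wasb/ecs match.
            System.out.println(dfsProtocols.matcher("hdfs://nn1:8020/hdp/apps").matches());      // true
            System.out.println(dfsProtocols.matcher("wasb://container@account/apps").matches()); // true

            // Local or other non-DFS schemes do not match, which triggers the
            // "not_dfs" failure unless fs.defaultFS itself points at a DFS.
            System.out.println(dfsProtocols.matcher("file:///usr/hdp/apps").matches());          // false
        }
    }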
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesNamenodeHighAvailabilityCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesNamenodeHighAvailabilityCheck.java
new file mode 100644
index 00000000000..ecd88edde01
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesNamenodeHighAvailabilityCheck.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.checks;
+
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.controller.PrereqCheckRequest;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.DesiredConfig;
+import org.apache.ambari.server.state.stack.PrereqCheckStatus;
+import org.apache.ambari.server.state.stack.PrerequisiteCheck;
+
+import com.google.common.collect.Sets;
+import com.google.inject.Singleton;
+
+/**
+ * Checks that namenode high availability is enabled.
+ */
+@Singleton
+@UpgradeCheck(group = UpgradeCheckGroup.NAMENODE_HA, order = 16.1f)
+public class ServicesNamenodeHighAvailabilityCheck extends AbstractCheckDescriptor {
+
+ /**
+ * Constructor.
+ */
+ public ServicesNamenodeHighAvailabilityCheck() {
+ super(CheckDescription.SERVICES_NAMENODE_HA);
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public Set<String> getApplicableServices() {
+ return Sets.newHashSet("HDFS");
+ }
+
+
+ @Override
+ public void perform(PrerequisiteCheck prerequisiteCheck, PrereqCheckRequest request) throws AmbariException {
+ final String clusterName = request.getClusterName();
+ final Cluster cluster = clustersProvider.get().getCluster(clusterName);
+ final String configType = "hdfs-site";
+ final Map<String, DesiredConfig> desiredConfigs = cluster.getDesiredConfigs();
+ final DesiredConfig desiredConfig = desiredConfigs.get(configType);
+ final Config config = cluster.getConfig(configType, desiredConfig.getTag());
+ if (!config.getProperties().containsKey("dfs.internal.nameservices")) {
+ prerequisiteCheck.getFailedOn().add("HDFS");
+ prerequisiteCheck.setStatus(PrereqCheckStatus.FAIL);
+ prerequisiteCheck.setFailReason(getFailReason(prerequisiteCheck, request));
+ }
+ }
+}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesNamenodeTruncateCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesNamenodeTruncateCheck.java
new file mode 100644
index 00000000000..4d9e7d72aba
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesNamenodeTruncateCheck.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.checks;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.controller.PrereqCheckRequest;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.stack.PrereqCheckStatus;
+import org.apache.ambari.server.state.stack.PrerequisiteCheck;
+
+import com.google.common.collect.Sets;
+import com.google.inject.Singleton;
+
+/**
+ * Checks that HDFS truncate (dfs.allow.truncate) is not enabled.
+ */
+@Singleton
+@UpgradeCheck(group = UpgradeCheckGroup.NAMENODE_HA, order = 16.2f)
+public class ServicesNamenodeTruncateCheck extends AbstractCheckDescriptor {
+
+ /**
+ * Constructor.
+ */
+ public ServicesNamenodeTruncateCheck() {
+ super(CheckDescription.SERVICES_NAMENODE_TRUNCATE);
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public Set<String> getApplicableServices() {
+ return Sets.newHashSet("HDFS");
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public List<CheckQualification> getQualifications() {
+ return Arrays.asList(
+ new PriorCheckQualification(CheckDescription.SERVICES_NAMENODE_HA));
+ }
+
+ @Override
+ public void perform(PrerequisiteCheck prerequisiteCheck, PrereqCheckRequest request) throws AmbariException {
+ final String clusterName = request.getClusterName();
+ final Cluster cluster = clustersProvider.get().getCluster(clusterName);
+ Config config = cluster.getDesiredConfigByType("hdfs-site");
+
+ String truncateEnabled = config.getProperties().get("dfs.allow.truncate");
+
+ if (Boolean.valueOf(truncateEnabled)) {
+ prerequisiteCheck.getFailedOn().add("HDFS");
+ PrereqCheckStatus checkStatus = PrereqCheckStatus.FAIL;
+ prerequisiteCheck.setStatus(checkStatus);
+ prerequisiteCheck.setFailReason(getFailReason(prerequisiteCheck, request));
+ }
+ }
+}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesTezDistributedCacheCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesTezDistributedCacheCheck.java
new file mode 100644
index 00000000000..8331ebf9540
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesTezDistributedCacheCheck.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.checks;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.controller.PrereqCheckRequest;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.DesiredConfig;
+import org.apache.ambari.server.state.stack.PrereqCheckStatus;
+import org.apache.ambari.server.state.stack.PrerequisiteCheck;
+import org.apache.ambari.server.state.stack.UpgradePack.PrerequisiteCheckConfig;
+import org.apache.commons.lang.StringUtils;
+
+import com.google.common.collect.Sets;
+import com.google.inject.Singleton;
+
+/**
+ * Checks that Tez jobs reference hadoop libraries from the distributed cache.
+ */
+@Singleton
+@UpgradeCheck(group = UpgradeCheckGroup.NAMENODE_HA, order = 21.0f)
+public class ServicesTezDistributedCacheCheck extends AbstractCheckDescriptor {
+
+ static final String KEY_LIB_URI_MISSING = "tez_lib_uri_missing";
+ static final String KEY_USE_HADOOP_LIBS = "tez_use_hadoop_libs";
+ static final String KEY_LIB_NOT_DFS = "lib_not_dfs";
+ static final String KEY_LIB_NOT_TARGZ = "lib_not_targz";
+ static final String KEY_USE_HADOOP_LIBS_FALSE = "tez_use_hadoop_libs_false";
+ static final String DFS_PROTOCOLS_REGEX_PROPERTY_NAME = "dfs-protocols-regex";
+ static final String DFS_PROTOCOLS_REGEX_DEFAULT = "^([^:]*dfs|wasb|ecs):.*";
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public Set<String> getApplicableServices() {
+ return Sets.newHashSet("TEZ");
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public List<CheckQualification> getQualifications() {
+ return Arrays.asList(
+ new PriorCheckQualification(CheckDescription.SERVICES_NAMENODE_HA));
+ }
+
+ /**
+ * Constructor.
+ */
+ public ServicesTezDistributedCacheCheck() {
+ super(CheckDescription.SERVICES_TEZ_DISTRIBUTED_CACHE);
+ }
+
+ @Override
+ public void perform(PrerequisiteCheck prerequisiteCheck, PrereqCheckRequest request) throws AmbariException {
+ String dfsProtocolsRegex = DFS_PROTOCOLS_REGEX_DEFAULT;
+ PrerequisiteCheckConfig prerequisiteCheckConfig = request.getPrerequisiteCheckConfig();
+ Map<String, String> checkProperties = null;
+ if(prerequisiteCheckConfig != null) {
+ checkProperties = prerequisiteCheckConfig.getCheckProperties(this.getClass().getName());
+ }
+ if(checkProperties != null && checkProperties.containsKey(DFS_PROTOCOLS_REGEX_PROPERTY_NAME)) {
+ dfsProtocolsRegex = checkProperties.get(DFS_PROTOCOLS_REGEX_PROPERTY_NAME);
+ }
+
+ final String clusterName = request.getClusterName();
+ final Cluster cluster = clustersProvider.get().getCluster(clusterName);
+ final String tezConfigType = "tez-site";
+ final String coreSiteConfigType = "core-site";
+ final Map<String, DesiredConfig> desiredConfigs = cluster.getDesiredConfigs();
+
+ final DesiredConfig tezDesiredConfig = desiredConfigs.get(tezConfigType);
+ final Config tezConfig = cluster.getConfig(tezConfigType, tezDesiredConfig.getTag());
+ final DesiredConfig coreSiteDesiredConfig = desiredConfigs.get(coreSiteConfigType);
+ final Config coreSiteConfig = cluster.getConfig(coreSiteConfigType, coreSiteDesiredConfig.getTag());
+ final String libUris = tezConfig.getProperties().get("tez.lib.uris");
+ final String useHadoopLibs = tezConfig.getProperties().get("tez.use.cluster.hadoop-libs");
+ final String defaultFS = coreSiteConfig.getProperties().get("fs.defaultFS");
+
+ List<String> errorMessages = new ArrayList<>();
+ if (libUris == null || libUris.isEmpty()) {
+ errorMessages.add(getFailReason(KEY_LIB_URI_MISSING, prerequisiteCheck, request));
+ }
+
+ if (useHadoopLibs == null || useHadoopLibs.isEmpty()) {
+ errorMessages.add(getFailReason(KEY_USE_HADOOP_LIBS, prerequisiteCheck, request));
+ }
+
+ if (!errorMessages.isEmpty()) {
+ prerequisiteCheck.getFailedOn().add("TEZ");
+ prerequisiteCheck.setStatus(PrereqCheckStatus.FAIL);
+ prerequisiteCheck.setFailReason(StringUtils.join(errorMessages, " "));
+ return;
+ }
+
+ if (!libUris.matches(dfsProtocolsRegex) && (defaultFS == null || !defaultFS.matches(dfsProtocolsRegex))) {
+ errorMessages.add(getFailReason(KEY_LIB_NOT_DFS, prerequisiteCheck, request));
+ }
+
+ if (!libUris.contains("tar.gz")) {
+ errorMessages.add(getFailReason(KEY_LIB_NOT_TARGZ, prerequisiteCheck, request));
+ }
+
+ if (Boolean.parseBoolean(useHadoopLibs)) {
+ errorMessages.add(getFailReason(KEY_USE_HADOOP_LIBS_FALSE, prerequisiteCheck, request));
+ }
+
+ if (!errorMessages.isEmpty()) {
+ prerequisiteCheck.getFailedOn().add("TEZ");
+ prerequisiteCheck.setStatus(PrereqCheckStatus.FAIL);
+ prerequisiteCheck.setFailReason(StringUtils.join(errorMessages, " "));
+ }
+ }
+}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesYarnWorkPreservingCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesYarnWorkPreservingCheck.java
new file mode 100644
index 00000000000..0b102a99c8a
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/ServicesYarnWorkPreservingCheck.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.checks;
+
+import java.util.Set;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.controller.PrereqCheckRequest;
+import org.apache.ambari.server.state.stack.PrereqCheckStatus;
+import org.apache.ambari.server.state.stack.PrerequisiteCheck;
+import org.apache.commons.lang.BooleanUtils;
+
+import com.google.common.collect.Sets;
+import com.google.inject.Singleton;
+
+/**
+ * Checks that YARN has work-preserving restart enabled.
+ */
+@Singleton
+@UpgradeCheck(group = UpgradeCheckGroup.DEFAULT, order = 17.1f)
+public class ServicesYarnWorkPreservingCheck extends AbstractCheckDescriptor {
+
+ /**
+ * Constructor.
+ */
+ public ServicesYarnWorkPreservingCheck() {
+ super(CheckDescription.SERVICES_YARN_WP);
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public Set<String> getApplicableServices() {
+ return Sets.newHashSet("YARN");
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void perform(PrerequisiteCheck prerequisiteCheck, PrereqCheckRequest request) throws AmbariException {
+ String propertyValue = getProperty(request, "yarn-site",
+ "yarn.resourcemanager.work-preserving-recovery.enabled");
+
+ if (null == propertyValue || !BooleanUtils.toBoolean(propertyValue)) {
+ prerequisiteCheck.getFailedOn().add("YARN");
+ prerequisiteCheck.setStatus(PrereqCheckStatus.FAIL);
+ prerequisiteCheck.setFailReason(getFailReason(prerequisiteCheck, request));
+ }
+ }
+}
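The YARN checks in this patch read boolean-ish properties through commons-lang's BooleanUtils.toBoolean(String), which is more lenient than Boolean.parseBoolean. A short sketch, assuming commons-lang 2.x semantics where only "true", "yes" and "on" (case-insensitive) count as true:

    import org.apache.commons.lang.BooleanUtils;

    public class WorkPreservingFlagSketch {
        public static void main(String[] args) {
            // "true", "yes" and "on" (case-insensitive) are accepted as true.
            System.out.println(BooleanUtils.toBoolean("true")); // true
            System.out.println(BooleanUtils.toBoolean("Yes"));  // true
            System.out.println(BooleanUtils.toBoolean("on"));   // true

            // Anything else, including null, is false, so the check fails
            // unless the property is explicitly enabled.
            System.out.println(BooleanUtils.toBoolean("disabled"));    // false
            System.out.println(BooleanUtils.toBoolean((String) null)); // false
        }
    }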
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/StormShutdownWarning.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/StormShutdownWarning.java
new file mode 100644
index 00000000000..067cd8043a1
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/StormShutdownWarning.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.checks;
+
+import java.util.Set;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.controller.PrereqCheckRequest;
+import org.apache.ambari.server.state.stack.PrereqCheckStatus;
+import org.apache.ambari.server.state.stack.PrerequisiteCheck;
+import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
+
+import com.google.common.collect.Sets;
+import com.google.inject.Singleton;
+
+/**
+ * The {@link StormShutdownWarning} checks whether Storm is installed and whether the
+ * upgrade type is {@link UpgradeType#ROLLING}. If so, a
+ * {@link PrereqCheckStatus#WARNING} is produced to let the operator know that
+ * Storm cannot be upgraded in a rolling fashion on certain versions of the HDP stack.
+ *
+ * The upgrade packs must include this check where it is applicable. It contains
+ * no logic for determining stack versions and only checks for the presence of
+ * Storm and the type of upgrade.
+ */
+@Singleton
+@UpgradeCheck(group = UpgradeCheckGroup.INFORMATIONAL_WARNING, required = UpgradeType.ROLLING)
+public class StormShutdownWarning extends AbstractCheckDescriptor {
+
+ /**
+ * Constructor.
+ */
+ public StormShutdownWarning() {
+ super(CheckDescription.SERVICES_STORM_ROLLING_WARNING);
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public Set<String> getApplicableServices() {
+ return Sets.newHashSet("STORM");
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void perform(PrerequisiteCheck prerequisiteCheck, PrereqCheckRequest request) throws AmbariException {
+ prerequisiteCheck.getFailedOn().add("STORM");
+ prerequisiteCheck.setStatus(PrereqCheckStatus.WARNING);
+ prerequisiteCheck.setFailReason(getFailReason(prerequisiteCheck, request));
+ }
+}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/YarnRMHighAvailabilityCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/YarnRMHighAvailabilityCheck.java
new file mode 100644
index 00000000000..e0d3df7fc16
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/YarnRMHighAvailabilityCheck.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.checks;
+
+import java.util.Set;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.controller.PrereqCheckRequest;
+import org.apache.ambari.server.state.stack.PrereqCheckStatus;
+import org.apache.ambari.server.state.stack.PrerequisiteCheck;
+import org.apache.commons.lang.BooleanUtils;
+
+import com.google.common.collect.Sets;
+import com.google.inject.Singleton;
+
+/**
+ * The {@link YarnRMHighAvailabilityCheck} checks that high availability is
+ * enabled for the YARN ResourceManager.
+ */
+@Singleton
+@UpgradeCheck(group = UpgradeCheckGroup.MULTIPLE_COMPONENT_WARNING, order = 17.2f)
+public class YarnRMHighAvailabilityCheck extends AbstractCheckDescriptor {
+
+ /**
+ * Constructor.
+ */
+ public YarnRMHighAvailabilityCheck() {
+ super(CheckDescription.SERVICES_YARN_RM_HA);
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public Set<String> getApplicableServices() {
+ return Sets.newHashSet("YARN");
+ }
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void perform(PrerequisiteCheck prerequisiteCheck, PrereqCheckRequest request) throws AmbariException {
+ // pretty weak sauce here; probably should do a bit more, like query JMX to
+ // see that there is at least 1 RM active and 1 in standby
+ String propertyValue = getProperty(request, "yarn-site", "yarn.resourcemanager.ha.enabled");
+
+ if (null == propertyValue || !BooleanUtils.toBoolean(propertyValue)) {
+ prerequisiteCheck.getFailedOn().add("YARN");
+ prerequisiteCheck.setStatus(PrereqCheckStatus.WARNING);
+ prerequisiteCheck.setFailReason(getFailReason(prerequisiteCheck, request));
+ }
+ }
+}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/YarnTimelineServerStatePreservingCheck.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/YarnTimelineServerStatePreservingCheck.java
new file mode 100644
index 00000000000..27d4ace8847
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/YarnTimelineServerStatePreservingCheck.java
@@ -0,0 +1,136 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.checks;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.controller.PrereqCheckRequest;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.stack.PrereqCheckStatus;
+import org.apache.ambari.server.state.stack.PrerequisiteCheck;
+import org.apache.ambari.server.state.stack.UpgradePack.PrerequisiteCheckConfig;
+import org.apache.ambari.server.utils.VersionUtils;
+import org.apache.commons.lang.BooleanUtils;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+import com.google.inject.Singleton;
+
+/**
+ * The {@link YarnTimelineServerStatePreservingCheck} is used to check that the
+ * YARN Timeline server has state preserving mode enabled. This value is only
+ * present in HDP 2.2.4.2+.
+ */
+@Singleton
+@UpgradeCheck(group = UpgradeCheckGroup.DEFAULT, order = 17.3f)
+public class YarnTimelineServerStatePreservingCheck extends AbstractCheckDescriptor {
+
+ private final static String YARN_TIMELINE_STATE_RECOVERY_ENABLED_KEY = "yarn.timeline-service.recovery.enabled";
+ private final static String MIN_APPLICABLE_STACK_VERSION_PROPERTY_NAME = "min-applicable-stack-version";
+
+ /**
+ * Constructor.
+ */
+ public YarnTimelineServerStatePreservingCheck() {
+ super(CheckDescription.SERVICES_YARN_TIMELINE_ST);
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public Set<String> getApplicableServices() {
+ return Sets.newHashSet("YARN");
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public List<CheckQualification> getQualifications() {
+ return Lists.newArrayList(new YarnTimelineServerMinVersionQualification());
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void perform(PrerequisiteCheck prerequisiteCheck, PrereqCheckRequest request) throws AmbariException {
+ String propertyValue = getProperty(request, "yarn-site",
+ YARN_TIMELINE_STATE_RECOVERY_ENABLED_KEY);
+
+ if (null == propertyValue || !BooleanUtils.toBoolean(propertyValue)) {
+ prerequisiteCheck.getFailedOn().add("YARN");
+ prerequisiteCheck.setStatus(PrereqCheckStatus.FAIL);
+ prerequisiteCheck.setFailReason(getFailReason(prerequisiteCheck, request));
+ }
+ }
+
+ /**
+ * The {@link YarnTimelineServerMinVersionQualification} is used to determine
+ * if the ATS component needs to have the
+ * {@value #MIN_APPLICABLE_STACK_VERSION_PROPERTY_NAME} set.
+ */
+ private class YarnTimelineServerMinVersionQualification implements CheckQualification {
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public boolean isApplicable(PrereqCheckRequest request) throws AmbariException {
+ final Cluster cluster = clustersProvider.get().getCluster(request.getClusterName());
+
+ String minApplicableStackVersion = null;
+ PrerequisiteCheckConfig prerequisiteCheckConfig = request.getPrerequisiteCheckConfig();
+ Map<String, String> checkProperties = null;
+ if(prerequisiteCheckConfig != null) {
+ checkProperties = prerequisiteCheckConfig.getCheckProperties(this.getClass().getName());
+ }
+
+ if(checkProperties != null && checkProperties.containsKey(MIN_APPLICABLE_STACK_VERSION_PROPERTY_NAME)) {
+ minApplicableStackVersion = checkProperties.get(MIN_APPLICABLE_STACK_VERSION_PROPERTY_NAME);
+ }
+
+ // YARN Timeline state recovery was introduced only in certain stack versions,
+ // so this check does not apply to earlier versions of the stack.
+ // The check is applicable only if the min-applicable-stack-version property is
+ // not defined, or if the current version equals or exceeds the configured version.
+ if(minApplicableStackVersion != null && !minApplicableStackVersion.isEmpty()) {
+ String[] minStack = minApplicableStackVersion.split("-");
+ if(minStack.length == 2) {
+ String minStackName = minStack[0];
+ String minStackVersion = minStack[1];
+ Service yarnService = cluster.getService("YARN");
+ String stackName = yarnService.getDesiredStackId().getStackName();
+ if (minStackName.equals(stackName)) {
+ String currentRepositoryVersion = yarnService.getDesiredRepositoryVersion().getVersion();
+ return VersionUtils.compareVersions(currentRepositoryVersion, minStackVersion) >= 0;
+ }
+ }
+ }
+
+ return true;
+
+ }
+ }
+}
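The qualification above reads an optional min-applicable-stack-version check property of the form stackName-version (for example a hypothetical "HDP-2.2.4.2") and only compares versions when the stack name matches. A standalone sketch of that parsing, using a naive comparison in place of Ambari's VersionUtils.compareVersions():

    public class MinStackVersionQualificationSketch {
        public static void main(String[] args) {
            // Hypothetical check property and current desired repository version.
            String minApplicableStackVersion = "HDP-2.2.4.2";
            String currentStackName = "HDP";
            String currentRepositoryVersion = "2.6.0.0";

            boolean applicable = true; // default: property absent => check applies
            String[] minStack = minApplicableStackVersion.split("-");
            if (minStack.length == 2 && minStack[0].equals(currentStackName)) {
                // The real qualification delegates to VersionUtils.compareVersions();
                // here a naive dotted-numeric comparison stands in for it.
                applicable = compareDotted(currentRepositoryVersion, minStack[1]) >= 0;
            }
            System.out.println(applicable); // true: 2.6.0.0 >= 2.2.4.2
        }

        // Naive comparison of dotted numeric versions, for illustration only.
        static int compareDotted(String a, String b) {
            String[] as = a.split("\\."), bs = b.split("\\.");
            for (int i = 0; i < Math.max(as.length, bs.length); i++) {
                int ai = i < as.length ? Integer.parseInt(as[i]) : 0;
                int bi = i < bs.length ? Integer.parseInt(bs[i]) : 0;
                if (ai != bi) {
                    return Integer.compare(ai, bi);
                }
            }
            return 0;
        }
    }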
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
index 91189aff7dd..5334b7c8390 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
@@ -23,14 +23,11 @@
import java.util.List;
import java.util.Map;
-import org.apache.ambari.annotations.Experimental;
-import org.apache.ambari.annotations.ExperimentalFeature;
import org.apache.ambari.server.actionmanager.TargetHostType;
import org.apache.ambari.server.agent.ExecutionCommand;
import org.apache.ambari.server.controller.internal.RequestOperationLevel;
import org.apache.ambari.server.controller.internal.RequestResourceFilter;
import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
-import org.apache.ambari.server.state.Mpack;
/**
* The context required to create tasks and stages for a custom action
@@ -49,7 +46,6 @@ public class ActionExecutionContext {
private boolean hostsInMaintenanceModeExcluded = true;
private boolean allowRetry = false;
private RepositoryVersionEntity repositoryVersion;
- private Mpack mpack = null;
private List m_visitors = new ArrayList<>();
@@ -194,8 +190,6 @@ public void setAutoSkipFailures(boolean autoSkipFailures) {
*
* @return
*/
- @Deprecated
- @Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL)
public RepositoryVersionEntity getRepositoryVersion() {
return repositoryVersion;
}
@@ -208,30 +202,10 @@ public RepositoryVersionEntity getRepositoryVersion() {
* @param stackId
* the stackId to use for stack-based properties on the command.
*/
- @Deprecated
- @Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL)
public void setRepositoryVersion(RepositoryVersionEntity repositoryVersion) {
this.repositoryVersion = repositoryVersion;
}
- /**
- * Sets the management pack for this command. This can be used to version and
- * stack information.
- *
- * @param mpack
- */
- public void setMpack(Mpack mpack) {
- this.mpack = mpack;
- }
-
- /**
- * Gets the management pack associated with this command. This can be used for
- * version and stack information.
- */
- public Mpack getMpack() {
- return mpack;
- }
-
/**
* Adds a command visitor that will be invoked after a command is created. Provides access
* to the command.
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
index 56e6968b90f..a46e2c4d4dd 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
@@ -52,6 +52,7 @@
import org.apache.ambari.server.state.ServiceComponentHost;
import org.apache.ambari.server.state.ServiceInfo;
import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.stack.upgrade.RepositoryVersionHelper;
import org.apache.ambari.server.state.svccomphost.ServiceComponentHostOpInProgressEvent;
import org.apache.ambari.server.utils.SecretReference;
import org.apache.ambari.server.utils.StageUtils;
@@ -91,6 +92,10 @@ public class AmbariActionExecutionHelper {
@Inject
private Configuration configs;
+ @Inject
+ private RepositoryVersionHelper repoVersionHelper;
+
+
/**
* Validates the request to execute an action.
* @param actionRequest
@@ -466,6 +471,15 @@ public boolean shouldHostBeRemoved(final String hostname)
hostLevelParams.put(previousDBConnectorName.getKey(), previousDBConnectorName.getValue());
}
+ if (StringUtils.isNotBlank(serviceName)) {
+ Service service = cluster.getService(serviceName);
+ repoVersionHelper.addRepoInfoToHostLevelParams(cluster, actionContext, service.getDesiredRepositoryVersion(),
+ hostLevelParams, hostName);
+ } else {
+ repoVersionHelper.addRepoInfoToHostLevelParams(cluster, actionContext, null, hostLevelParams, hostName);
+ }
+
+
Map<String, String> roleParams = execCmd.getRoleParams();
if (roleParams == null) {
roleParams = new TreeMap<>();
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
index fdd20425395..9625f101269 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
@@ -33,6 +33,7 @@
import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.MYSQL_JDBC_URL;
import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.NOT_MANAGED_HDFS_PATH_LIST;
import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.ORACLE_JDBC_URL;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.REPO_INFO;
import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SCRIPT;
import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SCRIPT_TYPE;
import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.STACK_NAME;
@@ -70,6 +71,7 @@
import org.apache.ambari.server.controller.internal.RequestOperationLevel;
import org.apache.ambari.server.controller.internal.RequestResourceFilter;
import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.controller.spi.SystemException;
import org.apache.ambari.server.metadata.ActionMetadata;
import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
import org.apache.ambari.server.state.Cluster;
@@ -413,6 +415,12 @@ public boolean shouldHostBeRemoved(final String hostname)
hostLevelParams.put(CUSTOM_COMMAND, commandName);
+ // Set parameters required for re-installing clients on restart
+ try {
+ hostLevelParams.put(REPO_INFO, repoVersionHelper.getRepoInfo(cluster, component, host));
+ } catch (SystemException e) {
+ throw new AmbariException("Could not retrieve repository info", e);
+ }
hostLevelParams.put(STACK_NAME, stackId.getStackName());
hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());
@@ -1434,6 +1442,7 @@ Map<String, String> createDefaultHostParams(Cluster cluster, StackId stackId) th
hostLevelParams.put(MYSQL_JDBC_URL, managementController.getMysqljdbcUrl());
hostLevelParams.put(ORACLE_JDBC_URL, managementController.getOjdbcUrl());
hostLevelParams.put(DB_DRIVER_FILENAME, configs.getMySQLJarName());
+ hostLevelParams.putAll(managementController.getRcaParameters());
hostLevelParams.put(HOST_SYS_PREPPED, configs.areHostsSysPrepped());
hostLevelParams.put(AGENT_STACK_RETRY_ON_UNAVAILABILITY, configs.isAgentStackRetryOnInstallEnabled());
hostLevelParams.put(AGENT_STACK_RETRY_COUNT, configs.getAgentStackRetryOnInstallCount());
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
index 0e5fb727911..9a1bb921215 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
@@ -509,19 +509,6 @@ RequestStatusResponse createAction(ExecuteActionRequest actionRequest, Map<String, String> requestProperties)
 Map<String, Map<String, String>> findConfigurationTagsWithOverrides(
Cluster cluster, String hostName) throws AmbariException;
+ /**
+ * Returns the parameters for the RCA database.
+ *
+ * @return the map of RCA database parameters
+ *
+ */
+ Map<String, String> getRcaParameters();
+
/**
* Get the Factory to create Request schedules
* @return the request execution factory
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 6a45f187303..9f5acbfdf94 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -18,6 +18,10 @@
package org.apache.ambari.server.controller;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AMBARI_DB_RCA_DRIVER;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AMBARI_DB_RCA_PASSWORD;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AMBARI_DB_RCA_URL;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AMBARI_DB_RCA_USERNAME;
import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.CLIENTS_TO_UPDATE_CONFIGS;
import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.COMMAND_RETRY_ENABLED;
import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.COMMAND_TIMEOUT;
@@ -27,8 +31,11 @@
import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.MAX_DURATION_OF_RETRIES;
import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.NOT_MANAGED_HDFS_PATH_LIST;
import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.PACKAGE_LIST;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.PACKAGE_VERSION;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.REPO_INFO;
import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SCRIPT;
import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SCRIPT_TYPE;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SERVICE_REPO_INFO;
import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.UNLIMITED_KEY_JCE_REQUIRED;
import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.USER_GROUPS;
import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.USER_LIST;
@@ -83,6 +90,7 @@
import org.apache.ambari.server.actionmanager.Stage;
import org.apache.ambari.server.actionmanager.StageFactory;
import org.apache.ambari.server.agent.ExecutionCommand;
+import org.apache.ambari.server.agent.ExecutionCommand.KeyNames;
import org.apache.ambari.server.agent.rest.AgentResource;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.api.services.LoggingService;
@@ -101,6 +109,7 @@
import org.apache.ambari.server.controller.metrics.timeline.cache.TimelineMetricCacheProvider;
import org.apache.ambari.server.controller.spi.Resource;
import org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException;
+import org.apache.ambari.server.controller.spi.SystemException;
import org.apache.ambari.server.customactions.ActionDefinition;
import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
import org.apache.ambari.server.metadata.ActionMetadata;
@@ -110,9 +119,7 @@
import org.apache.ambari.server.orm.dao.ClusterServiceDAO;
import org.apache.ambari.server.orm.dao.ExtensionDAO;
import org.apache.ambari.server.orm.dao.ExtensionLinkDAO;
-import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
-import org.apache.ambari.server.orm.dao.ServiceComponentDesiredStateDAO;
import org.apache.ambari.server.orm.dao.SettingDAO;
import org.apache.ambari.server.orm.dao.StackDAO;
import org.apache.ambari.server.orm.dao.WidgetDAO;
@@ -120,13 +127,11 @@
import org.apache.ambari.server.orm.entities.ClusterEntity;
import org.apache.ambari.server.orm.entities.ClusterServiceEntity;
import org.apache.ambari.server.orm.entities.ExtensionLinkEntity;
-import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
import org.apache.ambari.server.orm.entities.HostEntity;
import org.apache.ambari.server.orm.entities.MpackEntity;
import org.apache.ambari.server.orm.entities.RepoDefinitionEntity;
import org.apache.ambari.server.orm.entities.RepoOsEntity;
import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
-import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity;
import org.apache.ambari.server.orm.entities.SettingEntity;
import org.apache.ambari.server.orm.entities.StackEntity;
import org.apache.ambari.server.orm.entities.WidgetEntity;
@@ -183,7 +188,6 @@
import org.apache.ambari.server.state.ServiceComponentHostEvent;
import org.apache.ambari.server.state.ServiceComponentHostFactory;
import org.apache.ambari.server.state.ServiceFactory;
-import org.apache.ambari.server.state.ServiceGroup;
import org.apache.ambari.server.state.ServiceGroupFactory;
import org.apache.ambari.server.state.ServiceInfo;
import org.apache.ambari.server.state.StackId;
@@ -331,13 +335,6 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
@Inject
private ClusterServiceDAO clusterServiceDAO;
-
- @Inject
- private ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO;
-
- @Inject
- private HostComponentStateDAO hostComponentStateDAO;
-
@Inject
private ExtensionDAO extensionDAO;
@Inject
@@ -399,9 +396,6 @@ public AmbariManagementControllerImpl(ActionManager actionManager,
masterHostname = InetAddress.getLocalHost().getCanonicalHostName();
maintenanceStateHelper = injector.getInstance(MaintenanceStateHelper.class);
kerberosHelper = injector.getInstance(KerberosHelper.class);
- hostComponentStateDAO = injector.getInstance(HostComponentStateDAO.class);
- serviceComponentDesiredStateDAO = injector.getInstance(ServiceComponentDesiredStateDAO.class);
-
if(configs != null)
{
if (configs.getApiSSLAuthentication()) {
@@ -630,13 +624,6 @@ public synchronized Set createHostComponents(Set createHostComponents(Set getHostComponents(
}
if (StringUtils.isBlank(serviceName)) {
- LOG.error("Unable to find service for componentName : {}", request.getComponentName());
+ LOG.error("Unable to find service for component {}", request.getComponentName());
throw new ServiceComponentHostNotFoundException(
cluster.getClusterName(), null, request.getComponentName(), request.getHostname());
}
@@ -1333,31 +1318,6 @@ private Set getHostComponents(
Map<String, DesiredConfig> desiredConfigs = cluster.getDesiredConfigs();
Map<String, Host> hosts = clusters.getHostsForCluster(cluster.getClusterName());
- /*
- This is a core step in retrieving a given component instance in multi-host component instances world.
- We fetch the 'HostComponentStateEntity' based on the 'host component Id' passed-in in the request. if it exists,
- we use the service group Id, service Id, componentName and componentType to query the unique ServiceComponentEntity
- associated with it.
- */
- ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity = null;
- HostComponentStateEntity hostComponentStateEntity = null;
- if (request.getComponentId() != null) {
- hostComponentStateEntity = hostComponentStateDAO.findById(request.getComponentId());
- if (hostComponentStateEntity == null) {
- throw new AmbariException("Could not find Host Component resource for"
- + " componentId = "+ request.getComponentId());
- }
- serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.findByName(hostComponentStateEntity.getClusterId(),
- hostComponentStateEntity.getServiceGroupId(), hostComponentStateEntity.getServiceId(),
- hostComponentStateEntity.getComponentName(), hostComponentStateEntity.getComponentType());
- if (serviceComponentDesiredStateEntity == null) {
- throw new AmbariException("Could not find Service Component resource for"
- + " componentId = " + request.getComponentId() + ", serviceGroupId = " + hostComponentStateEntity.getServiceGroupId()
- + ", serviceId = " + hostComponentStateEntity.getServiceId() + ", componentName = " + hostComponentStateEntity.getComponentName()
- + ", componntType = " + hostComponentStateEntity.getComponentType());
- }
- }
-
for (Service s : services) {
// filter on component name if provided
Set components = new HashSet<>();
@@ -1366,12 +1326,9 @@ private Set getHostComponents(
} else {
components.addAll(s.getServiceComponents().values());
}
-
for (ServiceComponent sc : components) {
- if (serviceComponentDesiredStateEntity != null &&
- serviceComponentDesiredStateEntity.getId() != null &&
- sc.getId() != null) {
- if (!sc.getId().equals(serviceComponentDesiredStateEntity.getId())) {
+ if (request.getComponentName() != null) {
+ if (!sc.getName().equals(request.getComponentName())) {
continue;
}
}
@@ -1431,7 +1388,7 @@ private Set getHostComponents(
response.add(r);
} catch (ServiceComponentHostNotFoundException e) {
- if (request.getServiceName() == null || request.getComponentId() == null) {
+ if (request.getServiceName() == null || request.getComponentName() == null) {
// Ignore the exception if either the service name or component name are not specified.
// This is an artifact of how we get host_components and can happen in the case where
// we get all host_components for a host, for example.
@@ -1443,7 +1400,7 @@ private Set getHostComponents(
// condition.
LOG.debug("ServiceComponentHost not found ", e);
throw new ServiceComponentHostNotFoundException(cluster.getClusterName(),
- request.getServiceName(), request.getComponentId(), request.getHostname());
+ request.getServiceName(), request.getComponentName(), request.getHostname());
}
}
} else {
@@ -2437,6 +2394,7 @@ private void createHostAction(Cluster cluster,
Map commandParamsInp,
ServiceComponentHostEvent event,
boolean skipFailure,
+ RepositoryVersionEntity repoVersion,
boolean isUpgradeSuspended,
DatabaseType databaseType,
Map clusterDesiredConfigs
@@ -2603,7 +2561,34 @@ private void createHostAction(Cluster cluster,
}
StageUtils.useAmbariJdkInCommandParams(commandParams, configs);
+ String repoInfo;
+ try {
+ repoInfo = repoVersionHelper.getRepoInfo(cluster, component, host);
+ } catch (SystemException e) {
+ throw new AmbariException("", e);
+ }
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Sending repo information to agent, hostname={}, clusterName={}, stackInfo={}, repoInfo={}",
+ scHost.getHostName(), clusterName, stackId.getStackId(), repoInfo);
+ }
+
Map hostParams = new TreeMap<>();
+ hostParams.put(REPO_INFO, repoInfo);
+ hostParams.putAll(getRcaParameters());
+
+ if (null != repoVersion) {
+ try {
+ VersionDefinitionXml xml = repoVersion.getRepositoryXml();
+ if (null != xml && !StringUtils.isBlank(xml.getPackageVersion(osFamily))) {
+ hostParams.put(PACKAGE_VERSION, xml.getPackageVersion(osFamily));
+ }
+ } catch (Exception e) {
+ throw new AmbariException(String.format("Could not load version xml from repo version %s",
+ repoVersion.getVersion()), e);
+ }
+
+ hostParams.put(KeyNames.REPO_VERSION_ID, repoVersion.getId().toString());
+ }
List packages =
getPackagesForStackServiceHost(ambariMetaInfo.getStack(stackId), serviceInfo, hostParams, osFamily);
@@ -2695,11 +2680,11 @@ private void createHostAction(Cluster cluster,
/**
* Computes os-dependent packages for osSpecificMap. Does not take into
- * account package dependencies for ANY_OS. Instead of this method you should
- * use getPackagesForStackServiceHost() because it takes into account both
- * os-dependent and os-independent lists of packages for stack service.
- *
- * @param hostParams
+ * account package dependencies for ANY_OS. Instead of this method
+ * you should use getPackagesForStackServiceHost()
+ * because it takes into account both os-dependent and os-independent lists
+ * of packages for stack service.
+ * @param hostParams host parameters map; may be modified (SERVICE_REPO_INFO may be added to it)
* @return a list of os-dependent packages for host
*/
protected OsSpecific populatePackagesInfo(Map osSpecificMap, Map hostParams,
@@ -2710,12 +2695,18 @@ protected OsSpecific populatePackagesInfo(Map osSpecificMap,
for (OsSpecific osSpecific : foundOSSpecifics) {
hostOs.addPackages(osSpecific.getPackages());
}
- }
+ // TODO: this looks deprecated; investigate whether it is actually used
+ // Choose repo that is relevant for host
+ OsSpecific.Repo repos = hostOs.getRepo();
+ if (repos != null) {
+ String serviceRepoInfo = gson.toJson(repos);
+ hostParams.put(SERVICE_REPO_INFO, serviceRepoInfo);
+ }
+ }
return hostOs;
}
- @Override
public List getPackagesForStackServiceHost(StackInfo stackInfo, ServiceInfo serviceInfo, Map hostParams, String osFamily) {
List packages = new ArrayList<>();
//add all packages for ANY_OS
@@ -2958,11 +2949,11 @@ protected RequestStageContainer doStageCreation(RequestStageContainer requestSta
Service service = cluster.getService(scHost.getServiceName());
ServiceComponent serviceComponent = service.getServiceComponent(compName);
- StackId stackId = cluster.getServiceGroup(scHost.getServiceGroupId()).getStackId();
if (StringUtils.isBlank(stage.getHostParamsStage())) {
+ RepositoryVersionEntity repositoryVersion = serviceComponent.getDesiredRepositoryVersion();
stage.setHostParamsStage(StageUtils.getGson().toJson(
- customCommandExecutionHelper.createDefaultHostParams(cluster, stackId)));
+ customCommandExecutionHelper.createDefaultHostParams(cluster, repositoryVersion.getStackId())));
}
@@ -3032,6 +3023,7 @@ protected RequestStageContainer doStageCreation(RequestStageContainer requestSta
}
break;
case STARTED:
+ StackId stackId = serviceComponent.getDesiredStackId();
ComponentInfo compInfo = ambariMetaInfo.getComponent(
stackId.getStackName(), stackId.getStackVersion(), scHost.getServiceType(),
scHost.getServiceComponentName());
@@ -3178,8 +3170,10 @@ protected RequestStageContainer doStageCreation(RequestStageContainer requestSta
}
} else {
// !!! can never be null
+ RepositoryVersionEntity repoVersion = serviceComponent.getDesiredRepositoryVersion();
+
createHostAction(cluster, stage, scHost, configurations, configurationAttributes, configTags,
- roleCommand, requestParameters, event, skipFailure, isUpgradeSuspended,
+ roleCommand, requestParameters, event, skipFailure, repoVersion, isUpgradeSuspended,
databaseType, clusterDesiredConfigs);
}
@@ -3195,12 +3189,9 @@ protected RequestStageContainer doStageCreation(RequestStageContainer requestSta
calculateServiceComponentHostForServiceCheck(cluster, service);
if (StringUtils.isBlank(stage.getHostParamsStage())) {
- long serviceGroupId = componentForServiceCheck.getServiceGroupId();
- ServiceGroup serviceGroup = cluster.getServiceGroup(serviceGroupId);
- StackId stackId = serviceGroup.getStackId();
-
+ RepositoryVersionEntity repositoryVersion = componentForServiceCheck.getServiceComponent().getDesiredRepositoryVersion();
stage.setHostParamsStage(StageUtils.getGson().toJson(
- customCommandExecutionHelper.createDefaultHostParams(cluster, stackId)));
+ customCommandExecutionHelper.createDefaultHostParams(cluster, repositoryVersion.getStackId())));
}
customCommandExecutionHelper.addServiceCheckAction(stage, componentForServiceCheck.getHostName(), smokeTestRole,
@@ -3327,11 +3318,19 @@ public ExecutionCommand getExecutionCommand(Cluster cluster,
configurationAttributes =
new TreeMap<>();
+ RepositoryVersionEntity repoVersion = null;
+ if (null != scHost.getServiceComponent().getDesiredRepositoryVersion()) {
+ repoVersion = scHost.getServiceComponent().getDesiredRepositoryVersion();
+ } else {
+ Service service = cluster.getService(scHost.getServiceName());
+ repoVersion = service.getDesiredRepositoryVersion();
+ }
+
boolean isUpgradeSuspended = cluster.isUpgradeSuspended();
DatabaseType databaseType = configs.getDatabaseType();
Map clusterDesiredConfigs = cluster.getDesiredConfigs();
createHostAction(cluster, stage, scHost, configurations, configurationAttributes, configTags,
- roleCommand, null, null, false, isUpgradeSuspended, databaseType,
+ roleCommand, null, null, false, repoVersion, isUpgradeSuspended, databaseType,
clusterDesiredConfigs);
ExecutionCommand ec = stage.getExecutionCommands().get(scHost.getHostName()).get(0).getExecutionCommand();
@@ -3563,14 +3562,10 @@ public void validateServiceComponentHostRequest(ServiceComponentHostRequest requ
|| request.getClusterName().isEmpty()
|| request.getComponentName() == null
|| request.getComponentName().isEmpty()
- || request.getServiceName() == null
- || request.getServiceName().isEmpty()
- || request.getServiceGroupName() == null
- || request.getServiceGroupName().isEmpty()
|| request.getHostname() == null
|| request.getHostname().isEmpty()) {
throw new IllegalArgumentException("Invalid arguments"
- + ", cluster name, component name, service name, service group name and host name should be"
+ + ", cluster name, component name and host name should be"
+ " provided");
}
@@ -3600,11 +3595,6 @@ public String findService(Cluster cluster, String componentName) throws AmbariEx
return cluster.getServiceByComponentName(componentName).getName();
}
- @Override
- public String findService(Cluster cluster, Long componentId) throws AmbariException {
- return cluster.getServiceByComponentId(componentId).getName();
- }
-
@Override
public synchronized void deleteCluster(ClusterRequest request)
throws AmbariException {
@@ -3645,8 +3635,8 @@ public DeleteStatusMetaData deleteHostComponents(
for (ServiceComponentHost sch : cluster.getServiceComponentHosts(request.getHostname())) {
ServiceComponentHostRequest schr = new ServiceComponentHostRequest(request.getClusterName(),
- sch.getServiceGroupName(), sch.getServiceName(), sch.getServiceComponentId(), sch.getServiceComponentName(),
- sch.getServiceComponentType(), sch.getHostName(), null);
+ request.getServiceGroupName(), sch.getServiceName(), sch.getServiceComponentName(),
+ sch.getHostName(), null);
expanded.add(schr);
}
}
@@ -3672,7 +3662,6 @@ public DeleteStatusMetaData deleteHostComponents(
+ ", clusterName=" + request.getClusterName()
+ ", serviceName=" + request.getServiceName()
+ ", componentName=" + request.getComponentName()
- + ", componentType=" + request.getComponentType()
+ ", hostname=" + request.getHostname()
+ ", request=" + request);
@@ -5089,6 +5078,28 @@ public String getMysqljdbcUrl() {
return mysqljdbcUrl;
}
+ @Override
+ public Map getRcaParameters() {
+
+ String hostName = StageUtils.getHostName();
+
+ String url = configs.getRcaDatabaseUrl();
+ if (url.contains(Configuration.HOSTNAME_MACRO)) {
+ url =
+ url.replace(Configuration.HOSTNAME_MACRO,
+ hostsMap.getHostMap(hostName));
+ }
+
+ Map rcaParameters = new HashMap<>();
+
+ rcaParameters.put(AMBARI_DB_RCA_URL, url);
+ rcaParameters.put(AMBARI_DB_RCA_DRIVER, configs.getRcaDatabaseDriver());
+ rcaParameters.put(AMBARI_DB_RCA_USERNAME, configs.getRcaDatabaseUser());
+ rcaParameters.put(AMBARI_DB_RCA_PASSWORD, configs.getRcaDatabasePassword());
+
+ return rcaParameters;
+ }
+
@Override
public boolean checkLdapConfigured() {
return ldapDataPopulator.isLdapEnabled();
@@ -5793,7 +5804,7 @@ public Set createServiceConfigVersion(Set existingConfigTypeToConfig = new HashMap<>();
+ Map existingConfigTypeToConfig = new HashMap();
for (Config config : configs) {
Config existingConfig = cluster.getDesiredConfigByType(config.getType());
existingConfigTypeToConfig.put(config.getType(), existingConfig);
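For reference, the getRcaParameters() addition above reduces to a hostname-macro substitution followed by straightforward map assembly. A minimal, self-contained sketch of that pattern follows; the constant values, key names, and the JDBC URL are illustrative placeholders, not the real Ambari configuration keys.

import java.util.HashMap;
import java.util.Map;

class RcaParametersSketch {
    // Illustrative key names and macro; the real values come from Ambari's Configuration class.
    static final String AMBARI_DB_RCA_URL = "ambari_db_rca_url";
    static final String AMBARI_DB_RCA_DRIVER = "ambari_db_rca_driver";
    static final String HOSTNAME_MACRO = "{hostname}";

    // Mirrors the macro substitution and map assembly done in getRcaParameters().
    static Map<String, String> buildRcaParameters(String rcaUrl, String rcaDriver, String masterHost) {
        String url = rcaUrl.contains(HOSTNAME_MACRO)
                ? rcaUrl.replace(HOSTNAME_MACRO, masterHost)
                : rcaUrl;

        Map<String, String> rcaParameters = new HashMap<>();
        rcaParameters.put(AMBARI_DB_RCA_URL, url);
        rcaParameters.put(AMBARI_DB_RCA_DRIVER, rcaDriver);
        return rcaParameters;
    }

    public static void main(String[] args) {
        System.out.println(buildRcaParameters(
                "jdbc:postgresql://{hostname}/ambarirca", "org.postgresql.Driver", "ambari.example.com"));
    }
}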
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
index f7f26833108..e93277e541f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
@@ -81,6 +81,7 @@
import org.apache.ambari.server.metrics.system.MetricsService;
import org.apache.ambari.server.orm.GuiceJpaInitializer;
import org.apache.ambari.server.orm.PersistenceType;
+import org.apache.ambari.server.orm.dao.BlueprintDAO;
import org.apache.ambari.server.orm.dao.ClusterDAO;
import org.apache.ambari.server.orm.dao.GroupDAO;
import org.apache.ambari.server.orm.dao.MetainfoDAO;
@@ -113,6 +114,7 @@
import org.apache.ambari.server.security.unsecured.rest.CertificateDownload;
import org.apache.ambari.server.security.unsecured.rest.CertificateSign;
import org.apache.ambari.server.security.unsecured.rest.ConnectionInfo;
+import org.apache.ambari.server.stack.UpdateActiveRepoVersionOnStartup;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.topology.AmbariContext;
import org.apache.ambari.server.topology.BlueprintFactory;
@@ -946,6 +948,7 @@ public void performStaticInjection() {
injector.getInstance(TopologyRequestFactoryImpl.class), injector.getInstance(SecurityConfigurationFactory
.class), injector.getInstance(Gson.class));
HostResourceProvider.setTopologyManager(injector.getInstance(TopologyManager.class));
+ BlueprintFactory.init(injector.getInstance(BlueprintDAO.class));
BaseClusterRequest.init(injector.getInstance(BlueprintFactory.class));
AmbariContext.init(injector.getInstance(HostRoleCommandFactory.class));
@@ -1074,7 +1077,7 @@ private static void loadRequestlogHandler(AmbariHandlerList handlerList, Server
HandlerCollection handlers = new HandlerCollection();
Handler[] handler = serverForAgent.getHandlers();
if(handler != null ) {
- handlers.setHandlers(handler);
+ handlers.setHandlers((Handler[])handler);
handlers.addHandler(requestLogHandler);
serverForAgent.setHandler(handlers);
}
@@ -1105,6 +1108,7 @@ public static void main(String[] args) throws Exception {
DatabaseConsistencyCheckHelper.checkDBVersionCompatible();
server = injector.getInstance(AmbariServer.class);
+ injector.getInstance(UpdateActiveRepoVersionOnStartup.class).process();
CertificateManager certMan = injector.getInstance(CertificateManager.class);
certMan.initRootCert();
KerberosChecker.checkJaasConfiguration();
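The performStaticInjection() change above follows the server's existing start-up pattern: resolve a singleton from the Guice injector and hand it to a static init method before requests are served. The sketch below shows that shape with placeholder classes; ExampleDao and ExampleFactory stand in for the real BlueprintDAO/BlueprintFactory and are not Ambari types.

import com.google.inject.Guice;
import com.google.inject.Injector;

class StartupWiringSketch {

    // Stand-in for a DAO resolved from the injector at startup.
    static class ExampleDao { }

    // Stand-in for a class that, like BlueprintFactory, receives its DAO via a static init call.
    static class ExampleFactory {
        private static ExampleDao dao;

        static void init(ExampleDao injectedDao) {
            dao = injectedDao;
        }

        static boolean isInitialized() {
            return dao != null;
        }
    }

    public static void main(String[] args) {
        Injector injector = Guice.createInjector();
        // Same shape as BlueprintFactory.init(injector.getInstance(BlueprintDAO.class)) above.
        ExampleFactory.init(injector.getInstance(ExampleDao.class));
        System.out.println(ExampleFactory.isInitialized());
    }
}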
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java
index 0ae2676ee5a..357f1b61d86 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java
@@ -169,16 +169,12 @@
import org.apache.ambari.server.state.stack.OsFamily;
import org.apache.ambari.server.state.svccomphost.ServiceComponentHostImpl;
import org.apache.ambari.server.topology.BlueprintFactory;
-import org.apache.ambari.server.topology.ComponentResolver;
-import org.apache.ambari.server.topology.DefaultStackFactory;
+import org.apache.ambari.server.topology.BlueprintValidator;
+import org.apache.ambari.server.topology.BlueprintValidatorImpl;
import org.apache.ambari.server.topology.PersistedState;
import org.apache.ambari.server.topology.PersistedStateImpl;
import org.apache.ambari.server.topology.SecurityConfigurationFactory;
-import org.apache.ambari.server.topology.StackComponentResolver;
-import org.apache.ambari.server.topology.StackFactory;
import org.apache.ambari.server.topology.tasks.ConfigureClusterTaskFactory;
-import org.apache.ambari.server.topology.validators.BasicBlueprintValidator;
-import org.apache.ambari.server.topology.validators.BlueprintValidator;
import org.apache.ambari.server.utils.PasswordUtils;
import org.apache.ambari.server.view.ViewInstanceHandlerList;
import org.eclipse.jetty.server.SessionIdManager;
@@ -418,7 +414,6 @@ protected void configure() {
bind(SecurityConfigurationFactory.class).in(Scopes.SINGLETON);
bind(PersistedState.class).to(PersistedStateImpl.class);
- bind(ComponentResolver.class).to(StackComponentResolver.class);
// factory to create LoggingRequestHelper instances for LogSearch integration
bind(LoggingRequestHelperFactory.class).to(LoggingRequestHelperFactoryImpl.class);
@@ -546,8 +541,7 @@ private void installFactories() {
bind(RegistryFactory.class).to(RegistryFactoryImpl.class);
bind(HostRoleCommandFactory.class).to(HostRoleCommandFactoryImpl.class);
bind(SecurityHelper.class).toInstance(SecurityHelperImpl.getInstance());
- bind(BlueprintValidator.class).to(BasicBlueprintValidator.class);
- bind(StackFactory.class).to(DefaultStackFactory.class);
+ bind(BlueprintValidator.class).to(BlueprintValidatorImpl.class);
bind(BlueprintFactory.class);
install(new FactoryModuleBuilder().implement(AmbariEvent.class, Names.named("userCreated"), UserCreatedEvent.class).build(AmbariEventFactory.class));
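The ControllerModule changes above swap one interface-to-implementation binding for another; the mechanism itself is plain Guice module configuration. A minimal sketch of that bind(...).to(...) pattern, using placeholder types rather than the real BlueprintValidator classes:

import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;

class BindingSketch {

    interface Validator {
        boolean isValid(String value);
    }

    static class ValidatorImpl implements Validator {
        @Override
        public boolean isValid(String value) {
            return value != null && !value.isEmpty();
        }
    }

    static class ExampleModule extends AbstractModule {
        @Override
        protected void configure() {
            // Same shape as bind(BlueprintValidator.class).to(BlueprintValidatorImpl.class) above.
            bind(Validator.class).to(ValidatorImpl.class);
        }
    }

    public static void main(String[] args) {
        Injector injector = Guice.createInjector(new ExampleModule());
        System.out.println(injector.getInstance(Validator.class).isValid("HDFS"));
    }
}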
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostRequest.java
index f9d5c5701fa..0824d5361cf 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostRequest.java
@@ -26,9 +26,7 @@ public class ServiceComponentHostRequest {
private String clusterName; // REF
private String serviceGroupName;
private String serviceName;
- private Long componentId;
private String componentName;
- private String componentType;
private String hostname;
private String publicHostname;
private String state;
@@ -41,31 +39,17 @@ public class ServiceComponentHostRequest {
public ServiceComponentHostRequest(String clusterName,
String serviceGroupName,
String serviceName,
- Long componentId,
String componentName,
- String componentType,
String hostname,
String desiredState) {
this.clusterName = clusterName;
this.serviceGroupName = serviceGroupName;
this.serviceName = serviceName;
- this.componentId = componentId;
this.componentName = componentName;
- this.componentType = componentType;
this.hostname = hostname;
this.desiredState = desiredState;
}
- public ServiceComponentHostRequest(String clusterName,
- String serviceGroupName,
- String serviceName,
- String componentName,
- String componentType,
- String hostname,
- String desiredState) {
- this(clusterName, serviceGroupName, serviceName, null, componentName, componentType, hostname, desiredState);
- }
-
/**
* @return the service group Name
*/
@@ -90,13 +74,6 @@ public void setServiceName(String serviceName) {
this.serviceName = serviceName;
}
- /**
- * @return the componentd
- */
- public Long getComponentId() {
- return componentId;
- }
-
/**
* @return the componentName
*/
@@ -111,22 +88,6 @@ public void setComponentName(String componentName) {
this.componentName = componentName;
}
- /**
- * @param componentId the componentId to set
- */
- public void setComponentId(Long componentId) {
- this.componentId = componentId;
- }
-
- /**
- * @return the componentType
- */
- public String getComponentType() { return componentType; }
-
- /**
- * @param componentType the componenType to set
- */
- public void setComponentType(String componentType) { this.componentType = componentType; }
/**
* @return the hostname
*/
@@ -201,9 +162,7 @@ public String toString() {
sb.append("{" + " clusterName=").append(clusterName)
.append(", serviceGroupName=").append(serviceGroupName)
.append(", serviceName=").append(serviceName)
- .append(", componentId=").append(componentId)
.append(", componentName=").append(componentName)
- .append(", componentType=").append(componentType)
.append(", hostname=").append(hostname)
.append(", publicHostname=").append(publicHostname)
.append(", desiredState=").append(desiredState)
@@ -244,9 +203,7 @@ public boolean equals(Object obj) {
return Objects.equals(clusterName, other.clusterName) &&
Objects.equals(serviceGroupName, other.serviceGroupName) &&
Objects.equals(serviceName, other.serviceName) &&
- Objects.equals(componentId, other.componentId) &&
Objects.equals(componentName, other.componentName) &&
- Objects.equals(componentType, other.componentType) &&
Objects.equals(hostname, other.hostname) &&
Objects.equals(publicHostname, other.publicHostname) &&
Objects.equals(desiredState, other.desiredState) &&
@@ -259,7 +216,7 @@ public boolean equals(Object obj) {
@Override
public int hashCode() {
- return Objects.hash(clusterName, serviceGroupName, serviceName, componentId, componentName, componentType, hostname,
- publicHostname, desiredState, state, desiredStackId, staleConfig, adminState, maintenanceState);
+ return Objects.hash(clusterName, serviceGroupName, serviceName, componentName, hostname, publicHostname,
+ desiredState, state, desiredStackId, staleConfig, adminState, maintenanceState);
}
}
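After dropping componentId and componentType, the request class keeps its equals/hashCode contract by comparing and hashing exactly the same reduced field set. A compact sketch of that Objects.equals/Objects.hash idiom, with an abbreviated, illustrative field list rather than the full request:

import java.util.Objects;

class HostRequestSketch {
    private final String clusterName;
    private final String componentName;
    private final String hostname;

    HostRequestSketch(String clusterName, String componentName, String hostname) {
        this.clusterName = clusterName;
        this.componentName = componentName;
        this.hostname = hostname;
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == this) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        HostRequestSketch other = (HostRequestSketch) obj;
        return Objects.equals(clusterName, other.clusterName)
                && Objects.equals(componentName, other.componentName)
                && Objects.equals(hostname, other.hostname);
    }

    @Override
    public int hashCode() {
        // Hash exactly the fields compared in equals() so the contract holds.
        return Objects.hash(clusterName, componentName, hostname);
    }
}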
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java
index 76d187dfc4e..14f1d471130 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java
@@ -24,8 +24,6 @@
import org.apache.ambari.server.state.HostConfig;
import org.apache.ambari.server.state.UpgradeState;
-import io.swagger.annotations.ApiModelProperty;
-
public class ServiceComponentHostResponse {
private Long clusterId; // REF
@@ -37,7 +35,6 @@ public class ServiceComponentHostResponse {
private String serviceType;
private Long hostComponentId;
private String componentName;
- private String componentType;
private String displayName;
private String publicHostname;
private String hostname;
@@ -56,10 +53,9 @@ public class ServiceComponentHostResponse {
public ServiceComponentHostResponse(Long clusterId, String clusterName, Long serviceGroupId, String serviceGroupName,
Long serviceId, String serviceName, String serviceType, Long hostComponentId,
- String componentName, String componentType, String displayName, String hostname,
- String publicHostname, String liveState, String version, String desiredState,
- String desiredStackVersion, String desiredRepositoryVersion,
- HostComponentAdminState adminState) {
+ String componentName, String displayName, String hostname, String publicHostname,
+ String liveState, String version, String desiredState, String desiredStackVersion,
+ String desiredRepositoryVersion, HostComponentAdminState adminState) {
this.clusterId = clusterId;
this.serviceGroupId = serviceGroupId;
this.serviceGroupName = serviceGroupName;
@@ -69,7 +65,6 @@ public ServiceComponentHostResponse(Long clusterId, String clusterName, Long ser
this.serviceType = serviceType;
this.hostComponentId = hostComponentId;
this.componentName = componentName;
- this.componentType = componentType;
this.displayName = displayName;
this.hostname = hostname;
this.publicHostname = publicHostname;
@@ -154,13 +149,6 @@ public String getComponentName() {
return componentName;
}
- /**
- * @return the componentType
- */
- public String getComponentType() {
- return componentType;
- }
-
/**
* @param componentName the componentName to set
*/
@@ -168,13 +156,6 @@ public void setComponentName(String componentName) {
this.componentName = componentName;
}
- /**
- * @param componentType the componentType to set
- */
- public void setComponentType(String componentType) {
- this.componentType = componentType;
- }
-
/**
* @return the displayName
*/
@@ -358,11 +339,6 @@ public boolean equals(Object o) {
return false;
}
- if (componentType != null ?
- !componentType.equals(that.componentType) : that.componentType != null) {
- return false;
- }
-
if (displayName != null ?
!displayName.equals(that.displayName) : that.displayName != null) {
return false;
@@ -386,7 +362,6 @@ public int hashCode() {
result = 71 * result + (serviceName != null ? serviceName.hashCode() : 0);
result = 71 * result + (serviceType != null ? serviceType.hashCode() : 0);
result = 71 * result + (componentName != null ? componentName.hashCode() : 0);
- result = 71 * result + (componentType != null ? componentType.hashCode() : 0);
result = 71 * result + (displayName != null ? displayName.hashCode() : 0);
result = 71 * result + (hostname != null ? hostname.hashCode() : 0);
return result;
@@ -462,13 +437,4 @@ public UpgradeState getUpgradeState() {
return upgradeState;
}
- /**
- * Interface to help correct Swagger documentation generation
- */
- public interface ServiceComponentHostResponseSwagger extends ApiModel {
- @ApiModelProperty(name = "HostRoles")
- ServiceComponentHostResponse getServiceComponentHostResponse();
- }
-
-
}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentRequest.java
index 12fa03c26a0..f59eb984e7f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentRequest.java
@@ -19,38 +19,34 @@
package org.apache.ambari.server.controller;
-import java.util.Objects;
-
public class ServiceComponentRequest {
private String clusterName; // REF
private String serviceGroupName;
private String serviceName; // GET/CREATE/UPDATE/DELETE
private String componentName; // GET/CREATE/UPDATE/DELETE
- private String componentType;
private String desiredState; // CREATE/UPDATE
private String componentCategory;
private String recoveryEnabled; // CREATE/UPDATE
public ServiceComponentRequest(String clusterName, String serviceGroupName, String serviceName,
- String componentName, String componentType, String desiredState) {
- this(clusterName, serviceGroupName, serviceName, componentName, componentType, desiredState, null, null);
+ String componentName, String desiredState) {
+ this(clusterName, serviceGroupName, serviceName, componentName, desiredState, null, null);
}
public ServiceComponentRequest(String clusterName, String serviceGroupName, String serviceName, String componentName,
- String componentType, String desiredState, String recoveryEnabled) {
- this(clusterName, serviceGroupName, serviceName, componentName, componentType, desiredState, recoveryEnabled, null);
+ String desiredState, String recoveryEnabled) {
+ this(clusterName, serviceGroupName, serviceName, componentName, desiredState, recoveryEnabled, null);
}
public ServiceComponentRequest(String clusterName, String serviceGroupName,
- String serviceName, String componentName, String componentType,
+ String serviceName, String componentName,
String desiredState, String recoveryEnabled,
String componentCategory) {
this.clusterName = clusterName;
this.serviceGroupName = serviceGroupName;
this.serviceName = serviceName;
this.componentName = componentName;
- this.componentType = componentType;
this.desiredState = desiredState;
this.recoveryEnabled = recoveryEnabled;
this.componentCategory = componentCategory;
@@ -93,18 +89,6 @@ public void setComponentName(String componentName) {
this.componentName = componentName;
}
- /**
- * @return the componentType
- */
- public String getComponentType() { return componentType; }
-
- /**
- * @param componentType the componentType to set
- */
- public void setComponentType(String componentType) {
- this.componentType = componentType;
- }
-
/**
* @return the desiredState
*/
@@ -157,33 +141,8 @@ public void setComponentCategory(String componentCategory) {
@Override
public String toString() {
- return String.format("[clusterName=%s, serviceGroupName=%s, serviceName=%s, componentName=%s, componentType=%s, " +
+ return String.format("[clusterName=%s, serviceGroupName=%s, serviceName=%s, componentName=%s, " +
"desiredState=%s, recoveryEnabled=%s, componentCategory=%s]", clusterName, serviceGroupName,
- serviceName, componentName, componentType, desiredState, recoveryEnabled, componentCategory);
- }
-
- @Override
- public boolean equals(Object obj) {
- if (obj == this) {
- return true;
- }
- if (obj == null || getClass() != obj.getClass()) {
- return false;
- }
-
- ServiceComponentRequest other = (ServiceComponentRequest) obj;
-
- return Objects.equals(clusterName, other.clusterName) &&
- Objects.equals(serviceGroupName, other.serviceGroupName) &&
- Objects.equals(serviceName, other.serviceName) &&
- Objects.equals(componentCategory, other.componentCategory) &&
- Objects.equals(componentName, other.componentName) &&
- Objects.equals(desiredState, other.desiredState) &&
- Objects.equals(recoveryEnabled, other.recoveryEnabled);
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(clusterName, serviceGroupName, serviceName, componentCategory, componentName, desiredState, recoveryEnabled);
+ serviceName, componentName, desiredState, recoveryEnabled, componentCategory);
}
}
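One pitfall touched by the toString() change above is keeping String.format placeholders and arguments aligned once a field is removed, since each %s is consumed strictly left to right. A tiny illustrative check (the field values are made up):

class ToStringSketch {
    public static void main(String[] args) {
        String clusterName = "c1";
        String serviceGroupName = "CORE";
        String serviceName = "HDFS";
        String componentName = "NAMENODE";
        // Four placeholders, four arguments, in the same order as the text.
        System.out.println(String.format(
                "[clusterName=%s, serviceGroupName=%s, serviceName=%s, componentName=%s]",
                clusterName, serviceGroupName, serviceName, componentName));
    }
}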
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentResponse.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentResponse.java
index 85dc55f33be..d63b33c1ef0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentResponse.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentResponse.java
@@ -33,9 +33,7 @@ public class ServiceComponentResponse {
private Long serviceId; // REF
private String serviceName;
private String serviceType;
- private Long componentId;
private String componentName;
- private String componentType;
private String displayName;
private String desiredStackId;
private String desiredState;
@@ -46,10 +44,10 @@ public class ServiceComponentResponse {
private RepositoryVersionState repoState;
public ServiceComponentResponse(Long clusterId, String clusterName, Long serviceGroupId, String serviceGroupName,
- Long serviceId, String serviceName, String serviceType, Long componentId, String componentName,
- String componentType, StackId desiredStackId, String desiredState,
- Map serviceComponentStateCount, boolean recoveryEnabled,
- String displayName, String desiredVersion, RepositoryVersionState repoState) {
+ Long serviceId, String serviceName, String serviceType, String componentName,
+ StackId desiredStackId, String desiredState, Map serviceComponentStateCount,
+ boolean recoveryEnabled, String displayName, String desiredVersion,
+ RepositoryVersionState repoState) {
this.clusterId = clusterId;
this.clusterName = clusterName;
this.serviceGroupId = serviceGroupId;
@@ -57,9 +55,7 @@ public ServiceComponentResponse(Long clusterId, String clusterName, Long service
this.serviceId = serviceId;
this.serviceName = serviceName;
this.serviceType = serviceType;
- this.componentId = componentId;
this.componentName = componentName;
- this.componentType = componentType;
this.displayName = displayName;
this.desiredStackId = desiredStackId.getStackId();
this.desiredState = desiredState;
@@ -137,34 +133,6 @@ public void setComponentName(String componentName) {
this.componentName = componentName;
}
- /**
- * @param componentId the componentId to set
- */
- public void setComponentName(Long componentId) {
- this.componentId = componentId;
- }
-
- /**
- * @return the componentType
- */
- public String getComponentType() {
- return componentType;
- }
-
- /**
- * @param componentType the componentType to set
- */
- public void setComponentType(String componentType) {
- this.componentType = componentType;
- }
-
- /**
- * @return the componentId
- */
- public Long getComponentId() {
- return componentId;
- }
-
/**
* @return the displayName
*/
@@ -325,21 +293,11 @@ public boolean equals(Object o) {
return false;
}
- if (componentId != null ?
- !componentId.equals(that.componentId) : that.componentId != null) {
- return false;
- }
-
if (componentName != null ?
!componentName.equals(that.componentName) : that.componentName != null){
return false;
}
- if (componentType != null ?
- !componentType.equals(that.componentType) : that.componentType != null){
- return false;
- }
-
if (displayName != null ?
!displayName.equals(that.displayName) : that.displayName != null) {
return false;
@@ -357,9 +315,7 @@ public int hashCode() {
result = 71 * result + (serviceId != null ? serviceId.hashCode() : 0);
result = 71 * result + (serviceName != null ? serviceName.hashCode() : 0);
result = 71 * result + (serviceType != null ? serviceType.hashCode() : 0);
- result = 71 * result + (componentId != null ? componentId.hashCode() : 0);
result = 71 * result + (componentName != null ? componentName.hashCode():0);
- result = 71 * result + (componentType != null ? componentType.hashCode():0);
result = 71 * result + (displayName != null ? displayName.hashCode():0);
return result;
}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceRequest.java
index 4516b555069..70e5240c38a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceRequest.java
@@ -17,8 +17,6 @@
*/
package org.apache.ambari.server.controller;
-import java.util.Objects;
-
import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
import org.apache.ambari.server.state.StackId;
@@ -198,32 +196,4 @@ public String toString() {
public void setResolvedRepository(RepositoryVersionEntity repositoryVersion) { resolvedRepository = repositoryVersion; }
public RepositoryVersionEntity getResolvedRepository() { return resolvedRepository; }
-
- @Override
- public boolean equals(Object obj) {
- if (obj == this) {
- return true;
- }
- if (obj == null || getClass() != obj.getClass()) {
- return false;
- }
-
- ServiceRequest other = (ServiceRequest) obj;
-
- return Objects.equals(clusterName, other.clusterName) &&
- Objects.equals(serviceGroupName, other.serviceGroupName) &&
- Objects.equals(serviceType, other.serviceType) &&
- Objects.equals(serviceName, other.serviceName) &&
- Objects.equals(desiredState, other.desiredState) &&
- Objects.equals(maintenanceState, other.maintenanceState) &&
- Objects.equals(credentialStoreEnabled, other.credentialStoreEnabled) &&
- Objects.equals(credentialStoreSupported, other.credentialStoreSupported) &&
- Objects.equals(desiredStackId, other.desiredStackId) &&
- Objects.equals(desiredRepositoryVersionId, other.desiredRepositoryVersionId);
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(clusterName, serviceGroupName, serviceType, serviceName, desiredState, maintenanceState, credentialStoreEnabled, credentialStoreSupported, desiredStackId, desiredRepositoryVersionId);
- }
}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceResponse.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceResponse.java
index 65cd5734568..73a2c93a9ca 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceResponse.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceResponse.java
@@ -18,8 +18,6 @@
package org.apache.ambari.server.controller;
-import org.apache.ambari.annotations.Experimental;
-import org.apache.ambari.annotations.ExperimentalFeature;
import org.apache.ambari.server.state.RepositoryVersionState;
import org.apache.ambari.server.state.StackId;
@@ -193,12 +191,8 @@ public RepositoryVersionState getRepositoryVersionState() {
@Override
public boolean equals(Object o) {
- if (this == o) {
- return true;
- }
- if (o == null || getClass() != o.getClass()) {
- return false;
- }
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
ServiceResponse that = (ServiceResponse) o;
@@ -308,16 +302,12 @@ public interface ServiceResponseSwagger extends ApiModel {
/**
* @param id
*/
- @Deprecated
- @Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL)
public void setDesiredRepositoryVersionId(Long id) {
desiredRepositoryVersionId = id;
}
/**
*/
- @Deprecated
- @Experimental(feature = ExperimentalFeature.REPO_VERSION_REMOVAL)
public Long getDesiredRepositoryVersionId() {
return desiredRepositoryVersionId;
}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java
index b30cd333ee8..f385f7ef3fa 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java
@@ -505,17 +505,10 @@ public Host getHost(String clusterName, String hostName) {
public boolean isCollectorComponentLive(String clusterName, MetricsService service) throws SystemException {
final String collectorHostName = getCollectorHostName(clusterName, service);
- Long componentId = null;
- try {
- componentId = managementController.getClusters().getCluster(clusterName).getComponentId(Role.METRICS_COLLECTOR.name());
- } catch (AmbariException e) {
- e.printStackTrace();
- }
if (service.equals(GANGLIA)) {
- // TODO : Multi_Metrics_Changes. Is there is more than one instance of GANGLIA_SERVER, type and name would be different.
return HostStatusHelper.isHostComponentLive(managementController, clusterName, collectorHostName, "GANGLIA",
- componentId, Role.GANGLIA_SERVER.name(), Role.GANGLIA_SERVER.name());
+ Role.GANGLIA_SERVER.name());
} else if (service.equals(TIMELINE_METRICS)) {
return metricsCollectorHAManager.isCollectorComponentLive(clusterName);
}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BaseClusterRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BaseClusterRequest.java
index 0d243a2f551..77eafebf4bd 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BaseClusterRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BaseClusterRequest.java
@@ -29,7 +29,6 @@
import org.apache.ambari.server.controller.spi.Resource;
import org.apache.ambari.server.controller.spi.ResourceProvider;
import org.apache.ambari.server.controller.utilities.ClusterControllerHelper;
-import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.topology.Blueprint;
import org.apache.ambari.server.topology.BlueprintFactory;
import org.apache.ambari.server.topology.Configuration;
@@ -37,7 +36,6 @@
import org.apache.ambari.server.topology.InvalidTopologyTemplateException;
import org.apache.ambari.server.topology.SecurityConfiguration;
import org.apache.ambari.server.topology.TopologyRequest;
-import org.apache.ambari.server.topology.TopologyRequestUtil;
/**
* Provides common cluster request functionality.
@@ -55,11 +53,6 @@ public abstract class BaseClusterRequest implements TopologyRequest {
protected ProvisionAction provisionAction;
- /**
- * The raw request body. We would like to persist it.
- */
- protected String rawRequestBody;
-
/**
* cluster id
*/
@@ -125,19 +118,6 @@ public Map getHostGroupInfo() {
return hostGroupInfoMap;
}
- /**
- * @return the raw request body in JSON string
- */
- public String getRawRequestBody() {
- return rawRequestBody;
- }
-
- @Override
- public Set getStackIds() {
- return TopologyRequestUtil.getStackIdsFromRequest(
- TopologyRequestUtil.getPropertyMap(rawRequestBody));
- }
-
/**
* Validate that all properties specified in the predicate are valid for the Host resource.
*
@@ -200,7 +180,6 @@ public SecurityConfiguration getSecurityConfiguration() {
return securityConfiguration;
}
-
/**
* Get the host resource provider instance.
*
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
index 5ea81c34418..8da1f51ee2c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
@@ -44,10 +44,12 @@
import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.ValueAttributesInfo;
import org.apache.ambari.server.topology.AdvisedConfiguration;
+import org.apache.ambari.server.topology.Blueprint;
import org.apache.ambari.server.topology.Cardinality;
import org.apache.ambari.server.topology.ClusterTopology;
import org.apache.ambari.server.topology.ConfigRecommendationStrategy;
import org.apache.ambari.server.topology.Configuration;
+import org.apache.ambari.server.topology.HostGroup;
import org.apache.ambari.server.topology.HostGroupInfo;
import org.apache.ambari.server.topology.validators.UnitValidatedProperty;
import org.apache.commons.lang.StringUtils;
@@ -69,11 +71,6 @@ public class BlueprintConfigurationProcessor {
private static final Logger LOG = LoggerFactory.getLogger(BlueprintConfigurationProcessor.class);
- /**
- * Compiled regex for "%HOSTGROUP::...%" token.
- */
- public static final Pattern HOST_GROUP_PLACEHOLDER_PATTERN = Pattern.compile("%HOSTGROUP::(\\S+?)%");
-
private final static String COMMAND_RETRY_ENABLED_PROPERTY_NAME = "command_retry_enabled";
private final static String COMMANDS_TO_RETRY_PROPERTY_NAME = "commands_to_retry";
@@ -168,23 +165,6 @@ public class BlueprintConfigurationProcessor {
private static Set configPropertiesWithHASupport =
new HashSet<>(Arrays.asList("fs.defaultFS", "hbase.rootdir", "instance.volumes", "policymgr_external_url", "xasecure.audit.destination.hdfs.dir"));
- public static boolean isNameNodeHAEnabled(Map> configurationProperties) {
- return configurationProperties.containsKey("hdfs-site") &&
- (configurationProperties.get("hdfs-site").containsKey("dfs.nameservices") ||
- configurationProperties.get("hdfs-site").containsKey("dfs.internal.nameservices"));
- }
-
- /**
- * Static convenience function to determine if Yarn ResourceManager HA is enabled
- * @param configProperties configuration properties for this cluster
- * @return true if Yarn ResourceManager HA is enabled
- * false if Yarn ResourceManager HA is not enabled
- */
- public static boolean isYarnResourceManagerHAEnabled(Map> configProperties) {
- return configProperties.containsKey("yarn-site") && configProperties.get("yarn-site").containsKey("yarn.resourcemanager.ha.enabled")
- && configProperties.get("yarn-site").get("yarn.resourcemanager.ha.enabled").equals("true");
- }
-
/**
* Statically-defined list of filters to apply on property exports.
* This will initially be used to filter out the Ranger Passwords, but
@@ -299,7 +279,7 @@ private boolean containsHostFromHostGroups(String configType, String propertyNam
return false;
}
// check fir bp import
- Matcher m = HOST_GROUP_PLACEHOLDER_PATTERN.matcher(propertyValue);
+ Matcher m = HostGroup.HOSTGROUP_REGEX.matcher(propertyValue);
if (m.find()) {
return true;
}
@@ -370,8 +350,7 @@ public Set doUpdateForClusterCreate() throws ConfigurationTopologyExcept
// set of properties (copy) doesn't include the removed properties. If an updater
// removes a property other than the property it is registered for then we will
// have an issue as it won't be removed from the clusterProps map as it is a copy.
- Map> configProperties = clusterConfig.getFullProperties();
- Map> clusterProps = configProperties;
+ Map> clusterProps = clusterConfig.getFullProperties();
for (Map> updaterMap : createCollectionOfUpdaters()) {
for (Map.Entry> entry : updaterMap.entrySet()) {
String type = entry.getKey();
@@ -419,10 +398,10 @@ public Set doUpdateForClusterCreate() throws ConfigurationTopologyExcept
}
//todo: lots of hard coded HA rules included here
- if (isNameNodeHAEnabled(configProperties)) {
+ if (clusterTopology.isNameNodeHAEnabled()) {
// add "dfs.internal.nameservices" if it's not specified
- Map hdfsSiteConfig = configProperties.get("hdfs-site");
+ Map hdfsSiteConfig = clusterConfig.getFullProperties().get("hdfs-site");
String nameservices = hdfsSiteConfig.get("dfs.nameservices");
String int_nameservices = hdfsSiteConfig.get("dfs.internal.nameservices");
if(int_nameservices == null && nameservices != null) {
@@ -451,7 +430,7 @@ public Set doUpdateForClusterCreate() throws ConfigurationTopologyExcept
setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
setRetryConfiguration(clusterConfig, configTypesUpdated);
setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
- addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getStack());
+ addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
trimProperties(clusterConfig, clusterTopology);
@@ -459,7 +438,8 @@ public Set doUpdateForClusterCreate() throws ConfigurationTopologyExcept
}
private void trimProperties(Configuration clusterConfig, ClusterTopology clusterTopology) {
- StackDefinition stack = clusterTopology.getStack();
+ Blueprint blueprint = clusterTopology.getBlueprint();
+ StackDefinition stack = blueprint.getStack();
Map> configTypes = clusterConfig.getFullProperties();
for (String configType : configTypes.keySet()) {
@@ -507,16 +487,15 @@ private static boolean shouldPropertyBeStoredWithDefault(String propertyName) {
*/
public void doUpdateForBlueprintExport() {
// HA configs are only processed in cluster configuration, not HG configurations
- Map> configProperties = clusterTopology.getConfiguration().getFullProperties();
- if (isNameNodeHAEnabled(configProperties)) {
+ if (clusterTopology.isNameNodeHAEnabled()) {
doNameNodeHAUpdate();
}
- if (isYarnResourceManagerHAEnabled(configProperties)) {
+ if (clusterTopology.isYarnResourceManagerHAEnabled()) {
doYarnResourceManagerHAUpdate();
}
- if (isOozieServerHAEnabled(configProperties)) {
+ if (isOozieServerHAEnabled(clusterTopology.getConfiguration().getFullProperties())) {
doOozieServerHAUpdate();
}
@@ -562,7 +541,7 @@ private void doFilterPriorToExport(Configuration configuration) {
String clusterName = clusterTopology.getAmbariContext().getClusterName(clusterTopology.getClusterId());
Cluster cluster = clusterTopology.getAmbariContext().getController().getClusters().getCluster(clusterName);
authToLocalPerClusterMap = new HashMap<>();
- authToLocalPerClusterMap.put(clusterTopology.getClusterId(), clusterTopology.getAmbariContext().getController().getKerberosHelper().getKerberosDescriptor(cluster, false).getAllAuthToLocalProperties());
+ authToLocalPerClusterMap.put(Long.valueOf(clusterTopology.getClusterId()), clusterTopology.getAmbariContext().getController().getKerberosHelper().getKerberosDescriptor(cluster, false).getAllAuthToLocalProperties());
} catch (AmbariException e) {
LOG.error("Error while getting authToLocal properties. ", e);
}
@@ -570,7 +549,7 @@ private void doFilterPriorToExport(Configuration configuration) {
for (Map.Entry> configEntry : properties.entrySet()) {
String type = configEntry.getKey();
try {
- clusterTopology.getStack().getServiceForConfigType(type);
+ clusterTopology.getBlueprint().getStack().getServiceForConfigType(type);
} catch (IllegalArgumentException illegalArgumentException) {
LOG.error(new StringBuilder(String.format("Error encountered while trying to obtain the service name for config type [%s]. ", type))
.append("Further processing on this config type will be skipped. ")
@@ -644,7 +623,8 @@ private void doRecommendConfigurations(Configuration configuration, Set
* @param advisedConfigurations advised configuration instance
*/
private void doFilterStackDefaults(Map advisedConfigurations) {
- Configuration stackDefaults = clusterTopology.getStack().getConfiguration(clusterTopology.getServices());
+ Blueprint blueprint = clusterTopology.getBlueprint();
+ Configuration stackDefaults = blueprint.getStack().getConfiguration(blueprint.getServices());
Map> stackDefaultProps = stackDefaults.getProperties();
for (Map.Entry adConfEntry : advisedConfigurations.entrySet()) {
AdvisedConfiguration advisedConfiguration = adConfEntry.getValue();
@@ -722,16 +702,15 @@ private void doRemovePropertiesIfNeeded(Configuration configuration,
private Collection